v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
assembler-mips.cc
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 
36 #include "v8.h"
37 
38 #if V8_TARGET_ARCH_MIPS
39 
40 #include "mips/assembler-mips-inl.h"
41 #include "serialize.h"
42 
43 namespace v8 {
44 namespace internal {
45 
46 #ifdef DEBUG
47 bool CpuFeatures::initialized_ = false;
48 #endif
49 unsigned CpuFeatures::supported_ = 0;
50 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
51 unsigned CpuFeatures::cross_compile_ = 0;
52 
53 
54 ExternalReference ExternalReference::cpu_features() {
55  ASSERT(CpuFeatures::initialized_);
56  return ExternalReference(&CpuFeatures::supported_);
57 }
58 
59 
60 // Get the CPU features enabled by the build. For cross compilation the
61 // preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
62 // can be defined to enable FPU instructions when building the
63 // snapshot.
64 static uint64_t CpuFeaturesImpliedByCompiler() {
65  uint64_t answer = 0;
66 #ifdef CAN_USE_FPU_INSTRUCTIONS
67  answer |= static_cast<uint64_t>(1) << FPU;
68 #endif // def CAN_USE_FPU_INSTRUCTIONS
69 
70 #ifdef __mips__
71  // If the compiler is allowed to use FPU then we can use FPU too in our code
72  // generation even when generating snapshots. This won't work for cross
73  // compilation.
74 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
75  answer |= static_cast<uint64_t>(1) << FPU;
76 #endif // defined(__mips_hard_float) && __mips_hard_float != 0
77 #endif // def __mips__
78 
79  return answer;
80 }
81 
82 
83 const char* DoubleRegister::AllocationIndexToString(int index) {
84  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
85  const char* const names[] = {
86  "f0",
87  "f2",
88  "f4",
89  "f6",
90  "f8",
91  "f10",
92  "f12",
93  "f14",
94  "f16",
95  "f18",
96  "f20",
97  "f22",
98  "f24",
99  "f26"
100  };
101  return names[index];
102 }
103 
104 
105 void CpuFeatures::Probe() {
106  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
107  CpuFeaturesImpliedByCompiler());
108  ASSERT(supported_ == 0 || supported_ == standard_features);
109 #ifdef DEBUG
110  initialized_ = true;
111 #endif
112 
113  // Get the features implied by the OS and the compiler settings. This is the
114  // minimal set of features which is also allowed for generated code in the
115  // snapshot.
116  supported_ |= standard_features;
117 
118  if (Serializer::enabled()) {
119  // No probing for features if we might serialize (generate snapshot).
120  return;
121  }
122 
123  // If the compiler is allowed to use fpu then we can use fpu too in our
124  // code generation.
125 #if !defined(__mips__)
126  // For the simulator build, use FPU.
127  supported_ |= static_cast<uint64_t>(1) << FPU;
128 #else
129  // Probe for additional features not already known to be available.
130  CPU cpu;
131  if (cpu.has_fpu()) {
132  // This implementation also sets the FPU flags if
133  // runtime detection of FPU returns true.
134  supported_ |= static_cast<uint64_t>(1) << FPU;
135  found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
136  }
137 #endif
138 }
139 
140 
141 int ToNumber(Register reg) {
142  ASSERT(reg.is_valid());
143  const int kNumbers[] = {
144  0, // zero_reg
145  1, // at
146  2, // v0
147  3, // v1
148  4, // a0
149  5, // a1
150  6, // a2
151  7, // a3
152  8, // t0
153  9, // t1
154  10, // t2
155  11, // t3
156  12, // t4
157  13, // t5
158  14, // t6
159  15, // t7
160  16, // s0
161  17, // s1
162  18, // s2
163  19, // s3
164  20, // s4
165  21, // s5
166  22, // s6
167  23, // s7
168  24, // t8
169  25, // t9
170  26, // k0
171  27, // k1
172  28, // gp
173  29, // sp
174  30, // fp
175  31, // ra
176  };
177  return kNumbers[reg.code()];
178 }
179 
180 
181 Register ToRegister(int num) {
182  ASSERT(num >= 0 && num < kNumRegisters);
183  const Register kRegisters[] = {
184  zero_reg,
185  at,
186  v0, v1,
187  a0, a1, a2, a3,
188  t0, t1, t2, t3, t4, t5, t6, t7,
189  s0, s1, s2, s3, s4, s5, s6, s7,
190  t8, t9,
191  k0, k1,
192  gp,
193  sp,
194  fp,
195  ra
196  };
197  return kRegisters[num];
198 }
199 
200 
201 // -----------------------------------------------------------------------------
202 // Implementation of RelocInfo.
203 
204 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
205  1 << RelocInfo::INTERNAL_REFERENCE;
206 
207 
208 bool RelocInfo::IsCodedSpecially() {
209  // The deserializer needs to know whether a pointer is specially coded. Being
210  // specially coded on MIPS means that it is a lui/ori instruction, and that is
211  // always the case inside code objects.
212  return true;
213 }
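// For example, a 32-bit code target is materialized as a two-instruction pair
//   lui(at, target_hi16); ori(at, at, target_lo16);
// (the operand names are just illustrative), and target_at_put() further down
// rewrites exactly this lui/ori pattern when the target is relocated.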
214 
215 
216 bool RelocInfo::IsInConstantPool() {
217  return false;
218 }
219 
220 
221 // Patch the code at the current address with the supplied instructions.
222 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
223  Instr* pc = reinterpret_cast<Instr*>(pc_);
224  Instr* instr = reinterpret_cast<Instr*>(instructions);
225  for (int i = 0; i < instruction_count; i++) {
226  *(pc + i) = *(instr + i);
227  }
228 
229  // Indicate that code has changed.
230  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
231 }
232 
233 
234 // Patch the code at the current PC with a call to the target address.
235 // Additional guard instructions can be added if required.
236 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
237  // Patch the code at the current address with a call to the target.
238  UNIMPLEMENTED();
239 }
240 
241 
242 // -----------------------------------------------------------------------------
243 // Implementation of Operand and MemOperand.
244 // See assembler-mips-inl.h for inlined constructors.
245 
246 Operand::Operand(Handle<Object> handle) {
247  AllowDeferredHandleDereference using_raw_address;
248  rm_ = no_reg;
249  // Verify all Objects referred to by code are NOT in new space.
250  Object* obj = *handle;
251  if (obj->IsHeapObject()) {
252  ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
253  imm32_ = reinterpret_cast<intptr_t>(handle.location());
254  rmode_ = RelocInfo::EMBEDDED_OBJECT;
255  } else {
256  // No relocation needed.
257  imm32_ = reinterpret_cast<intptr_t>(obj);
258  rmode_ = RelocInfo::NONE32;
259  }
260 }
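// Note: for heap objects the operand records the handle's location (a slot the
// GC can update) and tags it EMBEDDED_OBJECT; Smis are encoded directly into
// the immediate and need no relocation entry.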
261 
262 
263 MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
264  offset_ = offset;
265 }
266 
267 
268 MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
269  OffsetAddend offset_addend) : Operand(rm) {
270  offset_ = unit * multiplier + offset_addend;
271 }
272 
273 
274 // -----------------------------------------------------------------------------
275 // Specific instructions, constants, and masks.
276 
277 static const int kNegOffset = 0x00008000;
278 // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
279 // operations as post-increment of sp.
280 const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
281  | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
282 // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
283 const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
284  | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
285 // sw(r, MemOperand(sp, 0))
286 const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
287  | (0 & kImm16Mask);
288 // lw(r, MemOperand(sp, 0))
289 const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
290  | (0 & kImm16Mask);
291 
292 const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
293  | (0 & kImm16Mask);
294 
295 const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
296  | (0 & kImm16Mask);
297 
298 const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
299  | (kNegOffset & kImm16Mask);
300 
301 const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
302  | (kNegOffset & kImm16Mask);
303 // A mask for the Rt register for push, pop, lw, sw instructions.
304 const Instr kRtMask = kRtFieldMask;
305 const Instr kLwSwInstrTypeMask = 0xffe00000;
306 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
307 const Instr kLwSwOffsetMask = kImm16Mask;
308 
309 
310 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
311  : AssemblerBase(isolate, buffer, buffer_size),
312  recorded_ast_id_(TypeFeedbackId::None()),
313  positions_recorder_(this) {
314  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
315 
316  last_trampoline_pool_end_ = 0;
317  no_trampoline_pool_before_ = 0;
318  trampoline_pool_blocked_nesting_ = 0;
319  // We leave space (16 * kTrampolineSlotsSize)
320  // for BlockTrampolinePoolScope buffer.
321  next_buffer_check_ = FLAG_force_long_branches
322  ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
323  internal_trampoline_exception_ = false;
324  last_bound_pos_ = 0;
325 
326  trampoline_emitted_ = FLAG_force_long_branches;
327  unbound_labels_count_ = 0;
328  block_buffer_growth_ = false;
329 
330  ClearRecordedAstId();
331 }
332 
333 
334 void Assembler::GetCode(CodeDesc* desc) {
335  ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
336  // Set up code descriptor.
337  desc->buffer = buffer_;
338  desc->buffer_size = buffer_size_;
339  desc->instr_size = pc_offset();
340  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
341  desc->origin = this;
342 }
343 
344 
345 void Assembler::Align(int m) {
346  ASSERT(m >= 4 && IsPowerOf2(m));
347  while ((pc_offset() & (m - 1)) != 0) {
348  nop();
349  }
350 }
351 
352 
353 void Assembler::CodeTargetAlign() {
354  // No advantage to aligning branch/call targets to more than
355  // single instruction, that I am aware of.
356  Align(4);
357 }
358 
359 
360 Register Assembler::GetRtReg(Instr instr) {
361  Register rt;
362  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
363  return rt;
364 }
365 
366 
367 Register Assembler::GetRsReg(Instr instr) {
368  Register rs;
369  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
370  return rs;
371 }
372 
373 
374 Register Assembler::GetRdReg(Instr instr) {
375  Register rd;
376  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
377  return rd;
378 }
379 
380 
381 uint32_t Assembler::GetRt(Instr instr) {
382  return (instr & kRtFieldMask) >> kRtShift;
383 }
384 
385 
386 uint32_t Assembler::GetRtField(Instr instr) {
387  return instr & kRtFieldMask;
388 }
389 
390 
391 uint32_t Assembler::GetRs(Instr instr) {
392  return (instr & kRsFieldMask) >> kRsShift;
393 }
394 
395 
396 uint32_t Assembler::GetRsField(Instr instr) {
397  return instr & kRsFieldMask;
398 }
399 
400 
401 uint32_t Assembler::GetRd(Instr instr) {
402  return (instr & kRdFieldMask) >> kRdShift;
403 }
404 
405 
406 uint32_t Assembler::GetRdField(Instr instr) {
407  return instr & kRdFieldMask;
408 }
409 
410 
411 uint32_t Assembler::GetSa(Instr instr) {
412  return (instr & kSaFieldMask) >> kSaShift;
413 }
414 
415 
416 uint32_t Assembler::GetSaField(Instr instr) {
417  return instr & kSaFieldMask;
418 }
419 
420 
421 uint32_t Assembler::GetOpcodeField(Instr instr) {
422  return instr & kOpcodeMask;
423 }
424 
425 
426 uint32_t Assembler::GetFunction(Instr instr) {
427  return (instr & kFunctionFieldMask) >> kFunctionShift;
428 }
429 
430 
431 uint32_t Assembler::GetFunctionField(Instr instr) {
432  return instr & kFunctionFieldMask;
433 }
434 
435 
436 uint32_t Assembler::GetImmediate16(Instr instr) {
437  return instr & kImm16Mask;
438 }
439 
440 
441 uint32_t Assembler::GetLabelConst(Instr instr) {
442  return instr & ~kImm16Mask;
443 }
444 
445 
446 bool Assembler::IsPop(Instr instr) {
447  return (instr & ~kRtMask) == kPopRegPattern;
448 }
449 
450 
451 bool Assembler::IsPush(Instr instr) {
452  return (instr & ~kRtMask) == kPushRegPattern;
453 }
454 
455 
456 bool Assembler::IsSwRegFpOffset(Instr instr) {
457  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
458 }
459 
460 
461 bool Assembler::IsLwRegFpOffset(Instr instr) {
462  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
463 }
464 
465 
466 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
467  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
468  kSwRegFpNegOffsetPattern);
469 }
470 
471 
472 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
473  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
474  kLwRegFpNegOffsetPattern);
475 }
476 
477 
478 // Labels refer to positions in the (to be) generated code.
479 // There are bound, linked, and unused labels.
480 //
481 // Bound labels refer to known positions in the already
482 // generated code. pos() is the position the label refers to.
483 //
484 // Linked labels refer to unknown positions in the code
485 // to be generated; pos() is the position of the last
486 // instruction using the label.
487 
488 // The link chain is terminated by a value of -1 stored in the instruction,
489 // which is an otherwise illegal value (a branch offset of -1 is an infinite loop).
490 // The instruction's 16-bit offset field addresses 32-bit words, but in
491 // code it is converted to an 18-bit value addressing bytes, hence the -4 value.
492 
493 const int kEndOfChain = -4;
494 // Determines the end of the Jump chain (a subset of the label link chain).
495 const int kEndOfJumpChain = 0;
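// Worked example: a terminating link stores -1 in the 16-bit offset field;
// sign-extending and converting words to bytes gives -1 << 2 = -4, which is
// the kEndOfChain sentinel above, while the jump chain uses an absolute 0
// (kEndOfJumpChain) as its terminator.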
496 
497 
498 bool Assembler::IsBranch(Instr instr) {
499  uint32_t opcode = GetOpcodeField(instr);
500  uint32_t rt_field = GetRtField(instr);
501  uint32_t rs_field = GetRsField(instr);
502  // Checks if the instruction is a branch.
503  return opcode == BEQ ||
504  opcode == BNE ||
505  opcode == BLEZ ||
506  opcode == BGTZ ||
507  opcode == BEQL ||
508  opcode == BNEL ||
509  opcode == BLEZL ||
510  opcode == BGTZL ||
511  (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
512  rt_field == BLTZAL || rt_field == BGEZAL)) ||
513  (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
514 }
515 
516 
517 bool Assembler::IsEmittedConstant(Instr instr) {
518  uint32_t label_constant = GetLabelConst(instr);
519  return label_constant == 0; // Emitted label const in reg-exp engine.
520 }
521 
522 
523 bool Assembler::IsBeq(Instr instr) {
524  return GetOpcodeField(instr) == BEQ;
525 }
526 
527 
528 bool Assembler::IsBne(Instr instr) {
529  return GetOpcodeField(instr) == BNE;
530 }
531 
532 
533 bool Assembler::IsJump(Instr instr) {
534  uint32_t opcode = GetOpcodeField(instr);
535  uint32_t rt_field = GetRtField(instr);
536  uint32_t rd_field = GetRdField(instr);
537  uint32_t function_field = GetFunctionField(instr);
538  // Checks if the instruction is a jump.
539  return opcode == J || opcode == JAL ||
540  (opcode == SPECIAL && rt_field == 0 &&
541  ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
542 }
543 
544 
545 bool Assembler::IsJ(Instr instr) {
546  uint32_t opcode = GetOpcodeField(instr);
547  // Checks if the instruction is a jump.
548  return opcode == J;
549 }
550 
551 
552 bool Assembler::IsJal(Instr instr) {
553  return GetOpcodeField(instr) == JAL;
554 }
555 
556 
557 bool Assembler::IsJr(Instr instr) {
558  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
559 }
560 
561 
562 bool Assembler::IsJalr(Instr instr) {
563  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
564 }
565 
566 
567 bool Assembler::IsLui(Instr instr) {
568  uint32_t opcode = GetOpcodeField(instr);
569  // Checks if the instruction is a load upper immediate.
570  return opcode == LUI;
571 }
572 
573 
574 bool Assembler::IsOri(Instr instr) {
575  uint32_t opcode = GetOpcodeField(instr);
576  // Checks if the instruction is an ori (or immediate).
577  return opcode == ORI;
578 }
579 
580 
581 bool Assembler::IsNop(Instr instr, unsigned int type) {
582  // See Assembler::nop(type).
583  ASSERT(type < 32);
584  uint32_t opcode = GetOpcodeField(instr);
585  uint32_t function = GetFunctionField(instr);
586  uint32_t rt = GetRt(instr);
587  uint32_t rd = GetRd(instr);
588  uint32_t sa = GetSa(instr);
589 
590  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
591  // When marking non-zero type, use sll(zero_reg, at, type)
592  // to avoid use of mips ssnop and ehb special encodings
593  // of the sll instruction.
594 
595  Register nop_rt_reg = (type == 0) ? zero_reg : at;
596  bool ret = (opcode == SPECIAL && function == SLL &&
597  rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
598  rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
599  sa == type);
600 
601  return ret;
602 }
603 
604 
605 int32_t Assembler::GetBranchOffset(Instr instr) {
606  ASSERT(IsBranch(instr));
607  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
608 }
609 
610 
611 bool Assembler::IsLw(Instr instr) {
612  return ((instr & kOpcodeMask) == LW);
613 }
614 
615 
616 uint16_t Assembler::GetLwOffset(Instr instr) {
617  ASSERT(IsLw(instr));
618  return ((instr & kImm16Mask));
619 }
620 
621 
622 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
623  ASSERT(IsLw(instr));
624 
625  // We actually create a new lw instruction based on the original one.
626  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
627  | (offset & kImm16Mask);
628 
629  return temp_instr;
630 }
631 
632 
633 bool Assembler::IsSw(Instr instr) {
634  return ((instr & kOpcodeMask) == SW);
635 }
636 
637 
638 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
639  ASSERT(IsSw(instr));
640  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
641 }
642 
643 
644 bool Assembler::IsAddImmediate(Instr instr) {
645  return ((instr & kOpcodeMask) == ADDIU);
646 }
647 
648 
649 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
650  ASSERT(IsAddImmediate(instr));
651  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
652 }
653 
654 
655 bool Assembler::IsAndImmediate(Instr instr) {
656  return GetOpcodeField(instr) == ANDI;
657 }
658 
659 
660 int Assembler::target_at(int32_t pos) {
661  Instr instr = instr_at(pos);
662  if ((instr & ~kImm16Mask) == 0) {
663  // Emitted label constant, not part of a branch.
664  if (instr == 0) {
665  return kEndOfChain;
666  } else {
667  int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
668  return (imm18 + pos);
669  }
670  }
671  // Check we have a branch or jump instruction.
672  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
673  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
674  // the compiler uses arithmetic shifts for signed integers.
675  if (IsBranch(instr)) {
676  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
677 
678  if (imm18 == kEndOfChain) {
679  // EndOfChain sentinel is returned directly, not relative to pc or pos.
680  return kEndOfChain;
681  } else {
682  return pos + kBranchPCOffset + imm18;
683  }
684  } else if (IsLui(instr)) {
685  Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
686  Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
687  ASSERT(IsOri(instr_ori));
688  int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
689  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
690 
691  if (imm == kEndOfJumpChain) {
692  // EndOfChain sentinel is returned directly, not relative to pc or pos.
693  return kEndOfChain;
694  } else {
695  uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
696  int32_t delta = instr_address - imm;
697  ASSERT(pos > delta);
698  return pos - delta;
699  }
700  } else {
701  int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
702  if (imm28 == kEndOfJumpChain) {
703  // EndOfChain sentinel is returned directly, not relative to pc or pos.
704  return kEndOfChain;
705  } else {
706  uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
707  instr_address &= kImm28Mask;
708  int32_t delta = instr_address - imm28;
709  ASSERT(pos > delta);
710  return pos - delta;
711  }
712  }
713 }
714 
715 
716 void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
717  Instr instr = instr_at(pos);
718  if ((instr & ~kImm16Mask) == 0) {
719  ASSERT(target_pos == kEndOfChain || target_pos >= 0);
720  // Emitted label constant, not part of a branch.
721  // Make label relative to Code* of generated Code object.
722  instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
723  return;
724  }
725 
726  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
727  if (IsBranch(instr)) {
728  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
729  ASSERT((imm18 & 3) == 0);
730 
731  instr &= ~kImm16Mask;
732  int32_t imm16 = imm18 >> 2;
733  ASSERT(is_int16(imm16));
734 
735  instr_at_put(pos, instr | (imm16 & kImm16Mask));
736  } else if (IsLui(instr)) {
737  Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
738  Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
739  ASSERT(IsOri(instr_ori));
740  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
741  ASSERT((imm & 3) == 0);
742 
743  instr_lui &= ~kImm16Mask;
744  instr_ori &= ~kImm16Mask;
745 
746  instr_at_put(pos + 0 * Assembler::kInstrSize,
747  instr_lui | ((imm & kHiMask) >> kLuiShift));
748  instr_at_put(pos + 1 * Assembler::kInstrSize,
749  instr_ori | (imm & kImm16Mask));
750  } else {
751  uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
752  imm28 &= kImm28Mask;
753  ASSERT((imm28 & 3) == 0);
754 
755  instr &= ~kImm26Mask;
756  uint32_t imm26 = imm28 >> 2;
757  ASSERT(is_uint26(imm26));
758 
759  instr_at_put(pos, instr | (imm26 & kImm26Mask));
760  }
761 }
762 
763 
764 void Assembler::print(Label* L) {
765  if (L->is_unused()) {
766  PrintF("unused label\n");
767  } else if (L->is_bound()) {
768  PrintF("bound label to %d\n", L->pos());
769  } else if (L->is_linked()) {
770  Label l = *L;
771  PrintF("unbound label");
772  while (l.is_linked()) {
773  PrintF("@ %d ", l.pos());
774  Instr instr = instr_at(l.pos());
775  if ((instr & ~kImm16Mask) == 0) {
776  PrintF("value\n");
777  } else {
778  PrintF("%d\n", instr);
779  }
780  next(&l);
781  }
782  } else {
783  PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
784  }
785 }
786 
787 
788 void Assembler::bind_to(Label* L, int pos) {
789  ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
790  int32_t trampoline_pos = kInvalidSlotPos;
791  if (L->is_linked() && !trampoline_emitted_) {
792  unbound_labels_count_--;
793  next_buffer_check_ += kTrampolineSlotsSize;
794  }
795 
796  while (L->is_linked()) {
797  int32_t fixup_pos = L->pos();
798  int32_t dist = pos - fixup_pos;
799  next(L); // Call next before overwriting link with target at fixup_pos.
800  Instr instr = instr_at(fixup_pos);
801  if (IsBranch(instr)) {
802  if (dist > kMaxBranchOffset) {
803  if (trampoline_pos == kInvalidSlotPos) {
804  trampoline_pos = get_trampoline_entry(fixup_pos);
805  CHECK(trampoline_pos != kInvalidSlotPos);
806  }
807  ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
808  target_at_put(fixup_pos, trampoline_pos);
809  fixup_pos = trampoline_pos;
810  dist = pos - fixup_pos;
811  }
812  target_at_put(fixup_pos, pos);
813  } else {
814  ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
815  target_at_put(fixup_pos, pos);
816  }
817  }
818  L->bind_to(pos);
819 
820  // Keep track of the last bound label so we don't eliminate any instructions
821  // before a bound label.
822  if (pos > last_bound_pos_)
823  last_bound_pos_ = pos;
824 }
825 
826 
827 void Assembler::bind(Label* L) {
828  ASSERT(!L->is_bound()); // Label can only be bound once.
829  bind_to(L, pc_offset());
830 }
831 
832 
833 void Assembler::next(Label* L) {
834  ASSERT(L->is_linked());
835  int link = target_at(L->pos());
836  if (link == kEndOfChain) {
837  L->Unuse();
838  } else {
839  ASSERT(link >= 0);
840  L->link_to(link);
841  }
842 }
843 
844 
845 bool Assembler::is_near(Label* L) {
846  if (L->is_bound()) {
847  return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
848  }
849  return false;
850 }
851 
852 
853 // We have to use a temporary register for things that can be relocated even
854 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
855 // space. There is no guarantee that the relocated location can be similarly
856 // encoded.
857 bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
858  return !RelocInfo::IsNone(rmode);
859 }
860 
861 void Assembler::GenInstrRegister(Opcode opcode,
862  Register rs,
863  Register rt,
864  Register rd,
865  uint16_t sa,
866  SecondaryField func) {
867  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
868  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
869  | (rd.code() << kRdShift) | (sa << kSaShift) | func;
870  emit(instr);
871 }
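// The fields assembled above follow the MIPS R-type instruction layout:
//   opcode[31:26] | rs[25:21] | rt[20:16] | rd[15:11] | sa[10:6] | funct[5:0]
// with kRsShift, kRtShift, kRdShift and kSaShift supplying the bit positions.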
872 
873 
874 void Assembler::GenInstrRegister(Opcode opcode,
875  Register rs,
876  Register rt,
877  uint16_t msb,
878  uint16_t lsb,
879  SecondaryField func) {
880  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
881  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
882  | (msb << kRdShift) | (lsb << kSaShift) | func;
883  emit(instr);
884 }
885 
886 
887 void Assembler::GenInstrRegister(Opcode opcode,
888  SecondaryField fmt,
889  FPURegister ft,
890  FPURegister fs,
891  FPURegister fd,
892  SecondaryField func) {
893  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
894  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
895  | (fd.code() << kFdShift) | func;
896  emit(instr);
897 }
898 
899 
900 void Assembler::GenInstrRegister(Opcode opcode,
901  FPURegister fr,
902  FPURegister ft,
903  FPURegister fs,
904  FPURegister fd,
905  SecondaryField func) {
906  ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
907  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
908  | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
909  emit(instr);
910 }
911 
912 
913 void Assembler::GenInstrRegister(Opcode opcode,
914  SecondaryField fmt,
915  Register rt,
916  FPURegister fs,
917  FPURegister fd,
918  SecondaryField func) {
919  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
920  Instr instr = opcode | fmt | (rt.code() << kRtShift)
921  | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
922  emit(instr);
923 }
924 
925 
926 void Assembler::GenInstrRegister(Opcode opcode,
927  SecondaryField fmt,
928  Register rt,
929  FPUControlRegister fs,
930  SecondaryField func) {
931  ASSERT(fs.is_valid() && rt.is_valid());
932  Instr instr =
933  opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
934  emit(instr);
935 }
936 
937 
938 // Instructions with immediate value.
939 // Registers are in the order of the instruction encoding, from left to right.
940 void Assembler::GenInstrImmediate(Opcode opcode,
941  Register rs,
942  Register rt,
943  int32_t j) {
944  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
945  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
946  | (j & kImm16Mask);
947  emit(instr);
948 }
949 
950 
951 void Assembler::GenInstrImmediate(Opcode opcode,
952  Register rs,
953  SecondaryField SF,
954  int32_t j) {
955  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
956  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
957  emit(instr);
958 }
959 
960 
961 void Assembler::GenInstrImmediate(Opcode opcode,
962  Register rs,
963  FPURegister ft,
964  int32_t j) {
965  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
966  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
967  | (j & kImm16Mask);
968  emit(instr);
969 }
970 
971 
972 void Assembler::GenInstrJump(Opcode opcode,
973  uint32_t address) {
974  BlockTrampolinePoolScope block_trampoline_pool(this);
975  ASSERT(is_uint26(address));
976  Instr instr = opcode | address;
977  emit(instr);
978  BlockTrampolinePoolFor(1); // For associated delay slot.
979 }
980 
981 
982 // Returns the next free trampoline entry.
983 int32_t Assembler::get_trampoline_entry(int32_t pos) {
984  int32_t trampoline_entry = kInvalidSlotPos;
985 
986  if (!internal_trampoline_exception_) {
987  if (trampoline_.start() > pos) {
988  trampoline_entry = trampoline_.take_slot();
989  }
990 
991  if (kInvalidSlotPos == trampoline_entry) {
992  internal_trampoline_exception_ = true;
993  }
994  }
995  return trampoline_entry;
996 }
997 
998 
999 uint32_t Assembler::jump_address(Label* L) {
1000  int32_t target_pos;
1001 
1002  if (L->is_bound()) {
1003  target_pos = L->pos();
1004  } else {
1005  if (L->is_linked()) {
1006  target_pos = L->pos(); // L's link.
1007  L->link_to(pc_offset());
1008  } else {
1009  L->link_to(pc_offset());
1010  return kEndOfJumpChain;
1011  }
1012  }
1013 
1014  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
1015  ASSERT((imm & 3) == 0);
1016 
1017  return imm;
1018 }
1019 
1020 
1021 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1022  int32_t target_pos;
1023 
1024  if (L->is_bound()) {
1025  target_pos = L->pos();
1026  } else {
1027  if (L->is_linked()) {
1028  target_pos = L->pos();
1029  L->link_to(pc_offset());
1030  } else {
1031  L->link_to(pc_offset());
1032  if (!trampoline_emitted_) {
1033  unbound_labels_count_++;
1034  next_buffer_check_ -= kTrampolineSlotsSize;
1035  }
1036  return kEndOfChain;
1037  }
1038  }
1039 
1040  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
1041  ASSERT((offset & 3) == 0);
1042  ASSERT(is_int16(offset >> 2));
1043 
1044  return offset;
1045 }
1046 
1047 
1048 void Assembler::label_at_put(Label* L, int at_offset) {
1049  int target_pos;
1050  if (L->is_bound()) {
1051  target_pos = L->pos();
1052  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1053  } else {
1054  if (L->is_linked()) {
1055  target_pos = L->pos(); // L's link.
1056  int32_t imm18 = target_pos - at_offset;
1057  ASSERT((imm18 & 3) == 0);
1058  int32_t imm16 = imm18 >> 2;
1059  ASSERT(is_int16(imm16));
1060  instr_at_put(at_offset, (imm16 & kImm16Mask));
1061  } else {
1062  target_pos = kEndOfChain;
1063  instr_at_put(at_offset, 0);
1064  if (!trampoline_emitted_) {
1065  unbound_labels_count_++;
1066  next_buffer_check_ -= kTrampolineSlotsSize;
1067  }
1068  }
1069  L->link_to(at_offset);
1070  }
1071 }
1072 
1073 
1074 //------- Branch and jump instructions --------
1075 
1076 void Assembler::b(int16_t offset) {
1077  beq(zero_reg, zero_reg, offset);
1078 }
1079 
1080 
1081 void Assembler::bal(int16_t offset) {
1082  positions_recorder()->WriteRecordedPositions();
1083  bgezal(zero_reg, offset);
1084 }
1085 
1086 
1087 void Assembler::beq(Register rs, Register rt, int16_t offset) {
1088  BlockTrampolinePoolScope block_trampoline_pool(this);
1089  GenInstrImmediate(BEQ, rs, rt, offset);
1090  BlockTrampolinePoolFor(1); // For associated delay slot.
1091 }
1092 
1093 
1094 void Assembler::bgez(Register rs, int16_t offset) {
1095  BlockTrampolinePoolScope block_trampoline_pool(this);
1096  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1097  BlockTrampolinePoolFor(1); // For associated delay slot.
1098 }
1099 
1100 
1101 void Assembler::bgezal(Register rs, int16_t offset) {
1102  BlockTrampolinePoolScope block_trampoline_pool(this);
1103  positions_recorder()->WriteRecordedPositions();
1104  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1105  BlockTrampolinePoolFor(1); // For associated delay slot.
1106 }
1107 
1108 
1109 void Assembler::bgtz(Register rs, int16_t offset) {
1110  BlockTrampolinePoolScope block_trampoline_pool(this);
1111  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1112  BlockTrampolinePoolFor(1); // For associated delay slot.
1113 }
1114 
1115 
1116 void Assembler::blez(Register rs, int16_t offset) {
1117  BlockTrampolinePoolScope block_trampoline_pool(this);
1118  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1119  BlockTrampolinePoolFor(1); // For associated delay slot.
1120 }
1121 
1122 
1123 void Assembler::bltz(Register rs, int16_t offset) {
1124  BlockTrampolinePoolScope block_trampoline_pool(this);
1125  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1126  BlockTrampolinePoolFor(1); // For associated delay slot.
1127 }
1128 
1129 
1130 void Assembler::bltzal(Register rs, int16_t offset) {
1131  BlockTrampolinePoolScope block_trampoline_pool(this);
1132  positions_recorder()->WriteRecordedPositions();
1133  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1134  BlockTrampolinePoolFor(1); // For associated delay slot.
1135 }
1136 
1137 
1138 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1139  BlockTrampolinePoolScope block_trampoline_pool(this);
1140  GenInstrImmediate(BNE, rs, rt, offset);
1141  BlockTrampolinePoolFor(1); // For associated delay slot.
1142 }
1143 
1144 
1145 void Assembler::j(int32_t target) {
1146 #ifdef DEBUG
1147  // Get pc of delay slot.
1148  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1149  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
1150  (kImm26Bits + kImmFieldShift)) == 0;
1151  ASSERT(in_range && ((target & 3) == 0));
1152 #endif
1153  GenInstrJump(J, target >> 2);
1154 }
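// J and JAL take a 26-bit word index, so the reachable range is the 256 MB
// (2^28-byte) region containing the instruction in the delay slot; the
// DEBUG-only check above is intended to confirm the target lies in that
// same region before GenInstrJump(J, target >> 2) is emitted.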
1155 
1156 
1157 void Assembler::jr(Register rs) {
1158  BlockTrampolinePoolScope block_trampoline_pool(this);
1159  if (rs.is(ra)) {
1160  positions_recorder()->WriteRecordedPositions();
1161  }
1162  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1163  BlockTrampolinePoolFor(1); // For associated delay slot.
1164 }
1165 
1166 
1167 void Assembler::jal(int32_t target) {
1168 #ifdef DEBUG
1169  // Get pc of delay slot.
1170  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1171  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
1172  (kImm26Bits + kImmFieldShift)) == 0;
1173  ASSERT(in_range && ((target & 3) == 0));
1174 #endif
1175  positions_recorder()->WriteRecordedPositions();
1176  GenInstrJump(JAL, target >> 2);
1177 }
1178 
1179 
1180 void Assembler::jalr(Register rs, Register rd) {
1181  BlockTrampolinePoolScope block_trampoline_pool(this);
1182  positions_recorder()->WriteRecordedPositions();
1183  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1184  BlockTrampolinePoolFor(1); // For associated delay slot.
1185 }
1186 
1187 
1188 void Assembler::j_or_jr(int32_t target, Register rs) {
1189  // Get pc of delay slot.
1190  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1191  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
1192  (kImm26Bits + kImmFieldShift)) == 0;
1193  if (in_range) {
1194  j(target);
1195  } else {
1196  jr(t9);
1197  }
1198 }
1199 
1200 
1201 void Assembler::jal_or_jalr(int32_t target, Register rs) {
1202  // Get pc of delay slot.
1203  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1204  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
1205  (kImm26Bits+kImmFieldShift)) == 0;
1206  if (in_range) {
1207  jal(target);
1208  } else {
1209  jalr(t9);
1210  }
1211 }
1212 
1213 
1214 //-------Data-processing-instructions---------
1215 
1216 // Arithmetic.
1217 
1218 void Assembler::addu(Register rd, Register rs, Register rt) {
1219  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1220 }
1221 
1222 
1223 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1224  GenInstrImmediate(ADDIU, rs, rd, j);
1225 }
1226 
1227 
1228 void Assembler::subu(Register rd, Register rs, Register rt) {
1229  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1230 }
1231 
1232 
1233 void Assembler::mul(Register rd, Register rs, Register rt) {
1234  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1235 }
1236 
1237 
1238 void Assembler::mult(Register rs, Register rt) {
1239  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1240 }
1241 
1242 
1243 void Assembler::multu(Register rs, Register rt) {
1244  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1245 }
1246 
1247 
1248 void Assembler::div(Register rs, Register rt) {
1249  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1250 }
1251 
1252 
1253 void Assembler::divu(Register rs, Register rt) {
1254  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1255 }
1256 
1257 
1258 // Logical.
1259 
1260 void Assembler::and_(Register rd, Register rs, Register rt) {
1261  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1262 }
1263 
1264 
1265 void Assembler::andi(Register rt, Register rs, int32_t j) {
1266  ASSERT(is_uint16(j));
1267  GenInstrImmediate(ANDI, rs, rt, j);
1268 }
1269 
1270 
1271 void Assembler::or_(Register rd, Register rs, Register rt) {
1272  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1273 }
1274 
1275 
1276 void Assembler::ori(Register rt, Register rs, int32_t j) {
1277  ASSERT(is_uint16(j));
1278  GenInstrImmediate(ORI, rs, rt, j);
1279 }
1280 
1281 
1282 void Assembler::xor_(Register rd, Register rs, Register rt) {
1283  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1284 }
1285 
1286 
1287 void Assembler::xori(Register rt, Register rs, int32_t j) {
1288  ASSERT(is_uint16(j));
1289  GenInstrImmediate(XORI, rs, rt, j);
1290 }
1291 
1292 
1293 void Assembler::nor(Register rd, Register rs, Register rt) {
1294  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1295 }
1296 
1297 
1298 // Shifts.
1299 void Assembler::sll(Register rd,
1300  Register rt,
1301  uint16_t sa,
1302  bool coming_from_nop) {
1303  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1304  // generated using the sll instruction. They must be generated using
1305  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1306  // instructions.
1307  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
1308  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
1309 }
1310 
1311 
1312 void Assembler::sllv(Register rd, Register rt, Register rs) {
1313  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1314 }
1315 
1316 
1317 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1318  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
1319 }
1320 
1321 
1322 void Assembler::srlv(Register rd, Register rt, Register rs) {
1323  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1324 }
1325 
1326 
1327 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1328  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
1329 }
1330 
1331 
1332 void Assembler::srav(Register rd, Register rt, Register rs) {
1333  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1334 }
1335 
1336 
1337 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1338  // Should be called via MacroAssembler::Ror.
1339  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1340  ASSERT(kArchVariant == kMips32r2);
1341  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1342  | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1343  emit(instr);
1344 }
1345 
1346 
1347 void Assembler::rotrv(Register rd, Register rt, Register rs) {
1348  // Should be called via MacroAssembler::Ror.
1349  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1350  ASSERT(kArchVariant == kMips32r2);
1351  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1352  | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1353  emit(instr);
1354 }
1355 
1356 
1357 //------------Memory-instructions-------------
1358 
1359 // Helper for base-reg + offset, when offset is larger than int16.
1360 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1361  ASSERT(!src.rm().is(at));
1362  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
1363  ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1364  addu(at, at, src.rm()); // Add base register.
1365 }
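// Illustrative use: lw(t0, MemOperand(s0, 0x12345)) has an offset too large
// for 16 bits, so lw() below first forms at = s0 + 0x12345 via lui/ori/addu
// and then performs the access as lw(t0, MemOperand(at, 0)).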
1366 
1367 
1368 void Assembler::lb(Register rd, const MemOperand& rs) {
1369  if (is_int16(rs.offset_)) {
1370  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1371  } else { // Offset > 16 bits, use multiple instructions to load.
1372  LoadRegPlusOffsetToAt(rs);
1373  GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1374  }
1375 }
1376 
1377 
1378 void Assembler::lbu(Register rd, const MemOperand& rs) {
1379  if (is_int16(rs.offset_)) {
1380  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1381  } else { // Offset > 16 bits, use multiple instructions to load.
1382  LoadRegPlusOffsetToAt(rs);
1383  GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1384  }
1385 }
1386 
1387 
1388 void Assembler::lh(Register rd, const MemOperand& rs) {
1389  if (is_int16(rs.offset_)) {
1390  GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1391  } else { // Offset > 16 bits, use multiple instructions to load.
1392  LoadRegPlusOffsetToAt(rs);
1393  GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1394  }
1395 }
1396 
1397 
1398 void Assembler::lhu(Register rd, const MemOperand& rs) {
1399  if (is_int16(rs.offset_)) {
1400  GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1401  } else { // Offset > 16 bits, use multiple instructions to load.
1402  LoadRegPlusOffsetToAt(rs);
1403  GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1404  }
1405 }
1406 
1407 
1408 void Assembler::lw(Register rd, const MemOperand& rs) {
1409  if (is_int16(rs.offset_)) {
1410  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1411  } else { // Offset > 16 bits, use multiple instructions to load.
1412  LoadRegPlusOffsetToAt(rs);
1413  GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1414  }
1415 }
1416 
1417 
1418 void Assembler::lwl(Register rd, const MemOperand& rs) {
1419  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1420 }
1421 
1422 
1423 void Assembler::lwr(Register rd, const MemOperand& rs) {
1424  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1425 }
1426 
1427 
1428 void Assembler::sb(Register rd, const MemOperand& rs) {
1429  if (is_int16(rs.offset_)) {
1430  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1431  } else { // Offset > 16 bits, use multiple instructions to store.
1432  LoadRegPlusOffsetToAt(rs);
1433  GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1434  }
1435 }
1436 
1437 
1438 void Assembler::sh(Register rd, const MemOperand& rs) {
1439  if (is_int16(rs.offset_)) {
1440  GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1441  } else { // Offset > 16 bits, use multiple instructions to store.
1442  LoadRegPlusOffsetToAt(rs);
1443  GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1444  }
1445 }
1446 
1447 
1448 void Assembler::sw(Register rd, const MemOperand& rs) {
1449  if (is_int16(rs.offset_)) {
1450  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1451  } else { // Offset > 16 bits, use multiple instructions to store.
1452  LoadRegPlusOffsetToAt(rs);
1453  GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1454  }
1455 }
1456 
1457 
1458 void Assembler::swl(Register rd, const MemOperand& rs) {
1459  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1460 }
1461 
1462 
1463 void Assembler::swr(Register rd, const MemOperand& rs) {
1464  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1465 }
1466 
1467 
1468 void Assembler::lui(Register rd, int32_t j) {
1469  ASSERT(is_uint16(j));
1470  GenInstrImmediate(LUI, zero_reg, rd, j);
1471 }
1472 
1473 
1474 //-------------Misc-instructions--------------
1475 
1476 // Break / Trap instructions.
1477 void Assembler::break_(uint32_t code, bool break_as_stop) {
1478  ASSERT((code & ~0xfffff) == 0);
1479  // We need to invalidate breaks that could be stops as well because the
1480  // simulator expects a char pointer after the stop instruction.
1481  // See constants-mips.h for explanation.
1482  ASSERT((break_as_stop &&
1483  code <= kMaxStopCode &&
1484  code > kMaxWatchpointCode) ||
1485  (!break_as_stop &&
1486  (code > kMaxStopCode ||
1487  code <= kMaxWatchpointCode)));
1488  Instr break_instr = SPECIAL | BREAK | (code << 6);
1489  emit(break_instr);
1490 }
1491 
1492 
1493 void Assembler::stop(const char* msg, uint32_t code) {
1494  ASSERT(code > kMaxWatchpointCode);
1495  ASSERT(code <= kMaxStopCode);
1496 #if V8_HOST_ARCH_MIPS
1497  break_(0x54321);
1498 #else // V8_HOST_ARCH_MIPS
1499  BlockTrampolinePoolFor(2);
1500  // The Simulator will handle the stop instruction and get the message address.
1501  // On MIPS stop() is just a special kind of break_().
1502  break_(code, true);
1503  emit(reinterpret_cast<Instr>(msg));
1504 #endif
1505 }
1506 
1507 
1508 void Assembler::tge(Register rs, Register rt, uint16_t code) {
1509  ASSERT(is_uint10(code));
1510  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1511  | rt.code() << kRtShift | code << 6;
1512  emit(instr);
1513 }
1514 
1515 
1516 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
1517  ASSERT(is_uint10(code));
1518  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1519  | rt.code() << kRtShift | code << 6;
1520  emit(instr);
1521 }
1522 
1523 
1524 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
1525  ASSERT(is_uint10(code));
1526  Instr instr =
1527  SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1528  emit(instr);
1529 }
1530 
1531 
1532 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
1533  ASSERT(is_uint10(code));
1534  Instr instr =
1535  SPECIAL | TLTU | rs.code() << kRsShift
1536  | rt.code() << kRtShift | code << 6;
1537  emit(instr);
1538 }
1539 
1540 
1541 void Assembler::teq(Register rs, Register rt, uint16_t code) {
1542  ASSERT(is_uint10(code));
1543  Instr instr =
1544  SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1545  emit(instr);
1546 }
1547 
1548 
1549 void Assembler::tne(Register rs, Register rt, uint16_t code) {
1550  ASSERT(is_uint10(code));
1551  Instr instr =
1552  SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1553  emit(instr);
1554 }
1555 
1556 
1557 // Move from HI/LO register.
1558 
1559 void Assembler::mfhi(Register rd) {
1560  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1561 }
1562 
1563 
1564 void Assembler::mflo(Register rd) {
1565  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1566 }
1567 
1568 
1569 // Set on less than instructions.
1570 void Assembler::slt(Register rd, Register rs, Register rt) {
1571  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1572 }
1573 
1574 
1575 void Assembler::sltu(Register rd, Register rs, Register rt) {
1576  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1577 }
1578 
1579 
1580 void Assembler::slti(Register rt, Register rs, int32_t j) {
1581  GenInstrImmediate(SLTI, rs, rt, j);
1582 }
1583 
1584 
1585 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1586  GenInstrImmediate(SLTIU, rs, rt, j);
1587 }
1588 
1589 
1590 // Conditional move.
1591 void Assembler::movz(Register rd, Register rs, Register rt) {
1592  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1593 }
1594 
1595 
1596 void Assembler::movn(Register rd, Register rs, Register rt) {
1597  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1598 }
1599 
1600 
1601 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
1602  Register rt;
1603  rt.code_ = (cc & 0x0007) << 2 | 1;
1604  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1605 }
1606 
1607 
1608 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1609  Register rt;
1610  rt.code_ = (cc & 0x0007) << 2 | 0;
1611  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1612 }
1613 
1614 
1615 // Bit twiddling.
1616 void Assembler::clz(Register rd, Register rs) {
1617  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1618  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1619 }
1620 
1621 
1622 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1623  // Should be called via MacroAssembler::Ins.
1624  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1625  ASSERT(kArchVariant == kMips32r2);
1626  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1627 }
1628 
1629 
1630 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1631  // Should be called via MacroAssembler::Ext.
1632  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1633  ASSERT(kArchVariant == kMips32r2);
1634  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1635 }
1636 
1637 
1638 void Assembler::pref(int32_t hint, const MemOperand& rs) {
1639  ASSERT(kArchVariant != kLoongson);
1640  ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
1641  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
1642  | (rs.offset_);
1643  emit(instr);
1644 }
1645 
1646 
1647 //--------Coprocessor-instructions----------------
1648 
1649 // Load, store, move.
1650 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1651  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1652 }
1653 
1654 
1655 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1656  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1657  // load to two 32-bit loads.
1658  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1659  FPURegister nextfpreg;
1660  nextfpreg.setcode(fd.code() + 1);
1661  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
1662 }
1663 
1664 
1665 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1666  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1667 }
1668 
1669 
1670 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1671  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1672  // store to two 32-bit stores.
1673  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1674  FPURegister nextfpreg;
1675  nextfpreg.setcode(fd.code() + 1);
1676  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
1677 }
1678 
1679 
1680 void Assembler::mtc1(Register rt, FPURegister fs) {
1681  GenInstrRegister(COP1, MTC1, rt, fs, f0);
1682 }
1683 
1684 
1685 void Assembler::mfc1(Register rt, FPURegister fs) {
1686  GenInstrRegister(COP1, MFC1, rt, fs, f0);
1687 }
1688 
1689 
1690 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1691  GenInstrRegister(COP1, CTC1, rt, fs);
1692 }
1693 
1694 
1695 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1696  GenInstrRegister(COP1, CFC1, rt, fs);
1697 }
1698 
1699 
1700 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
1701  uint64_t i;
1702  OS::MemCopy(&i, &d, 8);
1703 
1704  *lo = i & 0xffffffff;
1705  *hi = i >> 32;
1706 }
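// For example, DoubleAsTwoUInt32(1.0, &lo, &hi) yields lo = 0x00000000 and
// hi = 0x3ff00000, the two 32-bit halves of the IEEE 754 encoding of 1.0.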
1707 
1708 
1709 // Arithmetic.
1710 
1711 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1712  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1713 }
1714 
1715 
1716 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1717  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1718 }
1719 
1720 
1721 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1722  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1723 }
1724 
1725 
1726 void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1727  FPURegister ft) {
1728  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
1729 }
1730 
1731 
1732 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1733  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1734 }
1735 
1736 
1737 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1738  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1739 }
1740 
1741 
1742 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1743  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1744 }
1745 
1746 
1747 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1748  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1749 }
1750 
1751 
1752 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1753  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
1754 }
1755 
1756 
1757 // Conversions.
1758 
1759 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
1760  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
1761 }
1762 
1763 
1764 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
1765  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
1766 }
1767 
1768 
1769 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
1770  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
1771 }
1772 
1773 
1774 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
1775  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
1776 }
1777 
1778 
1779 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
1780  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
1781 }
1782 
1783 
1784 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
1785  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
1786 }
1787 
1788 
1789 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
1790  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
1791 }
1792 
1793 
1794 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
1795  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
1796 }
1797 
1798 
1799 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
1800  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1801 }
1802 
1803 
1804 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1805  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1806 }
1807 
1808 
1809 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1810  ASSERT(kArchVariant == kMips32r2);
1811  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1812 }
1813 
1814 
1815 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1816  ASSERT(kArchVariant == kMips32r2);
1817  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1818 }
1819 
1820 
1821 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1822  ASSERT(kArchVariant == kMips32r2);
1823  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1824 }
1825 
1826 
1827 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1828  ASSERT(kArchVariant == kMips32r2);
1829  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1830 }
1831 
1832 
1833 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1834  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1835 }
1836 
1837 
1838 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
1839  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
1840 }
1841 
1842 
1843 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
1844  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
1845 }
1846 
1847 
1848 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
1849  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
1850 }
1851 
1852 
1853 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1854  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1855 }
1856 
1857 
1858 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1859  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1860 }
1861 
1862 
1863 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
1864  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
1865 }
1866 
1867 
1868 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
1869  ASSERT(kArchVariant == kMips32r2);
1870  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
1871 }
1872 
1873 
1874 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
1875  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
1876 }
1877 
1878 
1879 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
1880  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
1881 }
1882 
1883 
1884 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
1885  ASSERT(kArchVariant == kMips32r2);
1886  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
1887 }
1888 
1889 
1890 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
1891  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
1892 }
1893 
1894 
1895 // Conditions.
1897  FPURegister fs, FPURegister ft, uint16_t cc) {
1898  ASSERT(is_uint3(cc));
1899  ASSERT((fmt & ~(31 << kRsShift)) == 0);
1900  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
1901  | cc << 8 | 3 << 4 | cond;
1902  emit(instr);
1903 }
1904 
1905 
1906 void Assembler::fcmp(FPURegister src1, const double src2,
1907  FPUCondition cond) {
1908  ASSERT(src2 == 0.0);
1909  mtc1(zero_reg, f14);
1910  cvt_d_w(f14, f14);
1911  c(cond, D, src1, f14, 0);
1912 }
1913 
1914 
1915 void Assembler::bc1f(int16_t offset, uint16_t cc) {
1916  ASSERT(is_uint3(cc));
1917  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
1918  emit(instr);
1919 }
1920 
1921 
1922 void Assembler::bc1t(int16_t offset, uint16_t cc) {
1923  ASSERT(is_uint3(cc));
1924  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
1925  emit(instr);
1926 }
1927 
1928 
1929 // Debugging.
1930 void Assembler::RecordJSReturn() {
1931  positions_recorder()->WriteRecordedPositions();
1932  CheckBuffer();
1933  RecordRelocInfo(RelocInfo::JS_RETURN);
1934 }
1935 
1936 
1937 void Assembler::RecordDebugBreakSlot() {
1938  positions_recorder()->WriteRecordedPositions();
1939  CheckBuffer();
1940  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
1941 }
1942 
1943 
1944 void Assembler::RecordComment(const char* msg) {
1945  if (FLAG_code_comments) {
1946  CheckBuffer();
1947  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1948  }
1949 }
1950 
1951 
1952 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
1953  Instr instr = instr_at(pc);
1954  ASSERT(IsJ(instr) || IsLui(instr));
1955  if (IsLui(instr)) {
1956  Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
1957  Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
1958  ASSERT(IsOri(instr_ori));
1959  int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
1960  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
1961  if (imm == kEndOfJumpChain) {
1962  return 0; // Number of instructions patched.
1963  }
1964  imm += pc_delta;
1965  ASSERT((imm & 3) == 0);
1966 
1967  instr_lui &= ~kImm16Mask;
1968  instr_ori &= ~kImm16Mask;
1969 
1970  instr_at_put(pc + 0 * Assembler::kInstrSize,
1971  instr_lui | ((imm >> kLuiShift) & kImm16Mask));
1972  instr_at_put(pc + 1 * Assembler::kInstrSize,
1973  instr_ori | (imm & kImm16Mask));
1974  return 2; // Number of instructions patched.
1975  } else {
1976  uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
1977  if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
1978  return 0; // Number of instructions patched.
1979  }
1980  imm28 += pc_delta;
1981  imm28 &= kImm28Mask;
1982  ASSERT((imm28 & 3) == 0);
1983 
1984  instr &= ~kImm26Mask;
1985  uint32_t imm26 = imm28 >> 2;
1986  ASSERT(is_uint26(imm26));
1987 
1988  instr_at_put(pc, instr | (imm26 & kImm26Mask));
1989  return 1; // Number of instructions patched.
1990  }
1991 }
1992 
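As an illustrative aside, the lui/ori patch above rebuilds a 32-bit immediate from two 16-bit instruction fields, adds pc_delta, and splits it back into the halves written with instr_at_put. A minimal standalone sketch of that split/recombine arithmetic, using made-up instruction words and the same kImm16Mask/kLuiShift values defined elsewhere in this file:

#include <cassert>
#include <cstdint>

// Sketch only: not part of assembler-mips.cc. Rebuild a 32-bit value from the
// 16-bit immediates of a lui/ori pair, relocate it, and split it back.
int main() {
  const uint32_t kImm16Mask = 0xffff;
  const int kLuiShift = 16;

  uint32_t instr_lui = 0x3c010000u | 0x1234u;  // hypothetical "lui at, 0x1234"
  uint32_t instr_ori = 0x34210000u | 0x5678u;  // hypothetical "ori at, at, 0x5678"

  int32_t imm = (instr_lui & kImm16Mask) << kLuiShift;
  imm |= (instr_ori & kImm16Mask);
  assert(imm == 0x12345678);

  int32_t pc_delta = 0x1000;  // pretend the buffer moved by 4 KB
  imm += pc_delta;

  uint32_t new_lui = (instr_lui & ~kImm16Mask) | ((imm >> kLuiShift) & kImm16Mask);
  uint32_t new_ori = (instr_ori & ~kImm16Mask) | (imm & kImm16Mask);
  assert((new_lui & kImm16Mask) == 0x1234);  // upper half unchanged by this delta
  assert((new_ori & kImm16Mask) == 0x6678);  // lower half absorbed the delta
  return 0;
}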
1993 
1994 void Assembler::GrowBuffer() {
1995  if (!own_buffer_) FATAL("external code buffer is too small");
1996 
1997  // Compute new buffer size.
1998  CodeDesc desc; // The new buffer.
1999  if (buffer_size_ < 4*KB) {
2000  desc.buffer_size = 4*KB;
2001  } else if (buffer_size_ < 1*MB) {
2002  desc.buffer_size = 2*buffer_size_;
2003  } else {
2004  desc.buffer_size = buffer_size_ + 1*MB;
2005  }
2006  CHECK_GT(desc.buffer_size, 0); // No overflow.
2007 
2008  // Set up new buffer.
2009  desc.buffer = NewArray<byte>(desc.buffer_size);
2010 
2011  desc.instr_size = pc_offset();
2012  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2013 
2014  // Copy the data.
2015  int pc_delta = desc.buffer - buffer_;
2016  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2017  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
2018  OS::MemMove(reloc_info_writer.pos() + rc_delta,
2019  reloc_info_writer.pos(), desc.reloc_size);
2020 
2021  // Switch buffers.
2022  DeleteArray(buffer_);
2023  buffer_ = desc.buffer;
2024  buffer_size_ = desc.buffer_size;
2025  pc_ += pc_delta;
2026  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2027  reloc_info_writer.last_pc() + pc_delta);
2028 
2029  // Relocate runtime entries.
2030  for (RelocIterator it(desc); !it.done(); it.next()) {
2031  RelocInfo::Mode rmode = it.rinfo()->rmode();
2032  if (rmode == RelocInfo::INTERNAL_REFERENCE) {
2033  byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
2034  RelocateInternalReference(p, pc_delta);
2035  }
2036  }
2037 
2038  ASSERT(!overflow());
2039 }
2040 
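For reference, the sizing rule in GrowBuffer() starts at 4 KB, doubles while the buffer is under 1 MB, and then grows by 1 MB per step. A small standalone sketch of just that policy (KB/MB mirror the constants used above):

#include <cstdio>

// Sketch only: the GrowBuffer() sizing rule in isolation.
static int NextBufferSize(int buffer_size) {
  const int KB = 1024;
  const int MB = KB * KB;
  if (buffer_size < 4 * KB) return 4 * KB;
  if (buffer_size < 1 * MB) return 2 * buffer_size;
  return buffer_size + 1 * MB;
}

int main() {
  int size = 1024;
  for (int i = 0; i < 6; i++) {
    size = NextBufferSize(size);
    std::printf("%d\n", size);  // 4096, 8192, 16384, 32768, 65536, 131072
  }
  return 0;
}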
2041 
2042 void Assembler::db(uint8_t data) {
2043  CheckBuffer();
2044  *reinterpret_cast<uint8_t*>(pc_) = data;
2045  pc_ += sizeof(uint8_t);
2046 }
2047 
2048 
2049 void Assembler::dd(uint32_t data) {
2050  CheckBuffer();
2051  *reinterpret_cast<uint32_t*>(pc_) = data;
2052  pc_ += sizeof(uint32_t);
2053 }
2054 
2055 
2056 void Assembler::emit_code_stub_address(Code* stub) {
2057  CheckBuffer();
2058  *reinterpret_cast<uint32_t*>(pc_) =
2059  reinterpret_cast<uint32_t>(stub->instruction_start());
2060  pc_ += sizeof(uint32_t);
2061 }
2062 
2063 
2064 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2065  // We do not try to reuse pool constants.
2066  RelocInfo rinfo(pc_, rmode, data, NULL);
2067  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2068  // Adjust code for new modes.
2069  ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2070  || RelocInfo::IsJSReturn(rmode)
2071  || RelocInfo::IsComment(rmode)
2072  || RelocInfo::IsPosition(rmode));
2073  // These modes do not need an entry in the constant pool.
2074  }
2075  if (!RelocInfo::IsNone(rinfo.rmode())) {
2076  // Don't record external references unless the heap will be serialized.
2077  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2078 #ifdef DEBUG
2079  if (!Serializer::enabled()) {
2080  Serializer::TooLateToEnableNow();
2081  }
2082 #endif
2083  if (!Serializer::enabled() && !emit_debug_code()) {
2084  return;
2085  }
2086  }
2087  ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
2088  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2089  RelocInfo reloc_info_with_ast_id(pc_,
2090  rmode,
2091  RecordedAstId().ToInt(),
2092  NULL);
2093  ClearRecordedAstId();
2094  reloc_info_writer.Write(&reloc_info_with_ast_id);
2095  } else {
2096  reloc_info_writer.Write(&rinfo);
2097  }
2098  }
2099 }
2100 
2101 
2102 void Assembler::BlockTrampolinePoolFor(int instructions) {
2103  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2104 }
2105 
2106 
2107 void Assembler::CheckTrampolinePool() {
2108  // Some small sequences of instructions must not be broken up by the
2109  // insertion of a trampoline pool; such sequences are protected by setting
2110  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2111  // which are both checked here. Also, recursive calls to CheckTrampolinePool
2112  // are blocked by trampoline_pool_blocked_nesting_.
2113  if ((trampoline_pool_blocked_nesting_ > 0) ||
2114  (pc_offset() < no_trampoline_pool_before_)) {
2115  // Emission is currently blocked; make sure we try again as soon as
2116  // possible.
2117  if (trampoline_pool_blocked_nesting_ > 0) {
2118  next_buffer_check_ = pc_offset() + kInstrSize;
2119  } else {
2120  next_buffer_check_ = no_trampoline_pool_before_;
2121  }
2122  return;
2123  }
2124 
2125  ASSERT(!trampoline_emitted_);
2126  ASSERT(unbound_labels_count_ >= 0);
2127  if (unbound_labels_count_ > 0) {
2128  // First we emit jump (2 instructions), then we emit trampoline pool.
2129  { BlockTrampolinePoolScope block_trampoline_pool(this);
2130  Label after_pool;
2131  b(&after_pool);
2132  nop();
2133 
2134  int pool_start = pc_offset();
2135  for (int i = 0; i < unbound_labels_count_; i++) {
2136  uint32_t imm32;
2137  imm32 = jump_address(&after_pool);
2138  { BlockGrowBufferScope block_buf_growth(this);
2139  // Buffer growth (and relocation) must be blocked for internal
2140  // references until associated instructions are emitted and available
2141  // to be patched.
2142  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2143  lui(at, (imm32 & kHiMask) >> kLuiShift);
2144  ori(at, at, (imm32 & kImm16Mask));
2145  }
2146  jr(at);
2147  nop();
2148  }
2149  bind(&after_pool);
2150  trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2151 
2152  trampoline_emitted_ = true;
2153  // As we are only going to emit the trampoline once, we need to prevent any
2154  // further emission.
2155  next_buffer_check_ = kMaxInt;
2156  }
2157  } else {
2158  // The number of branches to unbound labels at this point is zero, so we can
2159  // move the next buffer check to the maximum.
2160  next_buffer_check_ = pc_offset() +
2161  kMaxBranchOffset - kTrampolineSlotsSize * 16;
2162  }
2163  return;
2164 }
2165 
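For context on the back-off above: a MIPS I-type branch encodes a signed 16-bit offset counted in instructions, so its forward reach is roughly (2^15 - 1) * 4 bytes, and the pool must be emitted before any branch to a still-unbound label would fall out of that range, with extra slack for the trampoline slots themselves. A rough sketch of the reach arithmetic (constant names here are illustrative, not the ones defined in this file):

#include <cassert>

// Sketch only: reach of a MIPS I-type branch. The signed 16-bit offset field
// counts instructions (words), so the byte reach is offset * 4.
int main() {
  const int kInstrSize = 4;
  const int kMaxSignedImm16 = (1 << 15) - 1;                        // 32767 instructions
  const int kMaxForwardBranchBytes = kMaxSignedImm16 * kInstrSize;  // 131068 bytes
  assert(kMaxForwardBranchBytes == 131068);
  // CheckTrampolinePool() schedules its next check well before this limit so
  // that a trampoline slot (lui/ori/jr/nop) can still be reached by the branch.
  return 0;
}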
2166 
2167 Address Assembler::target_address_at(Address pc) {
2168  Instr instr1 = instr_at(pc);
2169  Instr instr2 = instr_at(pc + kInstrSize);
2170  // Interpret 2 instructions generated by li: lui/ori
2171  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2172  // Assemble the 32 bit value.
2173  return reinterpret_cast<Address>(
2174  (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
2175  }
2176 
2177  // We should never get here; force a bad address if we do.
2178  UNREACHABLE();
2179  return (Address)0x0;
2180 }
2181 
2182 
2183 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
2184 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
2185 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
2186 // OS::nan_value() returns a qNaN.
2187 void Assembler::QuietNaN(HeapObject* object) {
2188  HeapNumber::cast(object)->set_value(OS::nan_value());
2189 }
2190 
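Background for the comment above: on ia32 (and IEEE 754-2008 in general) a set most-significant fraction bit of a double, bit 51, means quiet NaN, while the legacy MIPS FPU gives that bit the opposite meaning, so a qNaN baked into an ia32-generated snapshot reads as a signaling NaN on MIPS. A hedged sketch of inspecting that bit (the helper name is illustrative):

#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch only: bit 51 of a double is the "quiet" bit on ia32/IEEE 754-2008;
// the legacy MIPS FPU interprets that bit with the opposite meaning.
static bool HasIa32QuietBit(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return ((bits >> 51) & 1) != 0;
}

int main() {
  double ia32_qnan;
  const uint64_t kIa32QuietNaNBits = 0x7ff8000000000000ULL;
  std::memcpy(&ia32_qnan, &kIa32QuietNaNBits, sizeof(ia32_qnan));
  // Quiet on ia32, but signaling under the legacy MIPS encoding; QuietNaN()
  // therefore rewrites such heap numbers with the host's own quiet NaN.
  assert(HasIa32QuietBit(ia32_qnan));
  return 0;
}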
2191 
2192 // On MIPS, a target address is stored in a lui/ori instruction pair, each
2193 // of which loads 16 bits of the 32-bit address into a register.
2194 // Patching the address must replace both instructions and flush the i-cache.
2195 //
2196 // There is an optimization below, which emits a nop when the address
2197 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
2198 // and possibly removed.
2199 void Assembler::set_target_address_at(Address pc, Address target) {
2200  Instr instr2 = instr_at(pc + kInstrSize);
2201  uint32_t rt_code = GetRtField(instr2);
2202  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2203  uint32_t itarget = reinterpret_cast<uint32_t>(target);
2204 
2205 #ifdef DEBUG
2206  // Check we have the result from a li macro-instruction, using instr pair.
2207  Instr instr1 = instr_at(pc);
2208  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
2209 #endif
2210 
2211  // Must use 2 instructions to ensure patchable code => just use lui and ori.
2212  // lui rt, upper-16.
2213  // ori rt, rt, lower-16.
2214  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
2215  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
2216 
2217  // The following code is an optimization for the common case of Call()
2218  // or Jump(), which load the target into a register and jump through it:
2219  // li(t9, address); jalr(t9) (or jr(t9)).
2220  // If the destination address is in the same 256 MB page as the call, it
2221  // is faster to do a direct jal, or j, rather than jump through a register, since
2222  // that lets the CPU pipeline prefetch the target address. However, each
2223  // time the address above is patched, we have to patch the direct jal/j
2224  // instruction, as well as possibly revert to jalr/jr if we now cross a
2225  // 256 MB page. Note that with the jal/j instructions, we do not need to
2226  // load the register, but that code is left, since it makes it easy to
2227  // revert this process. A further optimization could try replacing the
2228  // li sequence with nops.
2229  // This optimization can only be applied if the rt-code from instr2 is the
2230  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
2231  // the MIPS return. Occasionally this lands after an li().
2232 
2233  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2234  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
2235  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
2236  uint32_t target_field =
2237  static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
2238  bool patched_jump = false;
2239 
2240 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
2241  // This is a workaround for the 24k core E156 bug (it also affects some 34k cores).
2242  // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
2243  // apply this workaround for all cores so we don't have to identify the core.
2244  if (in_range) {
2245  // The 24k core E156 bug has some very specific requirements, we only check
2246  // the most simple one: if the address of the delay slot instruction is in
2247  // the first or last 32 KB of the 256 MB segment.
2248  uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2249  uint32_t ipc_segment_addr = ipc & segment_mask;
2250  if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2251  in_range = false;
2252  }
2253 #endif
2254 
2255  if (IsJalr(instr3)) {
2256  // Try to convert JALR to JAL.
2257  if (in_range && GetRt(instr2) == GetRs(instr3)) {
2258  *(p+2) = JAL | target_field;
2259  patched_jump = true;
2260  }
2261  } else if (IsJr(instr3)) {
2262  // Try to convert JR to J, skip returns (jr ra).
2263  bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2264  if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
2265  *(p+2) = J | target_field;
2266  patched_jump = true;
2267  }
2268  } else if (IsJal(instr3)) {
2269  if (in_range) {
2270  // We are patching an already converted JAL.
2271  *(p+2) = JAL | target_field;
2272  } else {
2273  // Patch JAL, but out of range, revert to JALR.
2274  // JALR rs reg is the rt reg specified in the ORI instruction.
2275  uint32_t rs_field = GetRt(instr2) << kRsShift;
2276  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2277  *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2278  }
2279  patched_jump = true;
2280  } else if (IsJ(instr3)) {
2281  if (in_range) {
2282  // We are patching an already converted J (jump).
2283  *(p+2) = J | target_field;
2284  } else {
2285  // Trying to patch J, but out of range, just go back to JR.
2286  // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2287  uint32_t rs_field = GetRt(instr2) << kRsShift;
2288  *(p+2) = SPECIAL | rs_field | JR;
2289  }
2290  patched_jump = true;
2291  }
2292 
2293  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
2294 }
2295 
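For reference on the in_range test used above: j/jal encode only 28 bits of the target (a 26-bit field shifted left by 2), with the top 4 address bits inherited from the instruction following the delay slot, so two addresses are reachable from each other exactly when their top 4 bits agree. A small standalone sketch of that check and of forming the 26-bit target field (addresses are made up):

#include <cassert>
#include <cstdint>

// Sketch only: a MIPS j/jal reaches any target in the same 256 MB region as
// the instruction after its delay slot, i.e. when the top 4 address bits match.
static bool InSameJumpRegion(uint32_t ipc, uint32_t itarget) {
  const int kImm26Bits = 26;
  const int kImmFieldShift = 2;
  return ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
}

int main() {
  assert(InSameJumpRegion(0x10001000u, 0x1ffffff0u));   // same 256 MB region
  assert(!InSameJumpRegion(0x10001000u, 0x20000000u));  // crosses a region boundary
  // Forming the 26-bit target field: keep the low 28 bits, drop the 2 alignment bits.
  uint32_t itarget = 0x1ffffff0u;
  uint32_t target_field = (itarget & 0x0fffffffu) >> 2;
  assert(target_field == 0x03fffffcu);
  return 0;
}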
2296 
2297 void Assembler::JumpLabelToJumpRegister(Address pc) {
2298  // Address pc points to lui/ori instructions.
2299  // Jump to label may follow at pc + 2 * kInstrSize.
2300  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2301 #ifdef DEBUG
2302  Instr instr1 = instr_at(pc);
2303 #endif
2304  Instr instr2 = instr_at(pc + 1 * kInstrSize);
2305  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2306  bool patched = false;
2307 
2308  if (IsJal(instr3)) {
2309  ASSERT(GetOpcodeField(instr1) == LUI);
2310  ASSERT(GetOpcodeField(instr2) == ORI);
2311 
2312  uint32_t rs_field = GetRt(instr2) << kRsShift;
2313  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2314  *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2315  patched = true;
2316  } else if (IsJ(instr3)) {
2317  ASSERT(GetOpcodeField(instr1) == LUI);
2318  ASSERT(GetOpcodeField(instr2) == ORI);
2319 
2320  uint32_t rs_field = GetRt(instr2) << kRsShift;
2321  *(p+2) = SPECIAL | rs_field | JR;
2322  patched = true;
2323  }
2324 
2325  if (patched) {
2326  CPU::FlushICache(pc+2, sizeof(Address));
2327  }
2328 }
2329 
2330 
2331 MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
2332  // No out-of-line constant pool support.
2333  UNREACHABLE();
2334  return NULL;
2335 }
2336 
2337 
2338 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2339  // No out-of-line constant pool support.
2340  UNREACHABLE();
2341 }
2342 
2343 
2344 } } // namespace v8::internal
2345 
2346 #endif // V8_TARGET_ARCH_MIPS