v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
assembler-mips.cc
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 
36 #include "v8.h"
37 
38 #if defined(V8_TARGET_ARCH_MIPS)
39 
40 #include "mips/assembler-mips-inl.h"
41 #include "serialize.h"
42 
43 namespace v8 {
44 namespace internal {
45 
46 #ifdef DEBUG
47 bool CpuFeatures::initialized_ = false;
48 #endif
49 unsigned CpuFeatures::supported_ = 0;
50 unsigned CpuFeatures::found_by_runtime_probing_ = 0;
51 
52 
53 // Get the CPU features enabled by the build. For cross compilation the
54 // preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
55 // can be defined to enable FPU instructions when building the
56 // snapshot.
57 static uint64_t CpuFeaturesImpliedByCompiler() {
58  uint64_t answer = 0;
59 #ifdef CAN_USE_FPU_INSTRUCTIONS
60  answer |= 1u << FPU;
61 #endif // def CAN_USE_FPU_INSTRUCTIONS
62 
63 #ifdef __mips__
64  // If the compiler is allowed to use FPU then we can use FPU too in our code
65  // generation even when generating snapshots. This won't work for cross
66  // compilation.
67 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
68  answer |= 1u << FPU;
69 #endif // defined(__mips_hard_float) && __mips_hard_float != 0
70 #endif // def __mips__
71 
72  return answer;
73 }
74 
75 
76 void CpuFeatures::Probe() {
77  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
78  CpuFeaturesImpliedByCompiler());
79  ASSERT(supported_ == 0 || supported_ == standard_features);
80 #ifdef DEBUG
81  initialized_ = true;
82 #endif
83 
84  // Get the features implied by the OS and the compiler settings. This is the
85  // minimal set of features which is also allowed for generated code in the
86  // snapshot.
87  supported_ |= standard_features;
88 
89  if (Serializer::enabled()) {
90  // No probing for features if we might serialize (generate snapshot).
91  return;
92  }
93 
94  // If the compiler is allowed to use fpu then we can use fpu too in our
95  // code generation.
96 #if !defined(__mips__)
97  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
98  if (FLAG_enable_fpu) {
99  supported_ |= 1u << FPU;
100  }
101 #else
102  // Probe for additional features not already known to be available.
103  if (OS::MipsCpuHasFeature(FPU)) {
104  // This implementation also sets the FPU flags if
105  // runtime detection of FPU returns true.
106  supported_ |= 1u << FPU;
107  found_by_runtime_probing_ |= 1u << FPU;
108  }
109 #endif
110 }
111 
112 
113 int ToNumber(Register reg) {
114  ASSERT(reg.is_valid());
115  const int kNumbers[] = {
116  0, // zero_reg
117  1, // at
118  2, // v0
119  3, // v1
120  4, // a0
121  5, // a1
122  6, // a2
123  7, // a3
124  8, // t0
125  9, // t1
126  10, // t2
127  11, // t3
128  12, // t4
129  13, // t5
130  14, // t6
131  15, // t7
132  16, // s0
133  17, // s1
134  18, // s2
135  19, // s3
136  20, // s4
137  21, // s5
138  22, // s6
139  23, // s7
140  24, // t8
141  25, // t9
142  26, // k0
143  27, // k1
144  28, // gp
145  29, // sp
146  30, // fp
147  31, // ra
148  };
149  return kNumbers[reg.code()];
150 }
151 
152 
153 Register ToRegister(int num) {
154  ASSERT(num >= 0 && num < kNumRegisters);
155  const Register kRegisters[] = {
156  zero_reg,
157  at,
158  v0, v1,
159  a0, a1, a2, a3,
160  t0, t1, t2, t3, t4, t5, t6, t7,
161  s0, s1, s2, s3, s4, s5, s6, s7,
162  t8, t9,
163  k0, k1,
164  gp,
165  sp,
166  fp,
167  ra
168  };
169  return kRegisters[num];
170 }
171 
172 
173 // -----------------------------------------------------------------------------
174 // Implementation of RelocInfo.
175 
176 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
177  1 << RelocInfo::INTERNAL_REFERENCE;
178 
179 
180 bool RelocInfo::IsCodedSpecially() {
181  // The deserializer needs to know whether a pointer is specially coded. Being
182  // specially coded on MIPS means that it is a lui/ori instruction, and that is
183  // always the case inside code objects.
184  return true;
185 }
186 
187 
188 // Patch the code at the current address with the supplied instructions.
189 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
190  Instr* pc = reinterpret_cast<Instr*>(pc_);
191  Instr* instr = reinterpret_cast<Instr*>(instructions);
192  for (int i = 0; i < instruction_count; i++) {
193  *(pc + i) = *(instr + i);
194  }
195 
196  // Indicate that code has changed.
197  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
198 }
199 
200 
201 // Patch the code at the current PC with a call to the target address.
202 // Additional guard instructions can be added if required.
203 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
204  // Patch the code at the current address with a call to the target.
205  UNIMPLEMENTED();
206 }
207 
208 
209 // -----------------------------------------------------------------------------
210 // Implementation of Operand and MemOperand.
211 // See assembler-mips-inl.h for inlined constructors.
212 
213 Operand::Operand(Handle<Object> handle) {
214  rm_ = no_reg;
215  // Verify all Objects referred to by code are NOT in new space.
216  Object* obj = *handle;
217  ASSERT(!HEAP->InNewSpace(obj));
218  if (obj->IsHeapObject()) {
219  imm32_ = reinterpret_cast<intptr_t>(handle.location());
220  rmode_ = RelocInfo::EMBEDDED_OBJECT;
221  } else {
222  // No relocation needed.
223  imm32_ = reinterpret_cast<intptr_t>(obj);
224  rmode_ = RelocInfo::NONE;
225  }
226 }
227 
228 
229 MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
230  offset_ = offset;
231 }
232 
233 
234 // -----------------------------------------------------------------------------
235 // Specific instructions, constants, and masks.
236 
237 static const int kNegOffset = 0x00008000;
238 // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
239 // operations as post-increment of sp.
240 const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
241  | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
242 // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
243 const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
244  | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
245 // sw(r, MemOperand(sp, 0))
246 const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
247  | (0 & kImm16Mask);
248 // lw(r, MemOperand(sp, 0))
249 const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
250  | (0 & kImm16Mask);
251 
252 const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
253  | (0 & kImm16Mask);
254 
255 const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
256  | (0 & kImm16Mask);
257 
258 const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
259  | (kNegOffset & kImm16Mask);
260 
261 const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
262  | (kNegOffset & kImm16Mask);
263 // A mask for the Rt register for push, pop, lw, sw instructions.
264 const Instr kRtMask = kRtFieldMask;
265 const Instr kLwSwInstrTypeMask = 0xffe00000;
266 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
267 const Instr kLwSwOffsetMask = kImm16Mask;
268 
269 
270 // Spare buffer.
271 static const int kMinimalBufferSize = 4 * KB;
272 
273 
274 Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
275  : AssemblerBase(arg_isolate),
276  recorded_ast_id_(TypeFeedbackId::None()),
277  positions_recorder_(this),
278  emit_debug_code_(FLAG_debug_code) {
279  if (buffer == NULL) {
280  // Do our own buffer management.
281  if (buffer_size <= kMinimalBufferSize) {
282  buffer_size = kMinimalBufferSize;
283 
284  if (isolate()->assembler_spare_buffer() != NULL) {
285  buffer = isolate()->assembler_spare_buffer();
286  isolate()->set_assembler_spare_buffer(NULL);
287  }
288  }
289  if (buffer == NULL) {
290  buffer_ = NewArray<byte>(buffer_size);
291  } else {
292  buffer_ = static_cast<byte*>(buffer);
293  }
294  buffer_size_ = buffer_size;
295  own_buffer_ = true;
296 
297  } else {
298  // Use externally provided buffer instead.
299  ASSERT(buffer_size > 0);
300  buffer_ = static_cast<byte*>(buffer);
301  buffer_size_ = buffer_size;
302  own_buffer_ = false;
303  }
304 
305  // Set up buffer pointers.
306  ASSERT(buffer_ != NULL);
307  pc_ = buffer_;
308  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
309 
310  last_trampoline_pool_end_ = 0;
311  no_trampoline_pool_before_ = 0;
312  trampoline_pool_blocked_nesting_ = 0;
313  // We leave space (16 * kTrampolineSlotsSize)
314  // for BlockTrampolinePoolScope buffer.
315  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
316  internal_trampoline_exception_ = false;
317  last_bound_pos_ = 0;
318 
319  trampoline_emitted_ = false;
320  unbound_labels_count_ = 0;
321  block_buffer_growth_ = false;
322 
323  ClearRecordedAstId();
324 }
325 
326 
327 Assembler::~Assembler() {
328  if (own_buffer_) {
329  if (isolate()->assembler_spare_buffer() == NULL &&
330  buffer_size_ == kMinimalBufferSize) {
331  isolate()->set_assembler_spare_buffer(buffer_);
332  } else {
333  DeleteArray(buffer_);
334  }
335  }
336 }
337 
338 
339 void Assembler::GetCode(CodeDesc* desc) {
340  ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
341  // Set up code descriptor.
342  desc->buffer = buffer_;
343  desc->buffer_size = buffer_size_;
344  desc->instr_size = pc_offset();
345  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
346 }
347 
348 
349 void Assembler::Align(int m) {
350  ASSERT(m >= 4 && IsPowerOf2(m));
351  while ((pc_offset() & (m - 1)) != 0) {
352  nop();
353  }
354 }
355 
356 
357 void Assembler::CodeTargetAlign() {
358  // No advantage to aligning branch/call targets to more than a
359  // single instruction, as far as I am aware.
360  Align(4);
361 }
362 
363 
364 Register Assembler::GetRtReg(Instr instr) {
365  Register rt;
366  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
367  return rt;
368 }
369 
370 
371 Register Assembler::GetRsReg(Instr instr) {
372  Register rs;
373  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
374  return rs;
375 }
376 
377 
378 Register Assembler::GetRdReg(Instr instr) {
379  Register rd;
380  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
381  return rd;
382 }
383 
384 
385 uint32_t Assembler::GetRt(Instr instr) {
386  return (instr & kRtFieldMask) >> kRtShift;
387 }
388 
389 
390 uint32_t Assembler::GetRtField(Instr instr) {
391  return instr & kRtFieldMask;
392 }
393 
394 
395 uint32_t Assembler::GetRs(Instr instr) {
396  return (instr & kRsFieldMask) >> kRsShift;
397 }
398 
399 
400 uint32_t Assembler::GetRsField(Instr instr) {
401  return instr & kRsFieldMask;
402 }
403 
404 
405 uint32_t Assembler::GetRd(Instr instr) {
406  return (instr & kRdFieldMask) >> kRdShift;
407 }
408 
409 
410 uint32_t Assembler::GetRdField(Instr instr) {
411  return instr & kRdFieldMask;
412 }
413 
414 
415 uint32_t Assembler::GetSa(Instr instr) {
416  return (instr & kSaFieldMask) >> kSaShift;
417 }
418 
419 
420 uint32_t Assembler::GetSaField(Instr instr) {
421  return instr & kSaFieldMask;
422 }
423 
424 
425 uint32_t Assembler::GetOpcodeField(Instr instr) {
426  return instr & kOpcodeMask;
427 }
428 
429 
430 uint32_t Assembler::GetFunction(Instr instr) {
431  return (instr & kFunctionFieldMask) >> kFunctionShift;
432 }
433 
434 
435 uint32_t Assembler::GetFunctionField(Instr instr) {
436  return instr & kFunctionFieldMask;
437 }
438 
439 
440 uint32_t Assembler::GetImmediate16(Instr instr) {
441  return instr & kImm16Mask;
442 }
443 
444 
445 uint32_t Assembler::GetLabelConst(Instr instr) {
446  return instr & ~kImm16Mask;
447 }
448 
449 
450 bool Assembler::IsPop(Instr instr) {
451  return (instr & ~kRtMask) == kPopRegPattern;
452 }
453 
454 
455 bool Assembler::IsPush(Instr instr) {
456  return (instr & ~kRtMask) == kPushRegPattern;
457 }
458 
459 
460 bool Assembler::IsSwRegFpOffset(Instr instr) {
461  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
462 }
463 
464 
465 bool Assembler::IsLwRegFpOffset(Instr instr) {
466  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
467 }
468 
469 
470 bool Assembler::IsLwRegFpNegOffset(Instr instr) {
471  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
472  kLwRegFpNegOffsetPattern);
473 }
474 
475 
476 bool Assembler::IsSwRegFpNegOffset(Instr instr) {
477  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
478  kSwRegFpNegOffsetPattern);
479 }
480 
481 
482 // Labels refer to positions in the (to be) generated code.
483 // There are bound, linked, and unused labels.
484 //
485 // Bound labels refer to known positions in the already
486 // generated code. pos() is the position the label refers to.
487 //
488 // Linked labels refer to unknown positions in the code
489 // to be generated; pos() is the position of the last
490 // instruction using the label.
491 
492 // The link chain is terminated by a value of -1 in the instruction,
493 // which is an otherwise illegal value (a branch to -1 is an infinite loop).
494 // The instruction's 16-bit offset field addresses 32-bit words, but in
495 // code it is converted to an 18-bit value addressing bytes, hence the -4 value.
496 
497 const int kEndOfChain = -4;
498 // Determines the end of the Jump chain (a subset of the label link chain).
499 const int kEndOfJumpChain = 0;
500 
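// Editorial sketch (not part of the original file): how the -4 sentinel
// falls out of the encoding described above. A linked label stores the
// previous link as a sign-extended 16-bit word offset in the instruction;
// widening that to an 18-bit byte offset turns the -1 terminator into -4.
static inline int32_t DecodeLinkOffset(Instr instr) {
  // Sign-extend the 16-bit offset field, then scale words to bytes.
  return static_cast<int32_t>(static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
// DecodeLinkOffset(branch | 0xffff) == -4 == kEndOfChain.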
501 
502 bool Assembler::IsBranch(Instr instr) {
503  uint32_t opcode = GetOpcodeField(instr);
504  uint32_t rt_field = GetRtField(instr);
505  uint32_t rs_field = GetRsField(instr);
506  uint32_t label_constant = GetLabelConst(instr);
507  // Checks if the instruction is a branch.
508  return opcode == BEQ ||
509  opcode == BNE ||
510  opcode == BLEZ ||
511  opcode == BGTZ ||
512  opcode == BEQL ||
513  opcode == BNEL ||
514  opcode == BLEZL ||
515  opcode == BGTZL ||
516  (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
517  rt_field == BLTZAL || rt_field == BGEZAL)) ||
518  (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
519  label_constant == 0; // Emitted label const in reg-exp engine.
520 }
521 
522 
523 bool Assembler::IsBeq(Instr instr) {
524  return GetOpcodeField(instr) == BEQ;
525 }
526 
527 
528 bool Assembler::IsBne(Instr instr) {
529  return GetOpcodeField(instr) == BNE;
530 }
531 
532 
533 bool Assembler::IsJump(Instr instr) {
534  uint32_t opcode = GetOpcodeField(instr);
535  uint32_t rt_field = GetRtField(instr);
536  uint32_t rd_field = GetRdField(instr);
537  uint32_t function_field = GetFunctionField(instr);
538  // Checks if the instruction is a jump.
539  return opcode == J || opcode == JAL ||
540  (opcode == SPECIAL && rt_field == 0 &&
541  ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
542 }
543 
544 
545 bool Assembler::IsJ(Instr instr) {
546  uint32_t opcode = GetOpcodeField(instr);
547  // Checks if the instruction is a jump.
548  return opcode == J;
549 }
550 
551 
552 bool Assembler::IsJal(Instr instr) {
553  return GetOpcodeField(instr) == JAL;
554 }
555 
556 bool Assembler::IsJr(Instr instr) {
557  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
558 }
559 
560 bool Assembler::IsJalr(Instr instr) {
561  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
562 }
563 
564 
565 bool Assembler::IsLui(Instr instr) {
566  uint32_t opcode = GetOpcodeField(instr);
567  // Checks if the instruction is a load upper immediate.
568  return opcode == LUI;
569 }
570 
571 
572 bool Assembler::IsOri(Instr instr) {
573  uint32_t opcode = GetOpcodeField(instr);
574  // Checks if the instruction is an or immediate (ori).
575  return opcode == ORI;
576 }
577 
578 
579 bool Assembler::IsNop(Instr instr, unsigned int type) {
580  // See Assembler::nop(type).
581  ASSERT(type < 32);
582  uint32_t opcode = GetOpcodeField(instr);
583  uint32_t function = GetFunctionField(instr);
584  uint32_t rt = GetRt(instr);
585  uint32_t rd = GetRd(instr);
586  uint32_t sa = GetSa(instr);
587 
588  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
589  // When marking non-zero type, use sll(zero_reg, at, type)
590  // to avoid use of mips ssnop and ehb special encodings
591  // of the sll instruction.
592 
593  Register nop_rt_reg = (type == 0) ? zero_reg : at;
594  bool ret = (opcode == SPECIAL && function == SLL &&
595  rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
596  rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
597  sa == type);
598 
599  return ret;
600 }
601 
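// Editorial sketch (not part of the original file): the concrete words that
// IsNop() below accepts, assuming the standard MIPS R-type layout (rt in
// bits 16..20, sa in bits 6..10). The classic nop sll(zero_reg, zero_reg, 0)
// is the all-zero word; a type-marking nop sll(zero_reg, at, type) only sets
// rt = 1 (at) and sa = type.
static inline uint32_t EncodeNopWord(unsigned type) {  // Requires type < 32.
  return (type == 0) ? 0u                       // Classic nop: 0x00000000.
                     : (1u << 16) | (type << 6);  // Marker nop; e.g. type 1
}                                                  // encodes as 0x00010040.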
602 
603 int32_t Assembler::GetBranchOffset(Instr instr) {
604  ASSERT(IsBranch(instr));
605  return ((int16_t)(instr & kImm16Mask)) << 2;
606 }
607 
608 
609 bool Assembler::IsLw(Instr instr) {
610  return ((instr & kOpcodeMask) == LW);
611 }
612 
613 
614 uint16_t Assembler::GetLwOffset(Instr instr) {
615  ASSERT(IsLw(instr));
616  return ((instr & kImm16Mask));
617 }
618 
619 
620 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
621  ASSERT(IsLw(instr));
622 
623  // We actually create a new lw instruction based on the original one.
624  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
625  | (offset & kImm16Mask);
626 
627  return temp_instr;
628 }
629 
630 
631 bool Assembler::IsSw(Instr instr) {
632  return ((instr & kOpcodeMask) == SW);
633 }
634 
635 
636 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
637  ASSERT(IsSw(instr));
638  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
639 }
640 
641 
642 bool Assembler::IsAddImmediate(Instr instr) {
643  return ((instr & kOpcodeMask) == ADDIU);
644 }
645 
646 
647 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
648  ASSERT(IsAddImmediate(instr));
649  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
650 }
651 
652 
653 bool Assembler::IsAndImmediate(Instr instr) {
654  return GetOpcodeField(instr) == ANDI;
655 }
656 
657 
658 int Assembler::target_at(int32_t pos) {
659  Instr instr = instr_at(pos);
660  if ((instr & ~kImm16Mask) == 0) {
661  // Emitted label constant, not part of a branch.
662  if (instr == 0) {
663  return kEndOfChain;
664  } else {
665  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
666  return (imm18 + pos);
667  }
668  }
669  // Check we have a branch or jump instruction.
670  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
671  // Do NOT change this to << 2. We rely on arithmetic shifts here, assuming
672  // the compiler uses arithmetic shifts for signed integers.
673  if (IsBranch(instr)) {
674  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
675 
676  if (imm18 == kEndOfChain) {
677  // EndOfChain sentinel is returned directly, not relative to pc or pos.
678  return kEndOfChain;
679  } else {
680  return pos + kBranchPCOffset + imm18;
681  }
682  } else if (IsLui(instr)) {
683  Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
684  Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
685  ASSERT(IsOri(instr_ori));
686  int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
687  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
688 
689  if (imm == kEndOfJumpChain) {
690  // EndOfChain sentinel is returned directly, not relative to pc or pos.
691  return kEndOfChain;
692  } else {
693  uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
694  int32_t delta = instr_address - imm;
695  ASSERT(pos > delta);
696  return pos - delta;
697  }
698  } else {
699  int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
700  if (imm28 == kEndOfJumpChain) {
701  // EndOfChain sentinel is returned directly, not relative to pc or pos.
702  return kEndOfChain;
703  } else {
704  uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
705  instr_address &= kImm28Mask;
706  int32_t delta = instr_address - imm28;
707  ASSERT(pos > delta);
708  return pos - delta;
709  }
710  }
711 }
712 
713 
714 void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
715  Instr instr = instr_at(pos);
716  if ((instr & ~kImm16Mask) == 0) {
717  ASSERT(target_pos == kEndOfChain || target_pos >= 0);
718  // Emitted label constant, not part of a branch.
719  // Make label relative to Code* of generated Code object.
720  instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
721  return;
722  }
723 
724  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
725  if (IsBranch(instr)) {
726  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
727  ASSERT((imm18 & 3) == 0);
728 
729  instr &= ~kImm16Mask;
730  int32_t imm16 = imm18 >> 2;
731  ASSERT(is_int16(imm16));
732 
733  instr_at_put(pos, instr | (imm16 & kImm16Mask));
734  } else if (IsLui(instr)) {
735  Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
736  Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
737  ASSERT(IsOri(instr_ori));
738  uint32_t imm = (uint32_t)buffer_ + target_pos;
739  ASSERT((imm & 3) == 0);
740 
741  instr_lui &= ~kImm16Mask;
742  instr_ori &= ~kImm16Mask;
743 
744  instr_at_put(pos + 0 * Assembler::kInstrSize,
745  instr_lui | ((imm & kHiMask) >> kLuiShift));
746  instr_at_put(pos + 1 * Assembler::kInstrSize,
747  instr_ori | (imm & kImm16Mask));
748  } else {
749  uint32_t imm28 = (uint32_t)buffer_ + target_pos;
750  imm28 &= kImm28Mask;
751  ASSERT((imm28 & 3) == 0);
752 
753  instr &= ~kImm26Mask;
754  uint32_t imm26 = imm28 >> 2;
755  ASSERT(is_uint26(imm26));
756 
757  instr_at_put(pos, instr | (imm26 & kImm26Mask));
758  }
759 }
760 
761 
762 void Assembler::print(Label* L) {
763  if (L->is_unused()) {
764  PrintF("unused label\n");
765  } else if (L->is_bound()) {
766  PrintF("bound label to %d\n", L->pos());
767  } else if (L->is_linked()) {
768  Label l = *L;
769  PrintF("unbound label");
770  while (l.is_linked()) {
771  PrintF("@ %d ", l.pos());
772  Instr instr = instr_at(l.pos());
773  if ((instr & ~kImm16Mask) == 0) {
774  PrintF("value\n");
775  } else {
776  PrintF("%d\n", instr);
777  }
778  next(&l);
779  }
780  } else {
781  PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
782  }
783 }
784 
785 
786 void Assembler::bind_to(Label* L, int pos) {
787  ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
788  int32_t trampoline_pos = kInvalidSlotPos;
789  if (L->is_linked() && !trampoline_emitted_) {
790  unbound_labels_count_--;
791  next_buffer_check_ += kTrampolineSlotsSize;
792  }
793 
794  while (L->is_linked()) {
795  int32_t fixup_pos = L->pos();
796  int32_t dist = pos - fixup_pos;
797  next(L); // Call next before overwriting link with target at fixup_pos.
798  Instr instr = instr_at(fixup_pos);
799  if (IsBranch(instr)) {
800  if (dist > kMaxBranchOffset) {
801  if (trampoline_pos == kInvalidSlotPos) {
802  trampoline_pos = get_trampoline_entry(fixup_pos);
803  CHECK(trampoline_pos != kInvalidSlotPos);
804  }
805  ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
806  target_at_put(fixup_pos, trampoline_pos);
807  fixup_pos = trampoline_pos;
808  dist = pos - fixup_pos;
809  }
810  target_at_put(fixup_pos, pos);
811  } else {
812  ASSERT(IsJ(instr) || IsLui(instr));
813  target_at_put(fixup_pos, pos);
814  }
815  }
816  L->bind_to(pos);
817 
818  // Keep track of the last bound label so we don't eliminate any instructions
819  // before a bound label.
820  if (pos > last_bound_pos_)
821  last_bound_pos_ = pos;
822 }
823 
824 
825 void Assembler::bind(Label* L) {
826  ASSERT(!L->is_bound()); // Label can only be bound once.
827  bind_to(L, pc_offset());
828 }
829 
830 
831 void Assembler::next(Label* L) {
832  ASSERT(L->is_linked());
833  int link = target_at(L->pos());
834  if (link == kEndOfChain) {
835  L->Unuse();
836  } else {
837  ASSERT(link >= 0);
838  L->link_to(link);
839  }
840 }
841 
842 bool Assembler::is_near(Label* L) {
843  if (L->is_bound()) {
844  return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
845  }
846  return false;
847 }
848 
849 // We have to use a temporary register for things that can be relocated even
850 // if they can be encoded in MIPS's 16 bits of immediate-offset instruction
851 // space. There is no guarantee that the relocated location can be similarly
852 // encoded.
853 bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
854  return rmode != RelocInfo::NONE;
855 }
856 
857 void Assembler::GenInstrRegister(Opcode opcode,
858  Register rs,
859  Register rt,
860  Register rd,
861  uint16_t sa,
862  SecondaryField func) {
863  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
864  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
865  | (rd.code() << kRdShift) | (sa << kSaShift) | func;
866  emit(instr);
867 }
868 
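// Editorial worked example (not part of the original file): with the usual
// MIPS field shifts (kRsShift 21, kRtShift 16, kRdShift 11, kSaShift 6),
// the emit above for addu(t0, t1, t2) -- rs = t1 (9), rt = t2 (10),
// rd = t0 (8) -- produces the standard encoding of `addu $t0, $t1, $t2`.
static inline uint32_t EncodeRType(uint32_t rs, uint32_t rt, uint32_t rd,
                                   uint32_t sa, uint32_t funct) {
  return (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) | funct;
}
// EncodeRType(9, 10, 8, 0, 0x21) == 0x012a4021 (the SPECIAL opcode is 0).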
869 
870 void Assembler::GenInstrRegister(Opcode opcode,
871  Register rs,
872  Register rt,
873  uint16_t msb,
874  uint16_t lsb,
875  SecondaryField func) {
876  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
877  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
878  | (msb << kRdShift) | (lsb << kSaShift) | func;
879  emit(instr);
880 }
881 
882 
883 void Assembler::GenInstrRegister(Opcode opcode,
884  SecondaryField fmt,
885  FPURegister ft,
886  FPURegister fs,
887  FPURegister fd,
888  SecondaryField func) {
889  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
890  ASSERT(CpuFeatures::IsEnabled(FPU));
891  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
892  | (fd.code() << kFdShift) | func;
893  emit(instr);
894 }
895 
896 
897 void Assembler::GenInstrRegister(Opcode opcode,
898  SecondaryField fmt,
899  Register rt,
900  FPURegister fs,
901  FPURegister fd,
902  SecondaryField func) {
903  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
904  ASSERT(CpuFeatures::IsEnabled(FPU));
905  Instr instr = opcode | fmt | (rt.code() << kRtShift)
906  | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
907  emit(instr);
908 }
909 
910 
911 void Assembler::GenInstrRegister(Opcode opcode,
912  SecondaryField fmt,
913  Register rt,
914  FPUControlRegister fs,
915  SecondaryField func) {
916  ASSERT(fs.is_valid() && rt.is_valid());
917  ASSERT(CpuFeatures::IsEnabled(FPU));
918  Instr instr =
919  opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
920  emit(instr);
921 }
922 
923 
924 // Instructions with immediate value.
925 // Registers are in the order of the instruction encoding, from left to right.
926 void Assembler::GenInstrImmediate(Opcode opcode,
927  Register rs,
928  Register rt,
929  int32_t j) {
930  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
931  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
932  | (j & kImm16Mask);
933  emit(instr);
934 }
935 
936 
937 void Assembler::GenInstrImmediate(Opcode opcode,
938  Register rs,
939  SecondaryField SF,
940  int32_t j) {
941  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
942  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
943  emit(instr);
944 }
945 
946 
947 void Assembler::GenInstrImmediate(Opcode opcode,
948  Register rs,
949  FPURegister ft,
950  int32_t j) {
951  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
952  ASSERT(CpuFeatures::IsEnabled(FPU));
953  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
954  | (j & kImm16Mask);
955  emit(instr);
956 }
957 
958 
959 void Assembler::GenInstrJump(Opcode opcode,
960  uint32_t address) {
961  BlockTrampolinePoolScope block_trampoline_pool(this);
962  ASSERT(is_uint26(address));
963  Instr instr = opcode | address;
964  emit(instr);
965  BlockTrampolinePoolFor(1); // For associated delay slot.
966 }
967 
968 
969 // Returns the next free trampoline entry.
970 int32_t Assembler::get_trampoline_entry(int32_t pos) {
971  int32_t trampoline_entry = kInvalidSlotPos;
972 
973  if (!internal_trampoline_exception_) {
974  if (trampoline_.start() > pos) {
975  trampoline_entry = trampoline_.take_slot();
976  }
977 
978  if (kInvalidSlotPos == trampoline_entry) {
979  internal_trampoline_exception_ = true;
980  }
981  }
982  return trampoline_entry;
983 }
984 
985 
986 uint32_t Assembler::jump_address(Label* L) {
987  int32_t target_pos;
988 
989  if (L->is_bound()) {
990  target_pos = L->pos();
991  } else {
992  if (L->is_linked()) {
993  target_pos = L->pos(); // L's link.
994  L->link_to(pc_offset());
995  } else {
996  L->link_to(pc_offset());
997  return kEndOfJumpChain;
998  }
999  }
1000 
1001  uint32_t imm = (uint32_t)buffer_ + target_pos;
1002  ASSERT((imm & 3) == 0);
1003 
1004  return imm;
1005 }
1006 
1007 
1008 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1009  int32_t target_pos;
1010 
1011  if (L->is_bound()) {
1012  target_pos = L->pos();
1013  } else {
1014  if (L->is_linked()) {
1015  target_pos = L->pos();
1016  L->link_to(pc_offset());
1017  } else {
1018  L->link_to(pc_offset());
1019  if (!trampoline_emitted_) {
1020  unbound_labels_count_++;
1021  next_buffer_check_ -= kTrampolineSlotsSize;
1022  }
1023  return kEndOfChain;
1024  }
1025  }
1026 
1027  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
1028  ASSERT((offset & 3) == 0);
1029  ASSERT(is_int16(offset >> 2));
1030 
1031  return offset;
1032 }
1033 
1034 
1035 void Assembler::label_at_put(Label* L, int at_offset) {
1036  int target_pos;
1037  if (L->is_bound()) {
1038  target_pos = L->pos();
1039  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1040  } else {
1041  if (L->is_linked()) {
1042  target_pos = L->pos(); // L's link.
1043  int32_t imm18 = target_pos - at_offset;
1044  ASSERT((imm18 & 3) == 0);
1045  int32_t imm16 = imm18 >> 2;
1046  ASSERT(is_int16(imm16));
1047  instr_at_put(at_offset, (imm16 & kImm16Mask));
1048  } else {
1049  target_pos = kEndOfChain;
1050  instr_at_put(at_offset, 0);
1051  if (!trampoline_emitted_) {
1052  unbound_labels_count_++;
1053  next_buffer_check_ -= kTrampolineSlotsSize;
1054  }
1055  }
1056  L->link_to(at_offset);
1057  }
1058 }
1059 
1060 
1061 //------- Branch and jump instructions --------
1062 
1063 void Assembler::b(int16_t offset) {
1064  beq(zero_reg, zero_reg, offset);
1065 }
1066 
1067 
1068 void Assembler::bal(int16_t offset) {
1069  positions_recorder()->WriteRecordedPositions();
1070  bgezal(zero_reg, offset);
1071 }
1072 
1073 
1074 void Assembler::beq(Register rs, Register rt, int16_t offset) {
1075  BlockTrampolinePoolScope block_trampoline_pool(this);
1076  GenInstrImmediate(BEQ, rs, rt, offset);
1077  BlockTrampolinePoolFor(1); // For associated delay slot.
1078 }
1079 
1080 
1081 void Assembler::bgez(Register rs, int16_t offset) {
1082  BlockTrampolinePoolScope block_trampoline_pool(this);
1083  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1084  BlockTrampolinePoolFor(1); // For associated delay slot.
1085 }
1086 
1087 
1088 void Assembler::bgezal(Register rs, int16_t offset) {
1089  BlockTrampolinePoolScope block_trampoline_pool(this);
1090  positions_recorder()->WriteRecordedPositions();
1091  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1092  BlockTrampolinePoolFor(1); // For associated delay slot.
1093 }
1094 
1095 
1096 void Assembler::bgtz(Register rs, int16_t offset) {
1097  BlockTrampolinePoolScope block_trampoline_pool(this);
1098  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1099  BlockTrampolinePoolFor(1); // For associated delay slot.
1100 }
1101 
1102 
1103 void Assembler::blez(Register rs, int16_t offset) {
1104  BlockTrampolinePoolScope block_trampoline_pool(this);
1105  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1106  BlockTrampolinePoolFor(1); // For associated delay slot.
1107 }
1108 
1109 
1110 void Assembler::bltz(Register rs, int16_t offset) {
1111  BlockTrampolinePoolScope block_trampoline_pool(this);
1112  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1113  BlockTrampolinePoolFor(1); // For associated delay slot.
1114 }
1115 
1116 
1117 void Assembler::bltzal(Register rs, int16_t offset) {
1118  BlockTrampolinePoolScope block_trampoline_pool(this);
1119  positions_recorder()->WriteRecordedPositions();
1120  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1121  BlockTrampolinePoolFor(1); // For associated delay slot.
1122 }
1123 
1124 
1125 void Assembler::bne(Register rs, Register rt, int16_t offset) {
1126  BlockTrampolinePoolScope block_trampoline_pool(this);
1127  GenInstrImmediate(BNE, rs, rt, offset);
1128  BlockTrampolinePoolFor(1); // For associated delay slot.
1129 }
1130 
1131 
1132 void Assembler::j(int32_t target) {
1133 #ifdef DEBUG
1134  // Get pc of delay slot.
1135  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1136  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1137  ASSERT(in_range && ((target & 3) == 0));
1138 #endif
1139  GenInstrJump(J, target >> 2);
1140 }
1141 
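// Editorial sketch (not part of the original file): the in_range check above
// tests that the target lies in the same 256 MB region as the delay-slot pc,
// since a J instruction supplies only the low kImm26Bits + kImmFieldShift
// == 28 address bits and inherits the top four from pc.
static inline bool SameJumpRegion(uint32_t delay_slot_pc, uint32_t target) {
  return ((delay_slot_pc ^ target) >> 28) == 0;
}
// SameJumpRegion(0x10001000, 0x1ffffffc) is true (both in region 1);
// SameJumpRegion(0x10001000, 0x20000000) is false (regions 1 vs. 2).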
1142 
1143 void Assembler::jr(Register rs) {
1144  BlockTrampolinePoolScope block_trampoline_pool(this);
1145  if (rs.is(ra)) {
1146  positions_recorder()->WriteRecordedPositions();
1147  }
1148  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1149  BlockTrampolinePoolFor(1); // For associated delay slot.
1150 }
1151 
1152 
1153 void Assembler::jal(int32_t target) {
1154 #ifdef DEBUG
1155  // Get pc of delay slot.
1156  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1157  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1158  ASSERT(in_range && ((target & 3) == 0));
1159 #endif
1160  positions_recorder()->WriteRecordedPositions();
1161  GenInstrJump(JAL, target >> 2);
1162 }
1163 
1164 
1165 void Assembler::jalr(Register rs, Register rd) {
1166  BlockTrampolinePoolScope block_trampoline_pool(this);
1167  positions_recorder()->WriteRecordedPositions();
1168  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1169  BlockTrampolinePoolFor(1); // For associated delay slot.
1170 }
1171 
1172 
1173 void Assembler::j_or_jr(int32_t target, Register rs) {
1174  // Get pc of delay slot.
1175  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1176  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1177 
1178  if (in_range) {
1179  j(target);
1180  } else {
1181  jr(t9);
1182  }
1183 }
1184 
1185 
1186 void Assembler::jal_or_jalr(int32_t target, Register rs) {
1187  // Get pc of delay slot.
1188  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1189  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1190 
1191  if (in_range) {
1192  jal(target);
1193  } else {
1194  jalr(t9);
1195  }
1196 }
1197 
1198 
1199 //-------Data-processing-instructions---------
1200 
1201 // Arithmetic.
1202 
1203 void Assembler::addu(Register rd, Register rs, Register rt) {
1204  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1205 }
1206 
1207 
1208 void Assembler::addiu(Register rd, Register rs, int32_t j) {
1209  GenInstrImmediate(ADDIU, rs, rd, j);
1210 }
1211 
1212 
1213 void Assembler::subu(Register rd, Register rs, Register rt) {
1214  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1215 }
1216 
1217 
1218 void Assembler::mul(Register rd, Register rs, Register rt) {
1219  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1220 }
1221 
1222 
1223 void Assembler::mult(Register rs, Register rt) {
1224  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1225 }
1226 
1227 
1228 void Assembler::multu(Register rs, Register rt) {
1229  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1230 }
1231 
1232 
1233 void Assembler::div(Register rs, Register rt) {
1234  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1235 }
1236 
1237 
1238 void Assembler::divu(Register rs, Register rt) {
1239  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1240 }
1241 
1242 
1243 // Logical.
1244 
1245 void Assembler::and_(Register rd, Register rs, Register rt) {
1246  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1247 }
1248 
1249 
1250 void Assembler::andi(Register rt, Register rs, int32_t j) {
1251  ASSERT(is_uint16(j));
1252  GenInstrImmediate(ANDI, rs, rt, j);
1253 }
1254 
1255 
1256 void Assembler::or_(Register rd, Register rs, Register rt) {
1257  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1258 }
1259 
1260 
1261 void Assembler::ori(Register rt, Register rs, int32_t j) {
1262  ASSERT(is_uint16(j));
1263  GenInstrImmediate(ORI, rs, rt, j);
1264 }
1265 
1266 
1267 void Assembler::xor_(Register rd, Register rs, Register rt) {
1268  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1269 }
1270 
1271 
1272 void Assembler::xori(Register rt, Register rs, int32_t j) {
1273  ASSERT(is_uint16(j));
1274  GenInstrImmediate(XORI, rs, rt, j);
1275 }
1276 
1277 
1278 void Assembler::nor(Register rd, Register rs, Register rt) {
1279  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1280 }
1281 
1282 
1283 // Shifts.
1284 void Assembler::sll(Register rd,
1285  Register rt,
1286  uint16_t sa,
1287  bool coming_from_nop) {
1288  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1289  // generated using the sll instruction. They must be generated using
1290  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1291  // instructions.
1292  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
1293  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
1294 }
1295 
1296 
1297 void Assembler::sllv(Register rd, Register rt, Register rs) {
1298  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1299 }
1300 
1301 
1302 void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1303  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
1304 }
1305 
1306 
1307 void Assembler::srlv(Register rd, Register rt, Register rs) {
1308  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1309 }
1310 
1311 
1312 void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1313  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
1314 }
1315 
1316 
1317 void Assembler::srav(Register rd, Register rt, Register rs) {
1318  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1319 }
1320 
1321 
1322 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1323  // Should be called via MacroAssembler::Ror.
1324  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1325  ASSERT(kArchVariant == kMips32r2);
1326  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1327  | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1328  emit(instr);
1329 }
1330 
1331 
1332 void Assembler::rotrv(Register rd, Register rt, Register rs) {
1333  // Should be called via MacroAssembler::Ror.
1334  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1335  ASSERT(kArchVariant == kMips32r2);
1336  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1337  | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1338  emit(instr);
1339 }
1340 
1341 
1342 //------------Memory-instructions-------------
1343 
1344 // Helper for base-reg + offset, when offset is larger than int16.
1345 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1346  ASSERT(!src.rm().is(at));
1347  lui(at, src.offset_ >> kLuiShift);
1348  ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1349  addu(at, at, src.rm()); // Add base register.
1350 }
1351 
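// Editorial sketch (not part of the original file): what the lui/ori pair
// above leaves in `at` before the base register is added. lui writes the
// high half word and zeroes the low 16 bits; ori then ORs in the low half
// word, so `at` holds the exact 32-bit offset -- including negative ones,
// since both halves are taken verbatim from the offset's bit pattern.
static inline uint32_t ReassembleOffset(uint32_t offset) {
  uint32_t after_lui = (offset >> 16) << 16;               // e.g. 0x00120000
  uint32_t after_ori = after_lui | (offset & kImm16Mask);  // then 0x00123456.
  return after_ori;  // == offset, for every 32-bit value.
}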
1352 
1353 void Assembler::lb(Register rd, const MemOperand& rs) {
1354  if (is_int16(rs.offset_)) {
1355  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1356  } else { // Offset > 16 bits, use multiple instructions to load.
1357  LoadRegPlusOffsetToAt(rs);
1358  GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1359  }
1360 }
1361 
1362 
1363 void Assembler::lbu(Register rd, const MemOperand& rs) {
1364  if (is_int16(rs.offset_)) {
1365  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1366  } else { // Offset > 16 bits, use multiple instructions to load.
1367  LoadRegPlusOffsetToAt(rs);
1368  GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1369  }
1370 }
1371 
1372 
1373 void Assembler::lh(Register rd, const MemOperand& rs) {
1374  if (is_int16(rs.offset_)) {
1375  GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1376  } else { // Offset > 16 bits, use multiple instructions to load.
1377  LoadRegPlusOffsetToAt(rs);
1378  GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1379  }
1380 }
1381 
1382 
1383 void Assembler::lhu(Register rd, const MemOperand& rs) {
1384  if (is_int16(rs.offset_)) {
1385  GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1386  } else { // Offset > 16 bits, use multiple instructions to load.
1387  LoadRegPlusOffsetToAt(rs);
1388  GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1389  }
1390 }
1391 
1392 
1393 void Assembler::lw(Register rd, const MemOperand& rs) {
1394  if (is_int16(rs.offset_)) {
1395  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1396  } else { // Offset > 16 bits, use multiple instructions to load.
1397  LoadRegPlusOffsetToAt(rs);
1398  GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1399  }
1400 }
1401 
1402 
1403 void Assembler::lwl(Register rd, const MemOperand& rs) {
1404  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1405 }
1406 
1407 
1408 void Assembler::lwr(Register rd, const MemOperand& rs) {
1409  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1410 }
1411 
1412 
1413 void Assembler::sb(Register rd, const MemOperand& rs) {
1414  if (is_int16(rs.offset_)) {
1415  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1416  } else { // Offset > 16 bits, use multiple instructions to store.
1417  LoadRegPlusOffsetToAt(rs);
1418  GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1419  }
1420 }
1421 
1422 
1423 void Assembler::sh(Register rd, const MemOperand& rs) {
1424  if (is_int16(rs.offset_)) {
1425  GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1426  } else { // Offset > 16 bits, use multiple instructions to store.
1427  LoadRegPlusOffsetToAt(rs);
1428  GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1429  }
1430 }
1431 
1432 
1433 void Assembler::sw(Register rd, const MemOperand& rs) {
1434  if (is_int16(rs.offset_)) {
1435  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1436  } else { // Offset > 16 bits, use multiple instructions to store.
1437  LoadRegPlusOffsetToAt(rs);
1438  GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1439  }
1440 }
1441 
1442 
1443 void Assembler::swl(Register rd, const MemOperand& rs) {
1444  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1445 }
1446 
1447 
1448 void Assembler::swr(Register rd, const MemOperand& rs) {
1449  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1450 }
1451 
1452 
1453 void Assembler::lui(Register rd, int32_t j) {
1454  ASSERT(is_uint16(j));
1455  GenInstrImmediate(LUI, zero_reg, rd, j);
1456 }
1457 
1458 
1459 //-------------Misc-instructions--------------
1460 
1461 // Break / Trap instructions.
1462 void Assembler::break_(uint32_t code, bool break_as_stop) {
1463  ASSERT((code & ~0xfffff) == 0);
1464  // We need to invalidate breaks that could be stops as well because the
1465  // simulator expects a char pointer after the stop instruction.
1466  // See constants-mips.h for explanation.
1467  ASSERT((break_as_stop &&
1468  code <= kMaxStopCode &&
1469  code > kMaxWatchpointCode) ||
1470  (!break_as_stop &&
1471  (code > kMaxStopCode ||
1472  code <= kMaxWatchpointCode)));
1473  Instr break_instr = SPECIAL | BREAK | (code << 6);
1474  emit(break_instr);
1475 }
1476 
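// Editorial worked example (not part of the original file): the 20-bit code
// above occupies bits 6..25, between the function field (bits 0..5) and the
// all-zero SPECIAL opcode. Assuming the standard MIPS BREAK function value
// 0x0d, the emitted word is:
static inline uint32_t EncodeBreak(uint32_t code) {  // Requires code < 1<<20.
  return (code << 6) | 0x0d;  // 0x0d == BREAK; SPECIAL opcode is zero.
}
// EncodeBreak(0x54321) == 0x0150c84d, as emitted by break_(0x54321).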
1477 
1478 void Assembler::stop(const char* msg, uint32_t code) {
1479  ASSERT(code > kMaxWatchpointCode);
1480  ASSERT(code <= kMaxStopCode);
1481 #if defined(V8_HOST_ARCH_MIPS)
1482  break_(0x54321);
1483 #else // V8_HOST_ARCH_MIPS
1484  BlockTrampolinePoolFor(2);
1485  // The Simulator will handle the stop instruction and get the message address.
1486  // On MIPS stop() is just a special kind of break_().
1487  break_(code, true);
1488  emit(reinterpret_cast<Instr>(msg));
1489 #endif
1490 }
1491 
1492 
1493 void Assembler::tge(Register rs, Register rt, uint16_t code) {
1494  ASSERT(is_uint10(code));
1495  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1496  | rt.code() << kRtShift | code << 6;
1497  emit(instr);
1498 }
1499 
1500 
1501 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
1502  ASSERT(is_uint10(code));
1503  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1504  | rt.code() << kRtShift | code << 6;
1505  emit(instr);
1506 }
1507 
1508 
1509 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
1510  ASSERT(is_uint10(code));
1511  Instr instr =
1512  SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1513  emit(instr);
1514 }
1515 
1516 
1517 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
1518  ASSERT(is_uint10(code));
1519  Instr instr =
1520  SPECIAL | TLTU | rs.code() << kRsShift
1521  | rt.code() << kRtShift | code << 6;
1522  emit(instr);
1523 }
1524 
1525 
1526 void Assembler::teq(Register rs, Register rt, uint16_t code) {
1527  ASSERT(is_uint10(code));
1528  Instr instr =
1529  SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1530  emit(instr);
1531 }
1532 
1533 
1534 void Assembler::tne(Register rs, Register rt, uint16_t code) {
1535  ASSERT(is_uint10(code));
1536  Instr instr =
1537  SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1538  emit(instr);
1539 }
1540 
1541 
1542 // Move from HI/LO register.
1543 
1544 void Assembler::mfhi(Register rd) {
1545  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1546 }
1547 
1548 
1549 void Assembler::mflo(Register rd) {
1550  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1551 }
1552 
1553 
1554 // Set on less than instructions.
1555 void Assembler::slt(Register rd, Register rs, Register rt) {
1556  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1557 }
1558 
1559 
1560 void Assembler::sltu(Register rd, Register rs, Register rt) {
1561  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1562 }
1563 
1564 
1565 void Assembler::slti(Register rt, Register rs, int32_t j) {
1566  GenInstrImmediate(SLTI, rs, rt, j);
1567 }
1568 
1569 
1570 void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1571  GenInstrImmediate(SLTIU, rs, rt, j);
1572 }
1573 
1574 
1575 // Conditional move.
1576 void Assembler::movz(Register rd, Register rs, Register rt) {
1577  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1578 }
1579 
1580 
1581 void Assembler::movn(Register rd, Register rs, Register rt) {
1582  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1583 }
1584 
1585 
1586 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
1587  Register rt;
1588  rt.code_ = (cc & 0x0007) << 2 | 1;
1589  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1590 }
1591 
1592 
1593 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1594  Register rt;
1595  rt.code_ = (cc & 0x0007) << 2 | 0;
1596  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1597 }
1598 
1599 
1600 // Bit twiddling.
1601 void Assembler::clz(Register rd, Register rs) {
1602  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1603  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1604 }
1605 
1606 
1607 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1608  // Should be called via MacroAssembler::Ins.
1609  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1610  ASSERT(kArchVariant == kMips32r2);
1611  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1612 }
1613 
1614 
1615 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1616  // Should be called via MacroAssembler::Ext.
1617  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1618  ASSERT(kArchVariant == kMips32r2);
1619  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1620 }
1621 
1622 
1623 //--------Coprocessor-instructions----------------
1624 
1625 // Load, store, move.
1626 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1627  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1628 }
1629 
1630 
1631 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1632  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1633  // load to two 32-bit loads.
1634  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1635  FPURegister nextfpreg;
1636  nextfpreg.setcode(fd.code() + 1);
1637  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
1638 }
1639 
1640 
1641 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1642  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1643 }
1644 
1645 
1646 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1647  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1648  // store to two 32-bit stores.
1649  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1650  FPURegister nextfpreg;
1651  nextfpreg.setcode(fd.code() + 1);
1652  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
1653 }
1654 
1655 
1656 void Assembler::mtc1(Register rt, FPURegister fs) {
1657  GenInstrRegister(COP1, MTC1, rt, fs, f0);
1658 }
1659 
1660 
1661 void Assembler::mfc1(Register rt, FPURegister fs) {
1662  GenInstrRegister(COP1, MFC1, rt, fs, f0);
1663 }
1664 
1665 
1666 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1667  GenInstrRegister(COP1, CTC1, rt, fs);
1668 }
1669 
1670 
1671 void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1672  GenInstrRegister(COP1, CFC1, rt, fs);
1673 }
1674 
1675 void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
1676  uint64_t i;
1677  memcpy(&i, &d, 8);
1678 
1679  *lo = i & 0xffffffff;
1680  *hi = i >> 32;
1681 }
1682 
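// Editorial usage sketch (not part of the original file; `masm` stands in
// for any Assembler*). The memcpy above is the aliasing-safe way to read a
// double's bits; for 1.0 the IEEE-754 pattern is 0x3ff0000000000000:
//
//   uint32_t lo, hi;
//   masm->DoubleAsTwoUInt32(1.0, &lo, &hi);
//   // lo == 0x00000000, hi == 0x3ff00000.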
1683 // Arithmetic.
1684 
1685 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1686  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1687 }
1688 
1689 
1690 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1691  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1692 }
1693 
1694 
1695 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1696  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1697 }
1698 
1699 
1700 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1701  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1702 }
1703 
1704 
1705 void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1706  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1707 }
1708 
1709 
1710 void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1711  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1712 }
1713 
1714 
1715 void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1716  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1717 }
1718 
1719 
1720 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1721  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
1722 }
1723 
1724 
1725 // Conversions.
1726 
1727 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
1728  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
1729 }
1730 
1731 
1732 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
1733  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
1734 }
1735 
1736 
1737 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
1738  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
1739 }
1740 
1741 
1742 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
1743  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
1744 }
1745 
1746 
1747 void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
1748  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
1749 }
1750 
1751 
1752 void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
1753  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
1754 }
1755 
1756 
1757 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
1758  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
1759 }
1760 
1761 
1762 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
1763  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
1764 }
1765 
1766 
1767 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
1768  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1769 }
1770 
1771 
1772 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1773  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1774 }
1775 
1776 
1777 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1778  ASSERT(kArchVariant == kMips32r2);
1779  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1780 }
1781 
1782 
1783 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1784  ASSERT(kArchVariant == kMips32r2);
1785  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1786 }
1787 
1788 
1789 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1790  ASSERT(kArchVariant == kMips32r2);
1791  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1792 }
1793 
1794 
1795 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1796  ASSERT(kArchVariant == kMips32r2);
1797  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1798 }
1799 
1800 
1801 void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1802  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1803 }
1804 
1805 
1806 void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
1807  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
1808 }
1809 
1810 
1811 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
1812  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
1813 }
1814 
1815 
1816 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
1817  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
1818 }
1819 
1820 
1821 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1822  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1823 }
1824 
1825 
1826 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1827  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1828 }
1829 
1830 
1831 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
1832  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
1833 }
1834 
1835 
1836 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
1837  ASSERT(kArchVariant == kMips32r2);
1838  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
1839 }
1840 
1841 
1842 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
1843  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
1844 }
1845 
1846 
1847 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
1848  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
1849 }
1850 
1851 
1852 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
1853  ASSERT(kArchVariant == kMips32r2);
1854  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
1855 }
1856 
1857 
1858 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
1859  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
1860 }
1861 
1862 
1863 // Conditions.
1865  FPURegister fs, FPURegister ft, uint16_t cc) {
1866  ASSERT(CpuFeatures::IsEnabled(FPU));
1867  ASSERT(is_uint3(cc));
1868  ASSERT((fmt & ~(31 << kRsShift)) == 0);
1869  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
1870  | cc << 8 | 3 << 4 | cond;
1871  emit(instr);
1872 }
1873 
1874 
1875 void Assembler::fcmp(FPURegister src1, const double src2,
1876  FPUCondition cond) {
1877  ASSERT(CpuFeatures::IsEnabled(FPU));
1878  ASSERT(src2 == 0.0);
1879  mtc1(zero_reg, f14);
1880  cvt_d_w(f14, f14);
1881  c(cond, D, src1, f14, 0);
1882 }
1883 
1884 
1885 void Assembler::bc1f(int16_t offset, uint16_t cc) {
1886  ASSERT(CpuFeatures::IsEnabled(FPU));
1887  ASSERT(is_uint3(cc));
1888  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
1889  emit(instr);
1890 }
1891 
1892 
1893 void Assembler::bc1t(int16_t offset, uint16_t cc) {
1894  ASSERT(CpuFeatures::IsEnabled(FPU));
1895  ASSERT(is_uint3(cc));
1896  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
1897  emit(instr);
1898 }
1899 
1900 
1901 // Debugging.
1902 void Assembler::RecordJSReturn() {
1903  positions_recorder()->WriteRecordedPositions();
1904  CheckBuffer();
1905  RecordRelocInfo(RelocInfo::JS_RETURN);
1906 }
1907 
1908 
1909 void Assembler::RecordDebugBreakSlot() {
1910  positions_recorder()->WriteRecordedPositions();
1911  CheckBuffer();
1912  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
1913 }
1914 
1915 
1916 void Assembler::RecordComment(const char* msg) {
1917  if (FLAG_code_comments) {
1918  CheckBuffer();
1919  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1920  }
1921 }
1922 
1923 
1924 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
1925  Instr instr = instr_at(pc);
1926  ASSERT(IsJ(instr) || IsLui(instr));
1927  if (IsLui(instr)) {
1928  Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
1929  Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
1930  ASSERT(IsOri(instr_ori));
1931  int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
1932  imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
1933  if (imm == kEndOfJumpChain) {
1934  return 0; // Number of instructions patched.
1935  }
1936  imm += pc_delta;
1937  ASSERT((imm & 3) == 0);
1938 
1939  instr_lui &= ~kImm16Mask;
1940  instr_ori &= ~kImm16Mask;
1941 
1942  instr_at_put(pc + 0 * Assembler::kInstrSize,
1943  instr_lui | ((imm >> kLuiShift) & kImm16Mask));
1944  instr_at_put(pc + 1 * Assembler::kInstrSize,
1945  instr_ori | (imm & kImm16Mask));
1946  return 2; // Number of instructions patched.
1947  } else {
1948  uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
1949  if ((int32_t)imm28 == kEndOfJumpChain) {
1950  return 0; // Number of instructions patched.
1951  }
1952  imm28 += pc_delta;
1953  imm28 &= kImm28Mask;
1954  ASSERT((imm28 & 3) == 0);
1955 
1956  instr &= ~kImm26Mask;
1957  uint32_t imm26 = imm28 >> 2;
1958  ASSERT(is_uint26(imm26));
1959 
1960  instr_at_put(pc, instr | (imm26 & kImm26Mask));
1961  return 1; // Number of instructions patched.
1962  }
1963 }
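// Editorial worked example: if the lui/ori pair at pc encodes the internal
// reference 0x00401000 and the buffer moves by pc_delta = 0x100, the value
// is reassembled as (0x0040 << 16) | 0x1000, bumped to 0x00401100, and
// written back as lui 0x0040 / ori 0x1100 -- two patched instructions,
// matching the returned count of 2.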
1964 
1965 
1966 void Assembler::GrowBuffer() {
1967  if (!own_buffer_) FATAL("external code buffer is too small");
1968 
1969  // Compute new buffer size.
1970  CodeDesc desc; // The new buffer.
1971  if (buffer_size_ < 4*KB) {
1972  desc.buffer_size = 4*KB;
1973  } else if (buffer_size_ < 1*MB) {
1974  desc.buffer_size = 2*buffer_size_;
1975  } else {
1976  desc.buffer_size = buffer_size_ + 1*MB;
1977  }
1978  CHECK_GT(desc.buffer_size, 0); // No overflow.
1979 
1980  // Set up new buffer.
1981  desc.buffer = NewArray<byte>(desc.buffer_size);
1982 
1983  desc.instr_size = pc_offset();
1984  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
1985 
1986  // Copy the data.
1987  int pc_delta = desc.buffer - buffer_;
1988  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
1989  memmove(desc.buffer, buffer_, desc.instr_size);
1990  memmove(reloc_info_writer.pos() + rc_delta,
1991  reloc_info_writer.pos(), desc.reloc_size);
1992 
1993  // Switch buffers.
1994  DeleteArray(buffer_);
1995  buffer_ = desc.buffer;
1996  buffer_size_ = desc.buffer_size;
1997  pc_ += pc_delta;
1998  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
1999  reloc_info_writer.last_pc() + pc_delta);
2000 
2001  // Relocate runtime entries.
2002  for (RelocIterator it(desc); !it.done(); it.next()) {
2003  RelocInfo::Mode rmode = it.rinfo()->rmode();
2004  if (rmode == RelocInfo::INTERNAL_REFERENCE) {
2005  byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
2006  RelocateInternalReference(p, pc_delta);
2007  }
2008  }
2009 
2010  ASSERT(!overflow());
2011 }
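// Editorial note on the growth policy above: a buffer under 4KB grows
// straight to 4KB, buffers under 1MB double (4KB -> 8KB -> ... -> 1MB), and
// buffers of 1MB and up grow linearly by 1MB per call. The relocation info
// is written backwards from the end of the buffer (hence desc.reloc_size is
// measured from buffer_ + buffer_size_), which is why it is moved by
// rc_delta, computed from the buffer end, rather than by pc_delta.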
2012 
2013 
2014 void Assembler::db(uint8_t data) {
2015  CheckBuffer();
2016  *reinterpret_cast<uint8_t*>(pc_) = data;
2017  pc_ += sizeof(uint8_t);
2018 }
2019 
2020 
2021 void Assembler::dd(uint32_t data) {
2022  CheckBuffer();
2023  *reinterpret_cast<uint32_t*>(pc_) = data;
2024  pc_ += sizeof(uint32_t);
2025 }
2026 
2027 
2028 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2029  // We do not try to reuse pool constants.
2030  RelocInfo rinfo(pc_, rmode, data, NULL);
2031  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2032  // Adjust code for new modes.
2033  ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2034  || RelocInfo::IsJSReturn(rmode)
2035  || RelocInfo::IsComment(rmode)
2036  || RelocInfo::IsPosition(rmode));
2037  // These modes do not need an entry in the constant pool.
2038  }
2039  if (rinfo.rmode() != RelocInfo::NONE) {
2040  // Don't record external references unless the heap will be serialized.
2041  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2042 #ifdef DEBUG
2043  if (!Serializer::enabled()) {
2044  Serializer::TooLateToEnableNow();
2045  }
2046 #endif
2047  if (!Serializer::enabled() && !emit_debug_code()) {
2048  return;
2049  }
2050  }
2051  ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
2052  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2053  RelocInfo reloc_info_with_ast_id(pc_,
2054  rmode,
2055  RecordedAstId().ToInt(),
2056  NULL);
2057  ClearRecordedAstId();
2058  reloc_info_writer.Write(&reloc_info_with_ast_id);
2059  } else {
2060  reloc_info_writer.Write(&rinfo);
2061  }
2062  }
2063 }
2064 
2065 
2066 void Assembler::BlockTrampolinePoolFor(int instructions) {
2067  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2068 }
2069 
2070 
2071 void Assembler::CheckTrampolinePool() {
2072  // Some small sequences of instructions must not be broken up by the
2073  // insertion of a trampoline pool; such sequences are protected by setting
2074  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2075  // which are both checked here. Also, recursive calls to CheckTrampolinePool
2076  // are blocked by trampoline_pool_blocked_nesting_.
2077  if ((trampoline_pool_blocked_nesting_ > 0) ||
2078  (pc_offset() < no_trampoline_pool_before_)) {
2079  // Emission is currently blocked; make sure we try again as soon as
2080  // possible.
2081  if (trampoline_pool_blocked_nesting_ > 0) {
2082  next_buffer_check_ = pc_offset() + kInstrSize;
2083  } else {
2084  next_buffer_check_ = no_trampoline_pool_before_;
2085  }
2086  return;
2087  }
2088 
2089  ASSERT(!trampoline_emitted_);
2090  ASSERT(unbound_labels_count_ >= 0);
2091  if (unbound_labels_count_ > 0) {
2092  // First we emit a jump (2 instructions), then the trampoline pool itself.
2093  { BlockTrampolinePoolScope block_trampoline_pool(this);
2094  Label after_pool;
2095  b(&after_pool);
2096  nop();
2097 
2098  int pool_start = pc_offset();
2099  for (int i = 0; i < unbound_labels_count_; i++) {
2100  uint32_t imm32;
2101  imm32 = jump_address(&after_pool);
2102  { BlockGrowBufferScope block_buf_growth(this);
2103  // Buffer growth (and relocation) must be blocked for internal
2104  // references until associated instructions are emitted and available
2105  // to be patched.
2106  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2107  lui(at, (imm32 & kHiMask) >> kLuiShift);
2108  ori(at, at, (imm32 & kImm16Mask));
2109  }
2110  jr(at);
2111  nop();
2112  }
2113  bind(&after_pool);
2114  trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2115 
2116  trampoline_emitted_ = true;
2117  // As we are only going to emit the trampoline pool once, we need to
2118  // prevent any further emission.
2119  next_buffer_check_ = kMaxInt;
2120  }
2121  } else {
2122  // The number of branches to unbound labels at this point is zero, so we
2123  // can move the next buffer check to the maximum.
2124  next_buffer_check_ = pc_offset() +
2125  kMaxBranchOffset - kTrampolineSlotsSize * 16;
2126  }
2127  return;
2128 }
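// Editorial sketch of the pool emitted above, one 4-instruction slot per
// unbound label:
//   b    after_pool      ; fall-through path skips the pool
//   nop                  ; branch delay slot
// pool_start:
//   lui  at, hi(imm32)   ; slot: load a 32-bit target into at...
//   ori  at, at, lo(imm32)
//   jr   at              ; ...and jump through it
//   nop                  ; delay slot
//   (repeated unbound_labels_count_ times)
// after_pool: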
2129 
2130 
2131 Address Assembler::target_address_at(Address pc) {
2132  Instr instr1 = instr_at(pc);
2133  Instr instr2 = instr_at(pc + kInstrSize);
2134  // Interpret the 2 instructions generated by li: lui/ori.
2135  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2136  // Assemble the 32 bit value.
2137  return reinterpret_cast<Address>(
2138  (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
2139  }
2140 
2141  // We should never get here; force a bad address if we do.
2142  UNREACHABLE();
2143  return (Address)0x0;
2144 }
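// Editorial example: for the pair lui at, 0x1234 / ori at, at, 0x5678 this
// returns the Address 0x12345678.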
2145 
2146 
2147 // MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
2148 // qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
2149 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
2150 // OS::nan_value() returns a qNaN.
2151 void Assembler::QuietNaN(HeapObject* object) {
2152  HeapNumber::cast(object)->set_value(OS::nan_value());
2153 }
2154 
2155 
2156 // On Mips, a target address is stored in a lui/ori instruction pair, each
2157 // of which loads 16 bits of the 32-bit address into a register.
2158 // Patching the address must replace both instructions and flush the i-cache.
2159 //
2160 // There is an optimization below, which emits a nop when the address
2161 // fits in just 16 bits. This is unlikely to help, and should be benchmarked,
2162 // and possibly removed.
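// Editorial example: the target 0x12345678 is held in the pair as
//   lui  rt, 0x1234
//   ori  rt, rt, 0x5678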
2163 void Assembler::set_target_address_at(Address pc, Address target) {
2164  Instr instr2 = instr_at(pc + kInstrSize);
2165  uint32_t rt_code = GetRtField(instr2);
2166  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2167  uint32_t itarget = reinterpret_cast<uint32_t>(target);
2168 
2169 #ifdef DEBUG
2170  // Check we have the result from a li macro-instruction, using instr pair.
2171  Instr instr1 = instr_at(pc);
2172  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
2173 #endif
2174 
2175  // Must use 2 instructions to ensure patchable code => just use lui and ori.
2176  // lui rt, upper-16.
2177  // ori rt, rt, lower-16.
2178  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
2179  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
2180 
2181  // The following code is an optimization for the common case of Call()
2182  // or Jump() which is load to register, and jump through register:
2183  // li(t9, address); jalr(t9) (or jr(t9)).
2184  // If the destination address is in the same 256 MB page as the call, it
2185  // is faster to do a direct jal, or j, rather than jump through the register, since
2186  // that lets the cpu pipeline prefetch the target address. However each
2187  // time the address above is patched, we have to patch the direct jal/j
2188  // instruction, as well as possibly revert to jalr/jr if we now cross a
2189  // 256 MB page. Note that with the jal/j instructions, we do not need to
2190  // load the register, but that code is left, since it makes it easy to
2191  // revert this process. A further optimization could try replacing the
2192  // li sequence with nops.
2193  // This optimization can only be applied if the rt-code from instr2 is the
2194  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
2195  // the MIPS return. Occasionally this lands after an li().
2196 
2197  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2198  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
2199  bool in_range =
2200  ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
2201  uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
2202  bool patched_jump = false;
2203 
2204 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
2205  // This is a workaround for the 24k core E156 bug (it affects some 34k cores too).
2206  // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
2207  // apply this workaround for all cores so we don't have to identify the core.
2208  if (in_range) {
2209  // The 24k core E156 bug has some very specific requirements, we only check
2210  // the most simple one: if the address of the delay slot instruction is in
2211  // the first or last 32 KB of the 256 MB segment.
2212  uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2213  uint32_t ipc_segment_addr = ipc & segment_mask;
2214  if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2215  in_range = false;
2216  }
2217 #endif
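// Editorial note: with these constants segment_mask is 0x0FFF8000, so the
// test above rejects delay-slot addresses in the first 32 KB of the 256 MB
// segment (ipc_segment_addr == 0) or in the last 32 KB
// (ipc_segment_addr == segment_mask).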
2218 
2219  if (IsJalr(instr3)) {
2220  // Try to convert JALR to JAL.
2221  if (in_range && GetRt(instr2) == GetRs(instr3)) {
2222  *(p+2) = JAL | target_field;
2223  patched_jump = true;
2224  }
2225  } else if (IsJr(instr3)) {
2226  // Try to convert JR to J, skip returns (jr ra).
2227  bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2228  if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
2229  *(p+2) = J | target_field;
2230  patched_jump = true;
2231  }
2232  } else if (IsJal(instr3)) {
2233  if (in_range) {
2234  // We are patching an already converted JAL.
2235  *(p+2) = JAL | target_field;
2236  } else {
2237  // Patch JAL, but out of range, revert to JALR.
2238  // JALR rs reg is the rt reg specified in the ORI instruction.
2239  uint32_t rs_field = GetRt(instr2) << kRsShift;
2240  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2241  *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2242  }
2243  patched_jump = true;
2244  } else if (IsJ(instr3)) {
2245  if (in_range) {
2246  // We are patching an already converted J (jump).
2247  *(p+2) = J | target_field;
2248  } else {
2249  // Trying to patch J, but out of range; just go back to JR.
2250  // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2251  uint32_t rs_field = GetRt(instr2) << kRsShift;
2252  *(p+2) = SPECIAL | rs_field | JR;
2253  }
2254  patched_jump = true;
2255  }
2256 
2257  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
2258 }
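// Editorial patching example for the common li(t9, addr); jalr(t9) case,
// with addr = 0x12345678 and the call site in the same 256 MB segment: the
// code above rewrites the sequence to
//   lui  t9, 0x1234
//   ori  t9, t9, 0x5678
//   jal  0x12345678   ; 26-bit index = (addr & kJumpAddrMask) >> 2
// and flushes 3 instructions from the i-cache instead of 2.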
2259 
2260 void Assembler::JumpLabelToJumpRegister(Address pc) {
2261  // Address pc points to lui/ori instructions.
2262  // Jump to label may follow at pc + 2 * kInstrSize.
2263  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2264 #ifdef DEBUG
2265  Instr instr1 = instr_at(pc);
2266 #endif
2267  Instr instr2 = instr_at(pc + 1 * kInstrSize);
2268  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2269  bool patched = false;
2270 
2271  if (IsJal(instr3)) {
2272  ASSERT(GetOpcodeField(instr1) == LUI);
2273  ASSERT(GetOpcodeField(instr2) == ORI);
2274 
2275  uint32_t rs_field = GetRt(instr2) << kRsShift;
2276  uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2277  *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2278  patched = true;
2279  } else if (IsJ(instr3)) {
2280  ASSERT(GetOpcodeField(instr1) == LUI);
2281  ASSERT(GetOpcodeField(instr2) == ORI);
2282 
2283  uint32_t rs_field = GetRt(instr2) << kRsShift;
2284  *(p+2) = SPECIAL | rs_field | JR;
2285  patched = true;
2286  }
2287 
2288  if (patched) {
2289  CPU::FlushICache(pc+2, sizeof(Address));
2290  }
2291 }
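// Editorial note: JumpLabelToJumpRegister() is the inverse of the jal/j
// optimization in set_target_address_at() above -- an absolute jal or j is
// demoted back to the register form (jalr/jr) so that the lui/ori pair
// alone determines the jump target again.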
2292 
2293 } } // namespace v8::internal
2294 
2295 #endif // V8_TARGET_ARCH_MIPS