v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
macro-assembler-mips.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
29 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
30 
31 #include "assembler.h"
32 #include "mips/assembler-mips.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // Forward declaration.
39 class JumpTarget;
40 
41 // Reserved Register Usage Summary.
42 //
43 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
44 //
45 // The programmer should know that the MacroAssembler may clobber these three,
46 // but won't touch other registers except in special cases.
47 //
48 // Per the MIPS ABI, register t9 must be used for indirect function call
49 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
50 // trying to update gp register for position-independent-code. Whenever
51 // MIPS generated code calls C code, it must be via t9 register.
52 
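// Editorial illustration (not part of the original file): a call into C
// from generated code therefore loads the target into t9 first, e.g. for
// some ExternalReference 'ref' (hypothetical here):
//   li(t9, Operand(ref));
//   Call(t9);  // emitted as 'jalr t9', letting the callee recompute gp.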
53 
54 // Flags used for the AllocateInNewSpace functions.
55 enum AllocationFlags {
56  // No special flags.
57  NO_ALLOCATION_FLAGS = 0,
58  // Return the pointer to the allocated object already tagged as a heap object.
59  TAG_OBJECT = 1 << 0,
60  // The content of the result register already contains the allocation top in
61  // new space.
62  RESULT_CONTAINS_TOP = 1 << 1,
63  // Specify that the requested size of the space to allocate is specified in
64  // words instead of bytes.
65  SIZE_IN_WORDS = 1 << 2
66 };
67 
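// Editorial example: these are bit flags and may be combined, e.g. an
// allocation whose size is given in words and whose result should come
// back tagged (registers and label hypothetical):
//   AllocateInNewSpace(size, result, scratch1, scratch2, &gc_required,
//                      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));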
68 // Flags used for the ObjectToDoubleFPURegister function.
69 enum ObjectToDoubleFlags {
70  // No special flags.
71  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
72  // Object is known to be a non smi.
73  OBJECT_NOT_SMI = 1 << 0,
74  // Don't load NaNs or infinities; branch to the non number case instead.
75  AVOID_NANS_AND_INFINITIES = 1 << 1
76 };
77 
78 // Allow the programmer to use the branch delay slot of branches, jumps, and calls.
79 enum BranchDelaySlot {
80  USE_DELAY_SLOT,
81  PROTECT
82 };
83 
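// Editorial example: PROTECT (the default everywhere below) fills the delay
// slot with a nop; USE_DELAY_SLOT lets the next emitted instruction occupy
// it, e.g.:
//   Ret(USE_DELAY_SLOT);
//   mov(v0, zero_reg);  // executes in the delay slot of the return.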
84 // Flags used for the li macro-assembler function.
85 enum LiFlags {
86  // If the constant value can be represented in just 16 bits, then
87  // optimize li to use a single instruction rather than a lui/ori pair.
88  OPTIMIZE_SIZE = 0,
89  // Always use 2 instructions (a lui/ori pair), even if the constant could
90  // be loaded with just one, so that this value is patchable later.
91  CONSTANT_SIZE = 1
92 };
93 
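// Editorial example: code that patches the constant later must pin li to
// the two-instruction form (value hypothetical):
//   li(at, Operand(value), CONSTANT_SIZE);  // always lui/ori, so patchable.
// With the default OPTIMIZE_SIZE, a 16-bit value emits a single instruction.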
94 
95 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
96 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
97 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
98 
99 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
100 
101 
102 // -----------------------------------------------------------------------------
103 // Static helper functions.
104 
105 inline MemOperand ContextOperand(Register context, int index) {
106  return MemOperand(context, Context::SlotOffset(index));
107 }
108 
109 
110 inline MemOperand GlobalObjectOperand() {
111  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
112 }
113 
114 
115 // Generate a MemOperand for loading a field from an object.
116 inline MemOperand FieldMemOperand(Register object, int offset) {
117  return MemOperand(object, offset - kHeapObjectTag);
118 }
119 
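// Editorial example: heap object pointers are tagged, so field accesses go
// through FieldMemOperand, which subtracts kHeapObjectTag, e.g.:
//   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));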
120 
121 // Generate a MemOperand for storing arguments 5..N on the stack
122 // when calling CallCFunction().
123 inline MemOperand CFunctionArgumentOperand(int index) {
124  ASSERT(index > kCArgSlotCount);
125  // Argument 5 takes the slot just past the four Arg-slots.
126  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
127  return MemOperand(sp, offset);
128 }
129 
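// Editorial example, mirroring the usage shown later in this file: after
// PrepareCallCFunction(), the fifth C argument is stored with:
//   sw(t0, CFunctionArgumentOperand(5));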
130 
131 // MacroAssembler implements a collection of frequently used macros.
132 class MacroAssembler: public Assembler {
133  public:
134  // The isolate parameter can be NULL if the macro assembler should
135  // not use isolate-dependent functionality. In this case, it's the
136  // responsibility of the caller to never invoke such function on the
137  // macro assembler.
138  MacroAssembler(Isolate* isolate, void* buffer, int size);
139 
140  // Arguments macros.
141 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
142 #define COND_ARGS cond, r1, r2
143 
144  // Cases when relocation is not needed.
145 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
146  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
147  inline void Name(BranchDelaySlot bd, target_type target) { \
148  Name(target, bd); \
149  } \
150  void Name(target_type target, \
151  COND_TYPED_ARGS, \
152  BranchDelaySlot bd = PROTECT); \
153  inline void Name(BranchDelaySlot bd, \
154  target_type target, \
155  COND_TYPED_ARGS) { \
156  Name(target, COND_ARGS, bd); \
157  }
158 
159 #define DECLARE_BRANCH_PROTOTYPES(Name) \
160  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
161  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
162 
163  DECLARE_BRANCH_PROTOTYPES(Branch)
164  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
165 
166 #undef DECLARE_BRANCH_PROTOTYPES
167 #undef COND_TYPED_ARGS
168 #undef COND_ARGS
169 
170 
171  // Jump, Call, and Ret pseudo instructions implementing inter-working.
172 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
173  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
174 
175  void Jump(Register target, COND_ARGS);
176  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
177  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
178  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
179  static int CallSize(Register target, COND_ARGS);
180  void Call(Register target, COND_ARGS);
181  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
182  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
183  static int CallSize(Handle<Code> code,
184  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
185  TypeFeedbackId ast_id = TypeFeedbackId::None(),
186  COND_ARGS);
187  void Call(Handle<Code> code,
188  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
189  TypeFeedbackId ast_id = TypeFeedbackId::None(),
190  COND_ARGS);
191  void Ret(COND_ARGS);
192  inline void Ret(BranchDelaySlot bd, Condition cond = al,
193  Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
194  Ret(cond, rs, rt, bd);
195  }
196 
197  void Branch(Label* L,
198  Condition cond,
199  Register rs,
200  Heap::RootListIndex index,
201  BranchDelaySlot bdslot = PROTECT);
202 
203 #undef COND_ARGS
204 
205  // Emit code to discard a non-negative number of pointer-sized elements
206  // from the stack, clobbering only the sp register.
207  void Drop(int count,
208  Condition cond = cc_always,
209  Register reg = no_reg,
210  const Operand& op = Operand(no_reg));
211 
212  // Trivial case of DropAndRet that utilizes the delay slot and only emits
213  // 2 instructions.
214  void DropAndRet(int drop);
215 
216  void DropAndRet(int drop,
217  Condition cond,
218  Register reg,
219  const Operand& op);
220 
221  // Swap two registers. If the scratch register is omitted then a slightly
222  // less efficient form using xor instead of mov is emitted.
223  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
224 
225  void Call(Label* target);
226 
227  inline void Move(Register dst, Register src) {
228  if (!dst.is(src)) {
229  mov(dst, src);
230  }
231  }
232 
233  inline void Move(FPURegister dst, FPURegister src) {
234  if (!dst.is(src)) {
235  mov_d(dst, src);
236  }
237  }
238 
239  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
240  mfc1(dst_low, src);
241  mfc1(dst_high, FPURegister::from_code(src.code() + 1));
242  }
243 
244  inline void Move(FPURegister dst, Register src_low, Register src_high) {
245  mtc1(src_low, dst);
246  mtc1(src_high, FPURegister::from_code(dst.code() + 1));
247  }
248 
249  // Conditional move.
250  void Move(FPURegister dst, double imm);
251  void Movz(Register rd, Register rs, Register rt);
252  void Movn(Register rd, Register rs, Register rt);
253  void Movt(Register rd, Register rs, uint16_t cc = 0);
254  void Movf(Register rd, Register rs, uint16_t cc = 0);
255 
256  void Clz(Register rd, Register rs);
257 
258  // Jump unconditionally to given label.
259  // We NEED a nop in the branch delay slot, as it is used by v8, for example
260  // in CodeGenerator::ProcessDeferred().
261  // Currently the branch delay slot is filled by the MacroAssembler.
262  // Prefer b(Label) for code generation.
263  void jmp(Label* L) {
264  Branch(L);
265  }
266 
267  // Load an object from the root table.
268  void LoadRoot(Register destination,
269  Heap::RootListIndex index);
270  void LoadRoot(Register destination,
271  Heap::RootListIndex index,
272  Condition cond, Register src1, const Operand& src2);
273 
274  // Store an object to the root table.
275  void StoreRoot(Register source,
276  Heap::RootListIndex index);
277  void StoreRoot(Register source,
278  Heap::RootListIndex index,
279  Condition cond, Register src1, const Operand& src2);
280 
281  void LoadHeapObject(Register dst, Handle<HeapObject> object);
282 
283  void LoadObject(Register result, Handle<Object> object) {
284  if (object->IsHeapObject()) {
285  LoadHeapObject(result, Handle<HeapObject>::cast(object));
286  } else {
287  li(result, object);
288  }
289  }
290 
291  // ---------------------------------------------------------------------------
292  // GC Support
293 
294  void IncrementalMarkingRecordWriteHelper(Register object,
295  Register value,
296  Register address);
297 
298  enum RememberedSetFinalAction {
299  kReturnAtEnd,
300  kFallThroughAtEnd
301  };
302 
303 
304  // Record in the remembered set the fact that we have a pointer to new space
305  // at the address pointed to by the addr register. Only works if addr is not
306  // in new space.
307  void RememberedSetHelper(Register object, // Used for debug code.
308  Register addr,
309  Register scratch,
310  SaveFPRegsMode save_fp,
311  RememberedSetFinalAction and_then);
312 
313  void CheckPageFlag(Register object,
314  Register scratch,
315  int mask,
316  Condition cc,
317  Label* condition_met);
318 
319  // Check if object is in new space. Jumps if the object is not in new space.
320  // The register scratch can be object itself, but it will be clobbered.
321  void JumpIfNotInNewSpace(Register object,
322  Register scratch,
323  Label* branch) {
324  InNewSpace(object, scratch, ne, branch);
325  }
326 
327  // Check if object is in new space. Jumps if the object is in new space.
328  // The register scratch can be object itself, but scratch will be clobbered.
329  void JumpIfInNewSpace(Register object,
330  Register scratch,
331  Label* branch) {
332  InNewSpace(object, scratch, eq, branch);
333  }
334 
335  // Check if an object has a given incremental marking color.
336  void HasColor(Register object,
337  Register scratch0,
338  Register scratch1,
339  Label* has_color,
340  int first_bit,
341  int second_bit);
342 
343  void JumpIfBlack(Register object,
344  Register scratch0,
345  Register scratch1,
346  Label* on_black);
347 
348  // Checks the color of an object. If the object is already grey or black
349  // then we just fall through, since it is already live. If it is white and
350  // we can determine that it doesn't need to be scanned, then we just mark it
351  // black and fall through. For the rest we jump to the label so the
352  // incremental marker can fix its assumptions.
353  void EnsureNotWhite(Register object,
354  Register scratch1,
355  Register scratch2,
356  Register scratch3,
357  Label* object_is_white_and_not_data);
358 
359  // Detects conservatively whether an object is data-only, i.e. it does not
360  // need to be scanned by the garbage collector.
361  void JumpIfDataObject(Register value,
362  Register scratch,
363  Label* not_data_object);
364 
365  // Notify the garbage collector that we wrote a pointer into an object.
366  // |object| is the object being stored into, |value| is the object being
367  // stored. value and scratch registers are clobbered by the operation.
368  // The offset is the offset from the start of the object, not the offset from
369  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
370  void RecordWriteField(
371  Register object,
372  int offset,
373  Register value,
374  Register scratch,
375  RAStatus ra_status,
376  SaveFPRegsMode save_fp,
377  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
378  SmiCheck smi_check = INLINE_SMI_CHECK);
379 
380  // As above, but the offset has the tag presubtracted. For use with
381  // MemOperand(reg, off).
382  inline void RecordWriteContextSlot(
383  Register context,
384  int offset,
385  Register value,
386  Register scratch,
387  RAStatus ra_status,
388  SaveFPRegsMode save_fp,
389  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
390  SmiCheck smi_check = INLINE_SMI_CHECK) {
391  RecordWriteField(context,
392  offset + kHeapObjectTag,
393  value,
394  scratch,
395  ra_status,
396  save_fp,
397  remembered_set_action,
398  smi_check);
399  }
400 
401  // For a given |object| notify the garbage collector that the slot |address|
402  // has been written. |value| is the object being stored. The value and
403  // address registers are clobbered by the operation.
404  void RecordWrite(
405  Register object,
406  Register address,
407  Register value,
408  RAStatus ra_status,
409  SaveFPRegsMode save_fp,
410  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
411  SmiCheck smi_check = INLINE_SMI_CHECK);
412 
413 
414  // ---------------------------------------------------------------------------
415  // Inline caching support.
416 
417  // Generate code for checking access rights - used for security checks
418  // on access to global objects across environments. The holder register
419  // is left untouched, whereas both scratch registers are clobbered.
420  void CheckAccessGlobalProxy(Register holder_reg,
421  Register scratch,
422  Label* miss);
423 
424  void GetNumberHash(Register reg0, Register scratch);
425 
426  void LoadFromNumberDictionary(Label* miss,
427  Register elements,
428  Register key,
429  Register result,
430  Register reg0,
431  Register reg1,
432  Register reg2);
433 
434 
435  inline void MarkCode(NopMarkerTypes type) {
436  nop(type);
437  }
438 
439  // Check if the given instruction is a 'type' marker.
440  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
441  // nop(type)). These instructions are generated to mark a special location in
442  // the code, like some special IC code.
443  static inline bool IsMarkedCode(Instr instr, int type) {
444  ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
445  return IsNop(instr, type);
446  }
447 
448 
449  static inline int GetCodeMarker(Instr instr) {
450  uint32_t opcode = ((instr & kOpcodeMask));
451  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
452  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
453  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
454 
455  // Return <n> if we have a sll zero_reg, zero_reg, n
456  // else return -1.
457  bool sllzz = (opcode == SLL &&
458  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
459  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
460  int type =
461  (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
462  ASSERT((type == -1) ||
463  ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
464  return type;
465  }
466 
467 
468 
469  // ---------------------------------------------------------------------------
470  // Allocation support.
471 
472  // Allocate an object in new space. The object_size is specified
473  // either in bytes or in words if the allocation flag SIZE_IN_WORDS
474  // is passed. If the new space is exhausted control continues at the
475  // gc_required label. The allocated object is returned in result. If
476  // the flag tag_allocated_object is true the result is tagged as
477  // a heap object. All registers are clobbered also when control
478  // continues at the gc_required label.
479  void AllocateInNewSpace(int object_size,
480  Register result,
481  Register scratch1,
482  Register scratch2,
483  Label* gc_required,
484  AllocationFlags flags);
485  void AllocateInNewSpace(Register object_size,
486  Register result,
487  Register scratch1,
488  Register scratch2,
489  Label* gc_required,
490  AllocationFlags flags);
491 
492  // Undo allocation in new space. The object passed and objects allocated after
493  // it will no longer be allocated. The caller must make sure that no pointers
494  // are left to the object(s) no longer allocated as they would be invalid when
495  // allocation is undone.
496  void UndoAllocationInNewSpace(Register object, Register scratch);
497 
498 
499  void AllocateTwoByteString(Register result,
500  Register length,
501  Register scratch1,
502  Register scratch2,
503  Register scratch3,
504  Label* gc_required);
505  void AllocateAsciiString(Register result,
506  Register length,
507  Register scratch1,
508  Register scratch2,
509  Register scratch3,
510  Label* gc_required);
511  void AllocateTwoByteConsString(Register result,
512  Register length,
513  Register scratch1,
514  Register scratch2,
515  Label* gc_required);
516  void AllocateAsciiConsString(Register result,
517  Register length,
518  Register scratch1,
519  Register scratch2,
520  Label* gc_required);
521  void AllocateTwoByteSlicedString(Register result,
522  Register length,
523  Register scratch1,
524  Register scratch2,
525  Label* gc_required);
526  void AllocateAsciiSlicedString(Register result,
527  Register length,
528  Register scratch1,
529  Register scratch2,
530  Label* gc_required);
531 
532  // Allocates a heap number or jumps to the gc_required label if the young
533  // space is full and a scavenge is needed. All registers are clobbered also
534  // when control continues at the gc_required label.
535  void AllocateHeapNumber(Register result,
536  Register scratch1,
537  Register scratch2,
538  Register heap_number_map,
539  Label* gc_required);
540  void AllocateHeapNumberWithValue(Register result,
541  FPURegister value,
542  Register scratch1,
543  Register scratch2,
544  Label* gc_required);
545 
546  // ---------------------------------------------------------------------------
547  // Instruction macros.
548 
549 #define DEFINE_INSTRUCTION(instr) \
550  void instr(Register rd, Register rs, const Operand& rt); \
551  void instr(Register rd, Register rs, Register rt) { \
552  instr(rd, rs, Operand(rt)); \
553  } \
554  void instr(Register rs, Register rt, int32_t j) { \
555  instr(rs, rt, Operand(j)); \
556  }
557 
558 #define DEFINE_INSTRUCTION2(instr) \
559  void instr(Register rs, const Operand& rt); \
560  void instr(Register rs, Register rt) { \
561  instr(rs, Operand(rt)); \
562  } \
563  void instr(Register rs, int32_t j) { \
564  instr(rs, Operand(j)); \
565  }
566 
567  DEFINE_INSTRUCTION(Addu);
568  DEFINE_INSTRUCTION(Subu);
569  DEFINE_INSTRUCTION(Mul);
570  DEFINE_INSTRUCTION2(Mult);
571  DEFINE_INSTRUCTION2(Multu);
572  DEFINE_INSTRUCTION2(Div);
573  DEFINE_INSTRUCTION2(Divu);
574 
575  DEFINE_INSTRUCTION(And);
576  DEFINE_INSTRUCTION(Or);
577  DEFINE_INSTRUCTION(Xor);
578  DEFINE_INSTRUCTION(Nor);
579  DEFINE_INSTRUCTION2(Neg);
580 
581  DEFINE_INSTRUCTION(Slt);
582  DEFINE_INSTRUCTION(Sltu);
583 
584  // MIPS32 R2 instruction macro.
585  DEFINE_INSTRUCTION(Ror);
586 
587 #undef DEFINE_INSTRUCTION
588 #undef DEFINE_INSTRUCTION2
589 
590 
591  // ---------------------------------------------------------------------------
592  // Pseudo-instructions.
593 
594  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
595 
596  // Load int32 in the rd register.
597  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
598  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
599  li(rd, Operand(j), mode);
600  }
601  inline void li(Register dst, Handle<Object> value,
602  LiFlags mode = OPTIMIZE_SIZE) {
603  li(dst, Operand(value), mode);
604  }
605 
606  // Push multiple registers on the stack.
607  // Registers are saved in numerical order, with higher numbered registers
608  // saved in higher memory addresses.
609  void MultiPush(RegList regs);
610  void MultiPushReversed(RegList regs);
611 
612  void MultiPushFPU(RegList regs);
613  void MultiPushReversedFPU(RegList regs);
614 
615  // Lower case push() for compatibility with arch-independent code.
616  void push(Register src) {
617  Addu(sp, sp, Operand(-kPointerSize));
618  sw(src, MemOperand(sp, 0));
619  }
620 
621  // Push a handle.
622  void Push(Handle<Object> handle);
623 
624  // Push two registers. Pushes leftmost register first (to highest address).
625  void Push(Register src1, Register src2) {
626  Subu(sp, sp, Operand(2 * kPointerSize));
627  sw(src1, MemOperand(sp, 1 * kPointerSize));
628  sw(src2, MemOperand(sp, 0 * kPointerSize));
629  }
630 
631  // Push three registers. Pushes leftmost register first (to highest address).
632  void Push(Register src1, Register src2, Register src3) {
633  Subu(sp, sp, Operand(3 * kPointerSize));
634  sw(src1, MemOperand(sp, 2 * kPointerSize));
635  sw(src2, MemOperand(sp, 1 * kPointerSize));
636  sw(src3, MemOperand(sp, 0 * kPointerSize));
637  }
638 
639  // Push four registers. Pushes leftmost register first (to highest address).
640  void Push(Register src1, Register src2, Register src3, Register src4) {
641  Subu(sp, sp, Operand(4 * kPointerSize));
642  sw(src1, MemOperand(sp, 3 * kPointerSize));
643  sw(src2, MemOperand(sp, 2 * kPointerSize));
644  sw(src3, MemOperand(sp, 1 * kPointerSize));
645  sw(src4, MemOperand(sp, 0 * kPointerSize));
646  }
647 
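// Editorial note: each multi-register Push above is equivalent to pushing
// the registers left to right, e.g.:
//   Push(a0, a1);  // same layout as: push(a0); push(a1);
//                  // a1 ends up at [sp], a0 at [sp + kPointerSize].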
648  void Push(Register src, Condition cond, Register tst1, Register tst2) {
649  // Since we don't have conditional execution we use a Branch.
650  Branch(3, cond, tst1, Operand(tst2));
651  Subu(sp, sp, Operand(kPointerSize));
652  sw(src, MemOperand(sp, 0));
653  }
654 
655  // Pops multiple values from the stack and load them in the
656  // registers specified in regs. Pop order is the opposite as in MultiPush.
657  void MultiPop(RegList regs);
658  void MultiPopReversed(RegList regs);
659 
660  void MultiPopFPU(RegList regs);
661  void MultiPopReversedFPU(RegList regs);
662 
663  // Lower case pop() for compatibility with arch-independent code.
664  void pop(Register dst) {
665  lw(dst, MemOperand(sp, 0));
666  Addu(sp, sp, Operand(kPointerSize));
667  }
668 
669  // Pop two registers. Pops rightmost register first (from lower address).
670  void Pop(Register src1, Register src2) {
671  ASSERT(!src1.is(src2));
672  lw(src2, MemOperand(sp, 0 * kPointerSize));
673  lw(src1, MemOperand(sp, 1 * kPointerSize));
674  Addu(sp, sp, 2 * kPointerSize);
675  }
676 
677  // Pop three registers. Pops rightmost register first (from lower address).
678  void Pop(Register src1, Register src2, Register src3) {
679  lw(src3, MemOperand(sp, 0 * kPointerSize));
680  lw(src2, MemOperand(sp, 1 * kPointerSize));
681  lw(src1, MemOperand(sp, 2 * kPointerSize));
682  Addu(sp, sp, 3 * kPointerSize);
683  }
684 
685  void Pop(uint32_t count = 1) {
686  Addu(sp, sp, Operand(count * kPointerSize));
687  }
688 
689  // Push and pop the registers that can hold pointers, as defined by the
690  // RegList constant kSafepointSavedRegisters.
691  void PushSafepointRegisters();
692  void PopSafepointRegisters();
693  void PushSafepointRegistersAndDoubles();
694  void PopSafepointRegistersAndDoubles();
695  // Store value in register src in the safepoint stack slot for
696  // register dst.
697  void StoreToSafepointRegisterSlot(Register src, Register dst);
698  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
699  // Load the value of the src register from its safepoint stack slot
700  // into register dst.
701  void LoadFromSafepointRegisterSlot(Register dst, Register src);
702 
703  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
704  // Does not handle errors.
705  void FlushICache(Register address, unsigned instructions);
706 
707  // MIPS32 R2 instruction macro.
708  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
709  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
710 
711  // ---------------------------------------------------------------------------
712  // FPU macros. These do not handle special cases like NaN or +- inf.
713 
714  // Convert unsigned word to double.
715  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
716  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
717 
718  // Convert double to unsigned word.
719  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
720  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
721 
722  void Trunc_w_d(FPURegister fd, FPURegister fs);
723  void Round_w_d(FPURegister fd, FPURegister fs);
724  void Floor_w_d(FPURegister fd, FPURegister fs);
725  void Ceil_w_d(FPURegister fd, FPURegister fs);
726  // Wrapper function for the different cmp/branch types.
727  void BranchF(Label* target,
728  Label* nan,
729  Condition cc,
730  FPURegister cmp1,
731  FPURegister cmp2,
732  BranchDelaySlot bd = PROTECT);
733 
734  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
735  inline void BranchF(BranchDelaySlot bd,
736  Label* target,
737  Label* nan,
738  Condition cc,
739  FPURegister cmp1,
740  FPURegister cmp2) {
741  BranchF(target, nan, cc, cmp1, cmp2, bd);
742  }
743 
744  // Convert the HeapNumber pointed to by source to a 32-bit signed integer
745  // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
746  // to the not_int32 label. If FPU is available, double_scratch is used but
747  // not scratch2.
748  void ConvertToInt32(Register source,
749  Register dest,
750  Register scratch,
751  Register scratch2,
752  FPURegister double_scratch,
753  Label *not_int32);
754 
755  // Truncates a double using a specific rounding mode.
756  // The except_flag will contain any exceptions caused by the instruction.
757  // If check_inexact is kDontCheckForInexactConversion, then the inexact
758  // exception is masked.
759  void EmitFPUTruncate(FPURoundingMode rounding_mode,
760  FPURegister result,
761  DoubleRegister double_input,
762  Register scratch1,
763  Register except_flag,
764  CheckForInexactConversion check_inexact
765  = kDontCheckForInexactConversion);
766 
767  // Helper for EmitECMATruncate.
768  // This will truncate a floating-point value outside of the signed 32-bit
769  // integer range to a 32-bit signed integer.
770  // Expects the double value loaded in input_high and input_low.
771  // Exits with the answer in 'result'.
772  // Note that this code does not work for values in the 32-bit range!
773  void EmitOutOfInt32RangeTruncate(Register result,
774  Register input_high,
775  Register input_low,
776  Register scratch);
777 
778  // Performs a truncating conversion of a floating point number as used by
779  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
780  // Exits with 'result' holding the answer and all other registers clobbered.
781  void EmitECMATruncate(Register result,
782  FPURegister double_input,
783  FPURegister single_scratch,
784  Register scratch,
785  Register scratch2,
786  Register scratch3);
787 
788  // Enter exit frame.
789  // argc - argument count to be dropped by LeaveExitFrame.
790  // save_doubles - saves FPU registers on stack, currently disabled.
791  // stack_space - extra stack space.
792  void EnterExitFrame(bool save_doubles,
793  int stack_space = 0);
794 
795  // Leave the current exit frame.
796  void LeaveExitFrame(bool save_doubles,
797  Register arg_count,
798  bool do_return = false);
799 
800  // Get the actual activation frame alignment for target environment.
801  static int ActivationFrameAlignment();
802 
803  // Make sure the stack is aligned. Only emits code in debug mode.
804  void AssertStackIsAligned();
805 
806  void LoadContext(Register dst, int context_chain_length);
807 
808  // Conditionally load the cached Array transitioned map of type
809  // transitioned_kind from the native context if the map in register
810  // map_in_out is the cached Array map in the native context of
811  // expected_kind.
812  void LoadTransitionedArrayMapConditional(
813  ElementsKind expected_kind,
814  ElementsKind transitioned_kind,
815  Register map_in_out,
816  Register scratch,
817  Label* no_map_match);
818 
819  // Load the initial map for new Arrays from a JSFunction.
820  void LoadInitialArrayMap(Register function_in,
821  Register scratch,
822  Register map_out,
823  bool can_have_holes);
824 
825  void LoadGlobalFunction(int index, Register function);
826 
827  // Load the initial map from the global function. The registers
828  // function and map can be the same, function is then overwritten.
829  void LoadGlobalFunctionInitialMap(Register function,
830  Register map,
831  Register scratch);
832 
833  void InitializeRootRegister() {
834  ExternalReference roots_array_start =
835  ExternalReference::roots_array_start(isolate());
836  li(kRootRegister, Operand(roots_array_start));
837  }
838 
839  // -------------------------------------------------------------------------
840  // JavaScript invokes.
841 
842  // Set up call kind marking in t1. The method takes t1 as an
843  // explicit first parameter to make the code more readable at the
844  // call sites.
845  void SetCallKind(Register dst, CallKind kind);
846 
847  // Invoke the JavaScript function code by either calling or jumping.
848  void InvokeCode(Register code,
849  const ParameterCount& expected,
850  const ParameterCount& actual,
851  InvokeFlag flag,
852  const CallWrapper& call_wrapper,
853  CallKind call_kind);
854 
855  void InvokeCode(Handle<Code> code,
856  const ParameterCount& expected,
857  const ParameterCount& actual,
858  RelocInfo::Mode rmode,
859  InvokeFlag flag,
860  CallKind call_kind);
861 
862  // Invoke the JavaScript function in the given register. Changes the
863  // current context to the context in the function before invoking.
864  void InvokeFunction(Register function,
865  const ParameterCount& actual,
866  InvokeFlag flag,
867  const CallWrapper& call_wrapper,
868  CallKind call_kind);
869 
870  void InvokeFunction(Handle<JSFunction> function,
871  const ParameterCount& actual,
872  InvokeFlag flag,
873  const CallWrapper& call_wrapper,
874  CallKind call_kind);
875 
876 
877  void IsObjectJSObjectType(Register heap_object,
878  Register map,
879  Register scratch,
880  Label* fail);
881 
882  void IsInstanceJSObjectType(Register map,
883  Register scratch,
884  Label* fail);
885 
886  void IsObjectJSStringType(Register object,
887  Register scratch,
888  Label* fail);
889 
890 #ifdef ENABLE_DEBUGGER_SUPPORT
891  // -------------------------------------------------------------------------
892  // Debugger Support.
893 
894  void DebugBreak();
895 #endif
896 
897 
898  // -------------------------------------------------------------------------
899  // Exception handling.
900 
901  // Push a new try handler and link into try handler chain.
902  void PushTryHandler(StackHandler::Kind kind, int handler_index);
903 
904  // Unlink the stack handler on top of the stack from the try handler chain.
905  // Must preserve the result register.
906  void PopTryHandler();
907 
908  // Passes thrown value to the handler of top of the try handler chain.
909  void Throw(Register value);
910 
911  // Propagates an uncatchable exception to the top of the current JS stack's
912  // handler chain.
913  void ThrowUncatchable(Register value);
914 
915  // Copies a fixed number of fields of heap objects from src to dst.
916  void CopyFields(Register dst, Register src, RegList temps, int field_count);
917 
918  // Copies a number of bytes from src to dst. All registers are clobbered. On
919  // exit src and dst will point to the place just after where the last byte was
920  // read or written and length will be zero.
921  void CopyBytes(Register src,
922  Register dst,
923  Register length,
924  Register scratch);
925 
926  // Initialize fields with filler values. Fields from |start_offset| up to,
927  // but not including, |end_offset| are overwritten with the value in
928  // |filler|. At the end of the loop, |start_offset| takes the value of |end_offset|.
929  void InitializeFieldsWithFiller(Register start_offset,
930  Register end_offset,
931  Register filler);
932 
933  // -------------------------------------------------------------------------
934  // Support functions.
935 
936  // Try to get the function prototype of a function and put the value in
937  // the result register. Checks that the function really is a
938  // function and jumps to the miss label if the fast checks fail. The
939  // function register will be untouched; the other registers may be
940  // clobbered.
941  void TryGetFunctionPrototype(Register function,
942  Register result,
943  Register scratch,
944  Label* miss,
945  bool miss_on_bound_function = false);
946 
947  void GetObjectType(Register function,
948  Register map,
949  Register type_reg);
950 
951  // Check if a map for a JSObject indicates that the object has fast elements.
952  // Jump to the specified label if it does not.
953  void CheckFastElements(Register map,
954  Register scratch,
955  Label* fail);
956 
957  // Check if a map for a JSObject indicates that the object can have both smi
958  // and HeapObject elements. Jump to the specified label if it does not.
959  void CheckFastObjectElements(Register map,
960  Register scratch,
961  Label* fail);
962 
963  // Check if a map for a JSObject indicates that the object has fast smi only
964  // elements. Jump to the specified label if it does not.
965  void CheckFastSmiElements(Register map,
966  Register scratch,
967  Label* fail);
968 
969  // Check to see if maybe_number can be stored as a double in
970  // FastDoubleElements. If it can, store it at the index specified by key in
971  // the FastDoubleElements array elements. Otherwise jump to fail, in which
972  // case scratch2, scratch3 and scratch4 are unmodified.
973  void StoreNumberToDoubleElements(Register value_reg,
974  Register key_reg,
975  Register receiver_reg,
976  // All regs below here overwritten.
977  Register elements_reg,
978  Register scratch1,
979  Register scratch2,
980  Register scratch3,
981  Register scratch4,
982  Label* fail);
983 
984  // Compare an object's map with the specified map and its transitioned
985  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
986  // "branch_to" if the result of the comparison is "cond". If multiple map
987  // compares are required, the compare sequence branches to early_success.
988  void CompareMapAndBranch(Register obj,
989  Register scratch,
990  Handle<Map> map,
991  Label* early_success,
992  Condition cond,
993  Label* branch_to,
994  CompareMapMode mode = REQUIRE_EXACT_MAP);
995 
996  // As above, but the map of the object is already loaded into the register
997  // which is preserved by the code generated.
998  void CompareMapAndBranch(Register obj_map,
999  Handle<Map> map,
1000  Label* early_success,
1001  Condition cond,
1002  Label* branch_to,
1003  CompareMapMode mode = REQUIRE_EXACT_MAP);
1004 
1005  // Check if the map of an object is equal to a specified map and branch to
1006  // label if not. Skip the smi check if not required (object is known to be a
1007  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1008  // against maps that are ElementsKind transition maps of the specified map.
1009  void CheckMap(Register obj,
1010  Register scratch,
1011  Handle<Map> map,
1012  Label* fail,
1013  SmiCheckType smi_check_type,
1014  CompareMapMode mode = REQUIRE_EXACT_MAP);
1015 
1016 
1017  void CheckMap(Register obj,
1018  Register scratch,
1019  Heap::RootListIndex index,
1020  Label* fail,
1021  SmiCheckType smi_check_type);
1022 
1023  // Check if the map of an object is equal to a specified map and branch to a
1024  // specified target if equal. Skip the smi check if not required (object is
1025  // known to be a heap object)
1026  void DispatchMap(Register obj,
1027  Register scratch,
1028  Handle<Map> map,
1029  Handle<Code> success,
1030  SmiCheckType smi_check_type);
1031 
1032  // Generates code for reporting that an illegal operation has
1033  // occurred.
1034  void IllegalOperation(int num_arguments);
1035 
1036 
1037  // Load and check the instance type of an object for being a string.
1038  // Loads the type into the second argument register.
1039  // Returns a condition that will be enabled if the object was a string.
1040  Condition IsObjectStringType(Register obj,
1041  Register type,
1042  Register result) {
1043  lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
1044  lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
1045  And(type, type, Operand(kIsNotStringMask));
1046  ASSERT_EQ(0, kStringTag);
1047  return eq;
1048  }
1049 
1050 
1051  // Picks out an array index from the hash field.
1052  // Register use:
1053  // hash - holds the index's hash. Clobbered.
1054  // index - holds the overwritten index on exit.
1055  void IndexFromHash(Register hash, Register index);
1056 
1057  // Get the number of least significant bits from a register.
1058  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1059  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
1060 
1061  // Load the value of a number object into a FPU double register. If the
1062  // object is not a number, a jump to the label not_number is performed
1063  // and the FPU double register is unchanged.
1064  void ObjectToDoubleFPURegister(
1065  Register object,
1066  FPURegister value,
1067  Register scratch1,
1068  Register scratch2,
1069  Register heap_number_map,
1070  Label* not_number,
1071  ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1072 
1073  // Load the value of a smi object into a FPU double register. The register
1074  // scratch1 can be the same register as smi in which case smi will hold the
1075  // untagged value afterwards.
1076  void SmiToDoubleFPURegister(Register smi,
1077  FPURegister value,
1078  Register scratch1);
1079 
1080  // -------------------------------------------------------------------------
1081  // Overflow handling functions.
1082  // Usage: first call the appropriate arithmetic function, then call one of the
1083  // jump functions with the overflow_dst register as the second parameter.
1084 
1085  void AdduAndCheckForOverflow(Register dst,
1086  Register left,
1087  Register right,
1088  Register overflow_dst,
1089  Register scratch = at);
1090 
1091  void SubuAndCheckForOverflow(Register dst,
1092  Register left,
1093  Register right,
1094  Register overflow_dst,
1095  Register scratch = at);
1096 
1097  void BranchOnOverflow(Label* label,
1098  Register overflow_check,
1099  BranchDelaySlot bd = PROTECT) {
1100  Branch(label, lt, overflow_check, Operand(zero_reg), bd);
1101  }
1102 
1103  void BranchOnNoOverflow(Label* label,
1104  Register overflow_check,
1105  BranchDelaySlot bd = PROTECT) {
1106  Branch(label, ge, overflow_check, Operand(zero_reg), bd);
1107  }
1108 
1109  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
1110  Ret(lt, overflow_check, Operand(zero_reg), bd);
1111  }
1112 
1113  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
1114  Ret(ge, overflow_check, Operand(zero_reg), bd);
1115  }
1116 
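// Editorial example of the usage pattern described above (registers and
// label hypothetical):
//   AdduAndCheckForOverflow(dst, left, right, overflow);
//   BranchOnOverflow(&on_overflow, overflow);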
1117  // -------------------------------------------------------------------------
1118  // Runtime calls.
1119 
1120  // See comments at the beginning of CEntryStub::Generate.
1121  inline void PrepareCEntryArgs(int num_args) {
1122  li(s0, num_args);
1123  li(s1, (num_args - 1) * kPointerSize);
1124  }
1125 
1126  inline void PrepareCEntryFunction(const ExternalReference& ref) {
1127  li(s2, Operand(ref));
1128  }
1129 
1130  // Call a code stub.
1131  void CallStub(CodeStub* stub,
1132  Condition cond = cc_always,
1133  Register r1 = zero_reg,
1134  const Operand& r2 = Operand(zero_reg),
1135  BranchDelaySlot bd = PROTECT);
1136 
1137  // Tail call a code stub (jump).
1138  void TailCallStub(CodeStub* stub);
1139 
1140  void CallJSExitStub(CodeStub* stub);
1141 
1142  // Call a runtime routine.
1143  void CallRuntime(const Runtime::Function* f, int num_arguments);
1144  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
1145 
1146  // Convenience function: Same as above, but takes the fid instead.
1147  void CallRuntime(Runtime::FunctionId fid, int num_arguments);
1148 
1149  // Convenience function: call an external reference.
1150  void CallExternalReference(const ExternalReference& ext,
1151  int num_arguments,
1152  BranchDelaySlot bd = PROTECT);
1153 
1154  // Tail call of a runtime routine (jump).
1155  // Like JumpToExternalReference, but also takes care of passing the number
1156  // of parameters.
1157  void TailCallExternalReference(const ExternalReference& ext,
1158  int num_arguments,
1159  int result_size);
1160 
1161  // Convenience function: tail call a runtime routine (jump).
1162  void TailCallRuntime(Runtime::FunctionId fid,
1163  int num_arguments,
1164  int result_size);
1165 
1166  int CalculateStackPassedWords(int num_reg_arguments,
1167  int num_double_arguments);
1168 
1169  // Before calling a C-function from generated code, align arguments on stack
1170  // and add space for the four mips argument slots.
1171  // After aligning the frame, non-register arguments must be stored on the
1172  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1173  // The argument count assumes all arguments are word sized.
1174  // Some compilers/platforms require the stack to be aligned when calling
1175  // C++ code.
1176  // Needs a scratch register to do some arithmetic. This register will be
1177  // trashed.
1178  void PrepareCallCFunction(int num_reg_arguments,
1179  int num_double_registers,
1180  Register scratch);
1181  void PrepareCallCFunction(int num_reg_arguments,
1182  Register scratch);
1183 
1184  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1185  // Arguments 5..n are stored on the stack using the following:
1186  // sw(t0, CFunctionArgumentOperand(5));
1187 
1188  // Calls a C function and cleans up the space for arguments allocated
1189  // by PrepareCallCFunction. The called function is not allowed to trigger a
1190  // garbage collection, since that might move the code and invalidate the
1191  // return address (unless this is somehow accounted for by the called
1192  // function).
1193  void CallCFunction(ExternalReference function, int num_arguments);
1194  void CallCFunction(Register function, int num_arguments);
1195  void CallCFunction(ExternalReference function,
1196  int num_reg_arguments,
1197  int num_double_arguments);
1198  void CallCFunction(Register function,
1199  int num_reg_arguments,
1200  int num_double_arguments);
1201  void GetCFunctionDoubleResult(const DoubleRegister dst);
1202 
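// Editorial sketch of the calling sequence described above, for a
// hypothetical two-argument C function 'ref':
//   PrepareCallCFunction(2, scratch);  // align sp, reserve the arg slots.
//   mov(a0, first_arg);
//   mov(a1, second_arg);
//   CallCFunction(ref, 2);             // call and restore the stack.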
1203  // There are two ways of passing double arguments on MIPS, depending on
1204  // whether soft or hard floating point ABI is used. These functions
1205  // abstract parameter passing for the three different ways we call
1206  // C functions from generated code.
1207  void SetCallCDoubleArguments(DoubleRegister dreg);
1208  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
1209  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
1210 
1211  // Calls an API function. Allocates HandleScope, extracts returned value
1212  // from handle and propagates exceptions. Restores context. stack_space
1213  // - space to be unwound on exit (includes the call JS arguments space and
1214  // the additional space allocated for the fast call).
1215  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
1216 
1217  // Jump to the builtin routine.
1218  void JumpToExternalReference(const ExternalReference& builtin,
1219  BranchDelaySlot bd = PROTECT);
1220 
1221  // Invoke specified builtin JavaScript function. Adds an entry to
1222  // the unresolved list if the name does not resolve.
1223  void InvokeBuiltin(Builtins::JavaScript id,
1224  InvokeFlag flag,
1225  const CallWrapper& call_wrapper = NullCallWrapper());
1226 
1227  // Store the code object for the given builtin in the target register and
1228  // setup the function in a1.
1229  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1230 
1231  // Store the function for the given builtin in the target register.
1232  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1233 
1234  struct Unresolved {
1235  int pc;
1236  uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
1237  const char* name;
1238  };
1239 
1240  Handle<Object> CodeObject() {
1241  ASSERT(!code_object_.is_null());
1242  return code_object_;
1243  }
1244 
1245  // -------------------------------------------------------------------------
1246  // StatsCounter support.
1247 
1248  void SetCounter(StatsCounter* counter, int value,
1249  Register scratch1, Register scratch2);
1250  void IncrementCounter(StatsCounter* counter, int value,
1251  Register scratch1, Register scratch2);
1252  void DecrementCounter(StatsCounter* counter, int value,
1253  Register scratch1, Register scratch2);
1254 
1255 
1256  // -------------------------------------------------------------------------
1257  // Debugging.
1258 
1259  // Calls Abort(msg) if the condition cc is not satisfied.
1260  // Use --debug_code to enable.
1261  void Assert(Condition cc, const char* msg, Register rs, Operand rt);
1262  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
1263  void AssertFastElements(Register elements);
1264 
1265  // Like Assert(), but always enabled.
1266  void Check(Condition cc, const char* msg, Register rs, Operand rt);
1267 
1268  // Print a message to stdout and abort execution.
1269  void Abort(const char* msg);
1270 
1271  // Verify restrictions about code generated in stubs.
1272  void set_generating_stub(bool value) { generating_stub_ = value; }
1273  bool generating_stub() { return generating_stub_; }
1274  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
1275  bool allow_stub_calls() { return allow_stub_calls_; }
1276  void set_has_frame(bool value) { has_frame_ = value; }
1277  bool has_frame() { return has_frame_; }
1278  inline bool AllowThisStubCall(CodeStub* stub);
1279 
1280  // ---------------------------------------------------------------------------
1281  // Number utilities.
1282 
1283  // Check whether the value of reg is a power of two and not zero. If not
1284  // control continues at the label not_power_of_two. If reg is a power of two
1285  // the register scratch contains the value of (reg - 1) when control falls
1286  // through.
1287  void JumpIfNotPowerOfTwoOrZero(Register reg,
1288  Register scratch,
1289  Label* not_power_of_two_or_zero);
1290 
1291  // -------------------------------------------------------------------------
1292  // Smi utilities.
1293 
1294  void SmiTag(Register reg) {
1295  Addu(reg, reg, reg);
1296  }
1297 
1298  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1299  void SmiTagCheckOverflow(Register reg, Register overflow);
1300  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1301 
1302  void SmiTag(Register dst, Register src) {
1303  Addu(dst, src, src);
1304  }
1305 
1306  void SmiUntag(Register reg) {
1307  sra(reg, reg, kSmiTagSize);
1308  }
1309 
1310  void SmiUntag(Register dst, Register src) {
1311  sra(dst, src, kSmiTagSize);
1312  }
1313 
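// Editorial note: with kSmiTag == 0 and kSmiTagSize == 1, tagging doubles
// the value (Addu(reg, reg, reg) is reg << 1) and untagging shifts it back:
//   SmiTag(t0);    // 5 -> 10, the smi encoding of 5.
//   SmiUntag(t0);  // 10 -> 5.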
1314  // Untag the source value into destination and jump if source is a smi.
1315  // Source and destination can be the same register.
1316  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1317 
1318  // Untag the source value into destination and jump if source is not a smi.
1319  // Source and destination can be the same register.
1320  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1321 
1322  // Jump if the register contains a smi.
1323  void JumpIfSmi(Register value,
1324  Label* smi_label,
1325  Register scratch = at,
1326  BranchDelaySlot bd = PROTECT);
1327 
1328  // Jump if the register contains a non-smi.
1329  void JumpIfNotSmi(Register value,
1330  Label* not_smi_label,
1331  Register scratch = at,
1332  BranchDelaySlot bd = PROTECT);
1333 
1334  // Jump if either of the registers contains a non-smi.
1335  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1336  // Jump if either of the registers contains a smi.
1337  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1338 
1339  // Abort execution if argument is a smi, enabled via --debug-code.
1340  void AssertNotSmi(Register object);
1341  void AssertSmi(Register object);
1342 
1343  // Abort execution if argument is not a string, enabled via --debug-code.
1344  void AssertString(Register object);
1345 
1346  // Abort execution if argument is not the root value with the given index,
1347  // enabled via --debug-code.
1348  void AssertRootValue(Register src,
1349  Heap::RootListIndex root_value_index,
1350  const char* message);
1351 
1352  // ---------------------------------------------------------------------------
1353  // HeapNumber utilities.
1354 
1355  void JumpIfNotHeapNumber(Register object,
1356  Register heap_number_map,
1357  Register scratch,
1358  Label* on_not_heap_number);
1359 
1360  // -------------------------------------------------------------------------
1361  // String utilities.
1362 
1363  // Checks if both instance types are sequential ASCII strings and jumps to
1364  // label if either is not.
1365  void JumpIfBothInstanceTypesAreNotSequentialAscii(
1366  Register first_object_instance_type,
1367  Register second_object_instance_type,
1368  Register scratch1,
1369  Register scratch2,
1370  Label* failure);
1371 
1372  // Check if instance type is sequential ASCII string and jump to label if
1373  // it is not.
1374  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
1375  Register scratch,
1376  Label* failure);
1377 
1378  // Test that both first and second are sequential ASCII strings.
1379  // Assume that they are non-smis.
1380  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
1381  Register second,
1382  Register scratch1,
1383  Register scratch2,
1384  Label* failure);
1385 
1386  // Test that both first and second are sequential ASCII strings.
1387  // Check that they are non-smis.
1388  void JumpIfNotBothSequentialAsciiStrings(Register first,
1389  Register second,
1390  Register scratch1,
1391  Register scratch2,
1392  Label* failure);
1393 
1394  void ClampUint8(Register output_reg, Register input_reg);
1395 
1396  void ClampDoubleToUint8(Register result_reg,
1397  DoubleRegister input_reg,
1398  DoubleRegister temp_double_reg);
1399 
1400 
1401  void LoadInstanceDescriptors(Register map, Register descriptors);
1402  void EnumLength(Register dst, Register map);
1403  void NumberOfOwnDescriptors(Register dst, Register map);
1404 
1405  template<typename Field>
1406  void DecodeField(Register reg) {
1407  static const int shift = Field::kShift;
1408  static const int mask = (Field::kMask >> shift) << kSmiTagSize;
1409  srl(reg, reg, shift);
1410  And(reg, reg, Operand(mask));
1411  }
1412 
1413  // Activation support.
1414  void EnterFrame(StackFrame::Type type);
1415  void LeaveFrame(StackFrame::Type type);
1416 
1417  // Patch the relocated value (lui/ori pair).
1418  void PatchRelocatedValue(Register li_location,
1419  Register scratch,
1420  Register new_value);
1421  // Get the relocated value (loaded data) from the lui/ori pair.
1422  void GetRelocatedValue(Register li_location,
1423  Register value,
1424  Register scratch);
1425 
1426  // Expects object in a0 and returns map with validated enum cache
1427  // in a0. Assumes that any other register can be used as a scratch.
1428  void CheckEnumCache(Register null_value, Label* call_runtime);
1429 
1430  private:
1431  void CallCFunctionHelper(Register function,
1432  int num_reg_arguments,
1433  int num_double_arguments);
1434 
1435  void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1436  void BranchShort(int16_t offset, Condition cond, Register rs,
1437  const Operand& rt,
1438  BranchDelaySlot bdslot = PROTECT);
1439  void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1440  void BranchShort(Label* L, Condition cond, Register rs,
1441  const Operand& rt,
1442  BranchDelaySlot bdslot = PROTECT);
1443  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1444  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
1445  const Operand& rt,
1446  BranchDelaySlot bdslot = PROTECT);
1447  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1448  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
1449  const Operand& rt,
1450  BranchDelaySlot bdslot = PROTECT);
1451  void J(Label* L, BranchDelaySlot bdslot);
1452  void Jr(Label* L, BranchDelaySlot bdslot);
1453  void Jalr(Label* L, BranchDelaySlot bdslot);
1454 
1455  // Helper functions for generating invokes.
1456  void InvokePrologue(const ParameterCount& expected,
1457  const ParameterCount& actual,
1458  Handle<Code> code_constant,
1459  Register code_reg,
1460  Label* done,
1461  bool* definitely_mismatches,
1462  InvokeFlag flag,
1463  const CallWrapper& call_wrapper,
1464  CallKind call_kind);
1465 
1466  // Get the code for the given builtin. Returns if able to resolve
1467  // the function in the 'resolved' flag.
1468  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
1469 
1470  void InitializeNewString(Register string,
1471  Register length,
1472  Heap::RootListIndex map_index,
1473  Register scratch1,
1474  Register scratch2);
1475 
1476  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1477  void InNewSpace(Register object,
1478  Register scratch,
1479  Condition cond, // eq for new space, ne otherwise.
1480  Label* branch);
1481 
1482  // Helper for finding the mark bits for an address. Afterwards, the
1483  // bitmap register points at the word with the mark bits and the mask
1484  // register holds the position of the first bit. Leaves addr_reg unchanged.
1485  inline void GetMarkBits(Register addr_reg,
1486  Register bitmap_reg,
1487  Register mask_reg);
1488 
1489  // Helper for throwing exceptions. Compute a handler address and jump to
1490  // it. See the implementation for register usage.
1491  void JumpToHandlerEntry();
1492 
1493  // Compute memory operands for safepoint stack slots.
1494  static int SafepointRegisterStackIndex(int reg_code);
1495  MemOperand SafepointRegisterSlot(Register reg);
1496  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1497 
1498  bool generating_stub_;
1499  bool allow_stub_calls_;
1500  bool has_frame_;
1501  // This handle will be patched with the code object on installation.
1502  Handle<Object> code_object_;
1503 
1504  // Needs access to SafepointRegisterStackIndex for optimized frame
1505  // traversal.
1506  friend class OptimizedFrame;
1507 };
1508 
1509 
1510 // The code patcher is used to patch (typically) small parts of code e.g. for
1511 // debugging and other types of instrumentation. When using the code patcher
1512 // the exact number of bytes specified must be emitted. It is not legal to emit
1513 // relocation information. If any of these constraints are violated it causes
1514 // an assertion to fail.
1515 class CodePatcher {
1516  public:
1517  CodePatcher(byte* address, int instructions);
1518  virtual ~CodePatcher();
1519 
1520  // Macro assembler to emit code.
1521  MacroAssembler* masm() { return &masm_; }
1522 
1523  // Emit an instruction directly.
1524  void Emit(Instr instr);
1525 
1526  // Emit an address directly.
1527  void Emit(Address addr);
1528 
1529  // Change the condition part of an instruction leaving the rest of the current
1530  // instruction unchanged.
1531  void ChangeBranchCondition(Condition cond);
1532 
1533  private:
1534  byte* address_; // The address of the code being patched.
1535  int instructions_; // Number of instructions of the expected patch size.
1536  int size_; // Number of bytes of the expected patch size.
1537  MacroAssembler masm_; // Macro assembler used to generate the code.
1538 };
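// Editorial sketch: typical CodePatcher use, overwriting a fixed number of
// instructions in place (address and operand hypothetical):
//   CodePatcher patcher(address, 2);
//   patcher.masm()->li(t9, Operand(imm), CONSTANT_SIZE);  // exactly 2 instrs.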
1539 
1540 
1541 
1542 #ifdef GENERATED_CODE_COVERAGE
1543 #define CODE_COVERAGE_STRINGIFY(x) #x
1544 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1545 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1546 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1547 #else
1548 #define ACCESS_MASM(masm) masm->
1549 #endif
1550 
1551 } } // namespace v8::internal
1552 
1553 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
#define COND_ARGS
static bool IsMarkedCode(Instr instr, int type)
void NumberOfOwnDescriptors(Register dst, Register map)
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size)
MemOperand GlobalObjectOperand()
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Move(FPURegister dst, Register src_low, Register src_high)
const Register r2
void Abort(const char *msg)
void RetOnOverflow(Register overflow_check, BranchDelaySlot bd=PROTECT)
void MarkCode(NopMarkerTypes type)
MemOperand ContextOperand(Register context, int index)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type, CompareMapMode mode=REQUIRE_EXACT_MAP)
void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd=PROTECT)
void CopyFields(Register dst, Register src, RegList temps, int field_count)
void AssertSmi(Register object)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst)
void Movn(Register rd, Register rs, Register rt)
const Register kRootRegister
void Movz(Register rd, Register rs, Register rt)
uint8_t byte
Definition: globals.h:156
void li(Register dst, Handle< Object > value, LiFlags mode=OPTIMIZE_SIZE)
void PrepareCEntryFunction(const ExternalReference &ref)
void MultiPush(RegList regs)
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void SmiToDoubleFPURegister(Register smi, FPURegister value, Register scratch1)
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
const Register sp
void MultiPop(RegList regs)
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void BranchOnNoOverflow(Label *label, Register overflow_check, BranchDelaySlot bd=PROTECT)
void sra(Register rt, Register rd, uint16_t sa)
void EnumLength(Register dst, Register map)
static int ActivationFrameAlignment()
void lbu(Register rd, const MemOperand &rs)
void LeaveFrame(StackFrame::Type type)
void CheckFastElements(Register map, Register scratch, Label *fail)
const int kRsFieldMask
void LoadGlobalFunction(int index, Register function)
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label *condition_met)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
const int kRtFieldMask
void Emit(Instr instr)
void ChangeBranchCondition(Condition cond)
void EmitECMATruncate(Register result, DwVfpRegister double_input, SwVfpRegister single_scratch, Register scratch, Register scratch2, Register scratch3)
const int kOpcodeMask
const int kPointerSize
Definition: globals.h:220
void CallCFunction(ExternalReference function, int num_arguments)
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void CheckFastSmiElements(Register map, Register scratch, Label *fail)
const int kHeapObjectTag
Definition: v8.h:4009
void Jump(Register target, Condition cond=al)
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void JumpIfDataObject(Register value, Register scratch, Label *not_data_object)
const int kRtShift
void Push(Register src1, Register src2)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void LoadHeapObject(Register dst, Handle< HeapObject > object)
void Throw(Register value)
void Move(FPURegister dst, FPURegister src)
void ConvertToInt32(Register source, Register dest, Register scratch, Register scratch2, DwVfpRegister double_scratch, Label *not_int32)
void Move(Register dst, Handle< Object > value)
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size)
void Movt(Register rd, Register rs, uint16_t cc=0)
const int kCArgSlotCount
void srl(Register rd, Register rt, uint16_t sa)
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void GetRelocatedValue(Register li_location, Register value, Register scratch)
void Floor_w_d(FPURegister fd, FPURegister fs)
const SwVfpRegister s0
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper, CallKind call_kind)
void SmiUntag(Register dst, Register src)
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits)
void Clz(Register rd, Register rs)
const int kSaShift
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
Definition: objects.h:456
void LoadContext(Register dst, int context_chain_length)
Condition IsObjectStringType(Register obj, Register type, Register result)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
static int CallSize(Register target, Condition cond=al)
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register receiver_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Label *fail)
void AssertFastElements(Register elements)
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits)
const int kSaFieldMask
void JumpIfNotBothSmi(Register reg1, Register reg2, Label *on_not_both_smi)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
void lw(Register rd, const MemOperand &rs)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Pop(Register src1, Register src2)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void Drop(int count, Condition cond=al)
const SwVfpRegister s1
InvokeFlag
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void Movf(Register rd, Register rs, uint16_t cc=0)
void IllegalOperation(int num_arguments)
void ObjectToDoubleFPURegister(Register object, FPURegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *not_number, ObjectToDoubleFlags flags=NO_OBJECT_TO_DOUBLE_FLAGS)
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void CallApiFunctionAndReturn(ExternalReference function, int stack_space)
static const int kMapOffset
Definition: objects.h:1261
int ToNumber(Register reg)
bool is(Register reg) const
const uint32_t kIsNotStringMask
Definition: objects.h:455
void LoadObject(Register result, Handle< Object > object)
const Register r1
void li(Register rd, Operand j, LiFlags mode=OPTIMIZE_SIZE)
void CallRuntimeSaveDoubles(Runtime::FunctionId id)
bool is_null() const
Definition: handles.h:87
void ThrowUncatchable(Register value)
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch)
MemOperand CFunctionArgumentOperand(int index)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
void AllocateInNewSpace(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
MemOperand FieldMemOperand(Register object, int offset)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, Register scratch)
void GetObjectType(Register function, Register map, Register type_reg)
void EmitOutOfInt32RangeTruncate(Register result, Register input_high, Register input_low, Register scratch)
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void GetNumberHash(Register t0, Register scratch)
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper, CallKind call_kind)
void CompareMapAndBranch(Register obj, Register scratch, Handle< Map > map, Label *early_success, Condition cond, Label *branch_to, CompareMapMode mode=REQUIRE_EXACT_MAP)
void MultiPopReversedFPU(RegList regs)
const int kSmiTagSize
Definition: v8.h:4015
static int GetCodeMarker(Instr instr)
void li(Register rd, int32_t j, LiFlags mode=OPTIMIZE_SIZE)
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1, Register object2, Register scratch1, Register scratch2, Label *failure)
void mfc1(Register rt, FPURegister fs)
void JumpIfBothInstanceTypesAreNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
void UndoAllocationInNewSpace(Register object, Register scratch)
void Ret(BranchDelaySlot bd, Condition cond=al, Register rs=zero_reg, const Operand &rt=Operand(zero_reg))
void LoadFromSafepointRegisterSlot(Register dst, Register src)
const int kRsShift
void Push(Register src, Condition cond, Register tst1, Register tst2)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
void Call(Register target, Condition cond=al)
void Trunc_w_d(FPURegister fd, FPURegister fs)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in 
the Print usage message
Definition: flags.cc:495
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Check(Condition cond, const char *msg)
void MultiPushFPU(RegList regs)
void LoadInitialArrayMap(Register function_in, Register scratch, Register map_out, bool can_have_holes)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void AssertRootValue(Register src, Heap::RootListIndex root_value_index, const char *message)
void MultiPushReversedFPU(RegList regs)
void BranchF(BranchDelaySlot bd, Label *target, Label *nan, Condition cc, FPURegister cmp1, FPURegister cmp2)
void EmitFPUTruncate(FPURoundingMode rounding_mode, FPURegister result, DoubleRegister double_input, Register scratch1, Register except_flag, CheckForInexactConversion check_inexact=kDontCheckForInexactConversion)
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void TailCallStub(CodeStub *stub, Condition cond=al)
CodePatcher(byte *address, int instructions)
void UntagAndJumpIfNotSmi(Register dst, Register src, Label *non_smi_case)
const Register no_reg
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void CallJSExitStub(CodeStub *stub)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void PatchRelocatedValue(Register li_location, Register scratch, Register new_value)
void CallStub(CodeStub *stub, Condition cond=al)
static FPURegister from_code(int code)
void IndexFromHash(Register hash, Register index)
void Round_w_d(FPURegister fd, FPURegister fs)
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
signed short int16_t
Definition: unicode.cc:45
void EnterExitFrame(bool save_doubles, int stack_space=0)
void MultiPushReversed(RegList regs)
void InitializeFieldsWithFiller(Register start_offset, Register end_offset, Register filler)
void RecordWriteContextSlot(Register context, int offset, Register value, Register scratch, RAStatus ra_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in 
the Print usage including flags
Definition: flags.cc:495
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
void SetCallKind(Register dst, CallKind kind)
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void MultiPopFPU(RegList regs)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
void Ceil_w_d(FPURegister fd, FPURegister fs)
void BranchF(Label *target, Label *nan, Condition cc, FPURegister cmp1, FPURegister cmp2, BranchDelaySlot bd=PROTECT)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
void EnterFrame(StackFrame::Type type)
void BranchOnOverflow(Label *label, Register overflow_check, BranchDelaySlot bd=PROTECT)
void CheckEnumCache(Register null_value, Label *call_runtime)
static const int kInstanceTypeOffset
Definition: objects.h:5158
void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required)
const Register r4
kPropertyAccessorsOffset kNamedPropertyHandlerOffset kInstanceTemplateOffset kAccessCheckInfoOffset kEvalFrominstructionsOffsetOffset kInstanceClassNameOffset flag
Definition: objects-inl.h:3923
void Move(Register dst_low, Register dst_high, FPURegister src)
void Move(Register dst, Register src)
void Push(Register src1, Register src2, Register src3)