v8  3.25.30(node0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
macro-assembler-arm.h
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
29 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
30 
31 #include "assembler.h"
32 #include "frames.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // ----------------------------------------------------------------------------
39 // Static helper functions
40 
41 // Generate a MemOperand for loading a field from an object.
42 inline MemOperand FieldMemOperand(Register object, int offset) {
43  return MemOperand(object, offset - kHeapObjectTag);
44 }
45 
46 
47 // Give alias names to registers
48 const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
49 const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
50 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
51 
52 // Flags used for AllocateHeapNumber
54  // Tag the result.
56  // Don't tag
58 };
59 
60 
64 
65 
66 Register GetRegisterThatIsNotOneOf(Register reg1,
67  Register reg2 = no_reg,
68  Register reg3 = no_reg,
69  Register reg4 = no_reg,
70  Register reg5 = no_reg,
71  Register reg6 = no_reg);
72 
73 
74 #ifdef DEBUG
75 bool AreAliased(Register reg1,
76  Register reg2,
77  Register reg3 = no_reg,
78  Register reg4 = no_reg,
79  Register reg5 = no_reg,
80  Register reg6 = no_reg);
81 #endif
82 
83 
87 };
88 
89 // MacroAssembler implements a collection of frequently used macros.
90 class MacroAssembler: public Assembler {
91  public:
92  // The isolate parameter can be NULL if the macro assembler should
93  // not use isolate-dependent functionality. In this case, it's the
94  // responsibility of the caller to never invoke such function on the
95  // macro assembler.
96  MacroAssembler(Isolate* isolate, void* buffer, int size);
97 
98  // Jump, Call, and Ret pseudo instructions implementing inter-working.
99  void Jump(Register target, Condition cond = al);
100  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
101  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
102  static int CallSize(Register target, Condition cond = al);
103  void Call(Register target, Condition cond = al);
104  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
105  static int CallSizeNotPredictableCodeSize(Address target,
106  RelocInfo::Mode rmode,
107  Condition cond = al);
108  void Call(Address target, RelocInfo::Mode rmode,
109  Condition cond = al,
111  int CallSize(Handle<Code> code,
112  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
114  Condition cond = al);
115  void Call(Handle<Code> code,
116  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
118  Condition cond = al,
120  void Ret(Condition cond = al);
121 
122  // Emit code to discard a non-negative number of pointer-sized elements
123  // from the stack, clobbering only the sp register.
124  void Drop(int count, Condition cond = al);
125 
126  void Ret(int drop, Condition cond = al);
127 
128  // Swap two registers. If the scratch register is omitted then a slightly
129  // less efficient form using xor instead of mov is emitted.
130  void Swap(Register reg1,
131  Register reg2,
132  Register scratch = no_reg,
133  Condition cond = al);
134 
135 
136  void And(Register dst, Register src1, const Operand& src2,
137  Condition cond = al);
138  void Ubfx(Register dst, Register src, int lsb, int width,
139  Condition cond = al);
140  void Sbfx(Register dst, Register src, int lsb, int width,
141  Condition cond = al);
142  // The scratch register is not used for ARMv7.
143  // scratch can be the same register as src (in which case it is trashed), but
144  // not the same as dst.
145  void Bfi(Register dst,
146  Register src,
147  Register scratch,
148  int lsb,
149  int width,
150  Condition cond = al);
151  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
152  void Usat(Register dst, int satpos, const Operand& src,
153  Condition cond = al);
154 
155  void Call(Label* target);
156  void Push(Register src) { push(src); }
157  void Pop(Register dst) { pop(dst); }
158 
159  // Register move. May do nothing if the registers are identical.
160  void Move(Register dst, Handle<Object> value);
161  void Move(Register dst, Register src, Condition cond = al);
162  void Move(DwVfpRegister dst, DwVfpRegister src);
163 
164  void Load(Register dst, const MemOperand& src, Representation r);
165  void Store(Register src, const MemOperand& dst, Representation r);
166 
167  // Load an object from the root table.
168  void LoadRoot(Register destination,
169  Heap::RootListIndex index,
170  Condition cond = al);
171  // Store an object to the root table.
172  void StoreRoot(Register source,
173  Heap::RootListIndex index,
174  Condition cond = al);
175 
176  // ---------------------------------------------------------------------------
177  // GC Support
178 
180  Register value,
181  Register address);
182 
186  };
187 
188  // Record in the remembered set the fact that we have a pointer to new space
189  // at the address pointed to by the addr register. Only works if addr is not
190  // in new space.
191  void RememberedSetHelper(Register object, // Used for debug code.
192  Register addr,
193  Register scratch,
194  SaveFPRegsMode save_fp,
195  RememberedSetFinalAction and_then);
196 
197  void CheckPageFlag(Register object,
198  Register scratch,
199  int mask,
200  Condition cc,
201  Label* condition_met);
202 
204  Register scratch,
205  Label* if_deprecated);
206 
207  // Check if object is in new space. Jumps if the object is not in new space.
208  // The register scratch can be object itself, but scratch will be clobbered.
210  Register scratch,
211  Label* branch) {
212  InNewSpace(object, scratch, ne, branch);
213  }
214 
215  // Check if object is in new space. Jumps if the object is in new space.
216  // The register scratch can be object itself, but it will be clobbered.
218  Register scratch,
219  Label* branch) {
220  InNewSpace(object, scratch, eq, branch);
221  }
222 
223  // Check if an object has a given incremental marking color.
224  void HasColor(Register object,
225  Register scratch0,
226  Register scratch1,
227  Label* has_color,
228  int first_bit,
229  int second_bit);
230 
231  void JumpIfBlack(Register object,
232  Register scratch0,
233  Register scratch1,
234  Label* on_black);
235 
236  // Checks the color of an object. If the object is already grey or black
237  // then we just fall through, since it is already live. If it is white and
238  // we can determine that it doesn't need to be scanned, then we just mark it
239  // black and fall through. For the rest we jump to the label so the
240  // incremental marker can fix its assumptions.
241  void EnsureNotWhite(Register object,
242  Register scratch1,
243  Register scratch2,
244  Register scratch3,
245  Label* object_is_white_and_not_data);
246 
247  // Detects conservatively whether an object is data-only, i.e. it does need to
248  // be scanned by the garbage collector.
249  void JumpIfDataObject(Register value,
250  Register scratch,
251  Label* not_data_object);
252 
253  // Notify the garbage collector that we wrote a pointer into an object.
254  // |object| is the object being stored into, |value| is the object being
255  // stored. value and scratch registers are clobbered by the operation.
256  // The offset is the offset from the start of the object, not the offset from
257  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
258  void RecordWriteField(
259  Register object,
260  int offset,
261  Register value,
262  Register scratch,
263  LinkRegisterStatus lr_status,
264  SaveFPRegsMode save_fp,
265  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
266  SmiCheck smi_check = INLINE_SMI_CHECK);
267 
268  // As above, but the offset has the tag presubtracted. For use with
269  // MemOperand(reg, off).
271  Register context,
272  int offset,
273  Register value,
274  Register scratch,
275  LinkRegisterStatus lr_status,
276  SaveFPRegsMode save_fp,
277  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
278  SmiCheck smi_check = INLINE_SMI_CHECK) {
279  RecordWriteField(context,
280  offset + kHeapObjectTag,
281  value,
282  scratch,
283  lr_status,
284  save_fp,
285  remembered_set_action,
286  smi_check);
287  }
288 
289  // For a given |object| notify the garbage collector that the slot |address|
290  // has been written. |value| is the object being stored. The value and
291  // address registers are clobbered by the operation.
292  void RecordWrite(
293  Register object,
294  Register address,
295  Register value,
296  LinkRegisterStatus lr_status,
297  SaveFPRegsMode save_fp,
298  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
299  SmiCheck smi_check = INLINE_SMI_CHECK);
300 
301  // Push a handle.
302  void Push(Handle<Object> handle);
303  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
304 
305  // Push two registers. Pushes leftmost register first (to highest address).
306  void Push(Register src1, Register src2, Condition cond = al) {
307  ASSERT(!src1.is(src2));
308  if (src1.code() > src2.code()) {
309  stm(db_w, sp, src1.bit() | src2.bit(), cond);
310  } else {
311  str(src1, MemOperand(sp, 4, NegPreIndex), cond);
312  str(src2, MemOperand(sp, 4, NegPreIndex), cond);
313  }
314  }
315 
316  // Push three registers. Pushes leftmost register first (to highest address).
317  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
318  ASSERT(!src1.is(src2));
319  ASSERT(!src2.is(src3));
320  ASSERT(!src1.is(src3));
321  if (src1.code() > src2.code()) {
322  if (src2.code() > src3.code()) {
323  stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
324  } else {
325  stm(db_w, sp, src1.bit() | src2.bit(), cond);
326  str(src3, MemOperand(sp, 4, NegPreIndex), cond);
327  }
328  } else {
329  str(src1, MemOperand(sp, 4, NegPreIndex), cond);
330  Push(src2, src3, cond);
331  }
332  }
333 
334  // Push four registers. Pushes leftmost register first (to highest address).
335  void Push(Register src1,
336  Register src2,
337  Register src3,
338  Register src4,
339  Condition cond = al) {
340  ASSERT(!src1.is(src2));
341  ASSERT(!src2.is(src3));
342  ASSERT(!src1.is(src3));
343  ASSERT(!src1.is(src4));
344  ASSERT(!src2.is(src4));
345  ASSERT(!src3.is(src4));
346  if (src1.code() > src2.code()) {
347  if (src2.code() > src3.code()) {
348  if (src3.code() > src4.code()) {
349  stm(db_w,
350  sp,
351  src1.bit() | src2.bit() | src3.bit() | src4.bit(),
352  cond);
353  } else {
354  stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
355  str(src4, MemOperand(sp, 4, NegPreIndex), cond);
356  }
357  } else {
358  stm(db_w, sp, src1.bit() | src2.bit(), cond);
359  Push(src3, src4, cond);
360  }
361  } else {
362  str(src1, MemOperand(sp, 4, NegPreIndex), cond);
363  Push(src2, src3, src4, cond);
364  }
365  }
366 
367  // Pop two registers. Pops rightmost register first (from lower address).
368  void Pop(Register src1, Register src2, Condition cond = al) {
369  ASSERT(!src1.is(src2));
370  if (src1.code() > src2.code()) {
371  ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
372  } else {
373  ldr(src2, MemOperand(sp, 4, PostIndex), cond);
374  ldr(src1, MemOperand(sp, 4, PostIndex), cond);
375  }
376  }
377 
378  // Pop three registers. Pops rightmost register first (from lower address).
379  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
380  ASSERT(!src1.is(src2));
381  ASSERT(!src2.is(src3));
382  ASSERT(!src1.is(src3));
383  if (src1.code() > src2.code()) {
384  if (src2.code() > src3.code()) {
385  ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
386  } else {
387  ldr(src3, MemOperand(sp, 4, PostIndex), cond);
388  ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
389  }
390  } else {
391  Pop(src2, src3, cond);
392  ldr(src1, MemOperand(sp, 4, PostIndex), cond);
393  }
394  }
395 
396  // Pop four registers. Pops rightmost register first (from lower address).
397  void Pop(Register src1,
398  Register src2,
399  Register src3,
400  Register src4,
401  Condition cond = al) {
402  ASSERT(!src1.is(src2));
403  ASSERT(!src2.is(src3));
404  ASSERT(!src1.is(src3));
405  ASSERT(!src1.is(src4));
406  ASSERT(!src2.is(src4));
407  ASSERT(!src3.is(src4));
408  if (src1.code() > src2.code()) {
409  if (src2.code() > src3.code()) {
410  if (src3.code() > src4.code()) {
411  ldm(ia_w,
412  sp,
413  src1.bit() | src2.bit() | src3.bit() | src4.bit(),
414  cond);
415  } else {
416  ldr(src4, MemOperand(sp, 4, PostIndex), cond);
417  ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
418  }
419  } else {
420  Pop(src3, src4, cond);
421  ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
422  }
423  } else {
424  Pop(src2, src3, src4, cond);
425  ldr(src1, MemOperand(sp, 4, PostIndex), cond);
426  }
427  }
428 
429  // Push a fixed frame, consisting of lr, fp, constant pool (if
430  // FLAG_enable_ool_constant_pool), context and JS function / marker id if
431  // marker_reg is a valid register.
432  void PushFixedFrame(Register marker_reg = no_reg);
433  void PopFixedFrame(Register marker_reg = no_reg);
434 
435  // Push and pop the registers that can hold pointers, as defined by the
436  // RegList constant kSafepointSavedRegisters.
437  void PushSafepointRegisters();
438  void PopSafepointRegisters();
441  // Store value in register src in the safepoint stack slot for
442  // register dst.
445  // Load the value of the src register from its safepoint stack slot
446  // into register dst.
448 
449  // Load two consecutive registers with two consecutive memory locations.
450  void Ldrd(Register dst1,
451  Register dst2,
452  const MemOperand& src,
453  Condition cond = al);
454 
455  // Store two consecutive registers to two consecutive memory locations.
456  void Strd(Register src1,
457  Register src2,
458  const MemOperand& dst,
459  Condition cond = al);
460 
461  // Ensure that FPSCR contains values needed by JavaScript.
462  // We need the NaNModeControlBit to be sure that operations like
463  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
464  // In VFP3 it will be always the Canonical NaN.
465  // In VFP2 it will be either the Canonical NaN or the negative version
466  // of the Canonical NaN. It doesn't matter if we have two values. The aim
467  // is to be sure to never generate the hole NaN.
468  void VFPEnsureFPSCRState(Register scratch);
469 
470  // If the value is a NaN, canonicalize the value else, do nothing.
471  void VFPCanonicalizeNaN(const DwVfpRegister dst,
472  const DwVfpRegister src,
473  const Condition cond = al);
475  const Condition cond = al) {
476  VFPCanonicalizeNaN(value, value, cond);
477  }
478 
479  // Compare double values and move the result to the normal condition flags.
480  void VFPCompareAndSetFlags(const DwVfpRegister src1,
481  const DwVfpRegister src2,
482  const Condition cond = al);
483  void VFPCompareAndSetFlags(const DwVfpRegister src1,
484  const double src2,
485  const Condition cond = al);
486 
487  // Compare double values and then load the fpscr flags to a register.
488  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
489  const DwVfpRegister src2,
490  const Register fpscr_flags,
491  const Condition cond = al);
492  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
493  const double src2,
494  const Register fpscr_flags,
495  const Condition cond = al);
496 
497  void Vmov(const DwVfpRegister dst,
498  const double imm,
499  const Register scratch = no_reg);
500 
501  void VmovHigh(Register dst, DwVfpRegister src);
502  void VmovHigh(DwVfpRegister dst, Register src);
503  void VmovLow(Register dst, DwVfpRegister src);
504  void VmovLow(DwVfpRegister dst, Register src);
505 
506  // Loads the number from object into dst register.
507  // If |object| is neither smi nor heap number, |not_number| is jumped to
508  // with |object| still intact.
509  void LoadNumber(Register object,
510  LowDwVfpRegister dst,
511  Register heap_number_map,
512  Register scratch,
513  Label* not_number);
514 
515  // Loads the number from object into double_dst in the double format.
516  // Control will jump to not_int32 if the value cannot be exactly represented
517  // by a 32-bit integer.
518  // Floating point value in the 32-bit integer range that are not exact integer
519  // won't be loaded.
520  void LoadNumberAsInt32Double(Register object,
521  DwVfpRegister double_dst,
522  Register heap_number_map,
523  Register scratch,
524  LowDwVfpRegister double_scratch,
525  Label* not_int32);
526 
527  // Loads the number from object into dst as a 32-bit integer.
528  // Control will jump to not_int32 if the object cannot be exactly represented
529  // by a 32-bit integer.
530  // Floating point value in the 32-bit integer range that are not exact integer
531  // won't be converted.
532  void LoadNumberAsInt32(Register object,
533  Register dst,
534  Register heap_number_map,
535  Register scratch,
536  DwVfpRegister double_scratch0,
537  LowDwVfpRegister double_scratch1,
538  Label* not_int32);
539 
540  // Generates function and stub prologue code.
541  void Prologue(PrologueFrameMode frame_mode);
542 
543  // Enter exit frame.
544  // stack_space - extra stack space, used for alignment before call to C.
545  void EnterExitFrame(bool save_doubles, int stack_space = 0);
546 
547  // Leave the current exit frame. Expects the return value in r0.
548  // Expect the number of values, pushed prior to the exit frame, to
549  // remove in a register (or no_reg, if there is nothing to remove).
550  void LeaveExitFrame(bool save_doubles,
551  Register argument_count,
552  bool restore_context);
553 
554  // Get the actual activation frame alignment for target environment.
555  static int ActivationFrameAlignment();
556 
557  void LoadContext(Register dst, int context_chain_length);
558 
559  // Conditionally load the cached Array transitioned map of type
560  // transitioned_kind from the native context if the map in register
561  // map_in_out is the cached Array map in the native context of
562  // expected_kind.
564  ElementsKind expected_kind,
565  ElementsKind transitioned_kind,
566  Register map_in_out,
567  Register scratch,
568  Label* no_map_match);
569 
570  void LoadGlobalFunction(int index, Register function);
571 
572  // Load the initial map from the global function. The registers
573  // function and map can be the same, function is then overwritten.
575  Register map,
576  Register scratch);
577 
579  ExternalReference roots_array_start =
580  ExternalReference::roots_array_start(isolate());
581  mov(kRootRegister, Operand(roots_array_start));
582  }
583 
584  // ---------------------------------------------------------------------------
585  // JavaScript invokes
586 
587  // Invoke the JavaScript function code by either calling or jumping.
588  void InvokeCode(Register code,
589  const ParameterCount& expected,
590  const ParameterCount& actual,
592  const CallWrapper& call_wrapper);
593 
594  // Invoke the JavaScript function in the given register. Changes the
595  // current context to the context in the function before invoking.
596  void InvokeFunction(Register function,
597  const ParameterCount& actual,
599  const CallWrapper& call_wrapper);
600 
601  void InvokeFunction(Register function,
602  const ParameterCount& expected,
603  const ParameterCount& actual,
605  const CallWrapper& call_wrapper);
606 
607  void InvokeFunction(Handle<JSFunction> function,
608  const ParameterCount& expected,
609  const ParameterCount& actual,
611  const CallWrapper& call_wrapper);
612 
613  void IsObjectJSObjectType(Register heap_object,
614  Register map,
615  Register scratch,
616  Label* fail);
617 
619  Register scratch,
620  Label* fail);
621 
622  void IsObjectJSStringType(Register object,
623  Register scratch,
624  Label* fail);
625 
626  void IsObjectNameType(Register object,
627  Register scratch,
628  Label* fail);
629 
630 #ifdef ENABLE_DEBUGGER_SUPPORT
631  // ---------------------------------------------------------------------------
632  // Debugger Support
633 
634  void DebugBreak();
635 #endif
636 
637  // ---------------------------------------------------------------------------
638  // Exception handling
639 
640  // Push a new try handler and link into try handler chain.
641  void PushTryHandler(StackHandler::Kind kind, int handler_index);
642 
643  // Unlink the stack handler on top of the stack from the try handler chain.
644  // Must preserve the result register.
645  void PopTryHandler();
646 
647  // Passes thrown value to the handler of top of the try handler chain.
648  void Throw(Register value);
649 
650  // Propagates an uncatchable exception to the top of the current JS stack's
651  // handler chain.
652  void ThrowUncatchable(Register value);
653 
654  // Throw a message string as an exception.
655  void Throw(BailoutReason reason);
656 
657  // Throw a message string as an exception if a condition is not true.
658  void ThrowIf(Condition cc, BailoutReason reason);
659 
660  // ---------------------------------------------------------------------------
661  // Inline caching support
662 
663  // Generate code for checking access rights - used for security checks
664  // on access to global objects across environments. The holder register
665  // is left untouched, whereas both scratch registers are clobbered.
666  void CheckAccessGlobalProxy(Register holder_reg,
667  Register scratch,
668  Label* miss);
669 
670  void GetNumberHash(Register t0, Register scratch);
671 
672  void LoadFromNumberDictionary(Label* miss,
673  Register elements,
674  Register key,
675  Register result,
676  Register t0,
677  Register t1,
678  Register t2);
679 
680 
681  inline void MarkCode(NopMarkerTypes type) {
682  nop(type);
683  }
684 
685  // Check if the given instruction is a 'type' marker.
686  // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type))
687  // These instructions are generated to mark special location in the code,
688  // like some special IC code.
689  static inline bool IsMarkedCode(Instr instr, int type) {
690  ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
691  return IsNop(instr, type);
692  }
693 
694 
695  static inline int GetCodeMarker(Instr instr) {
696  int dst_reg_offset = 12;
697  int dst_mask = 0xf << dst_reg_offset;
698  int src_mask = 0xf;
699  int dst_reg = (instr & dst_mask) >> dst_reg_offset;
700  int src_reg = instr & src_mask;
701  uint32_t non_register_mask = ~(dst_mask | src_mask);
702  uint32_t mov_mask = al | 13 << 21;
703 
704  // Return <n> if we have a mov rn rn, else return -1.
705  int type = ((instr & non_register_mask) == mov_mask) &&
706  (dst_reg == src_reg) &&
707  (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
708  ? src_reg
709  : -1;
710  ASSERT((type == -1) ||
711  ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
712  return type;
713  }
714 
715 
716  // ---------------------------------------------------------------------------
717  // Allocation support
718 
719  // Allocate an object in new space or old pointer space. The object_size is
720  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
721  // is passed. If the space is exhausted control continues at the gc_required
722  // label. The allocated object is returned in result. If the flag
723  // tag_allocated_object is true the result is tagged as a heap object.
724  // All registers are clobbered also when control continues at the gc_required
725  // label.
726  void Allocate(int object_size,
727  Register result,
728  Register scratch1,
729  Register scratch2,
730  Label* gc_required,
732 
733  void Allocate(Register object_size,
734  Register result,
735  Register scratch1,
736  Register scratch2,
737  Label* gc_required,
739 
740  // Undo allocation in new space. The object passed and objects allocated after
741  // it will no longer be allocated. The caller must make sure that no pointers
742  // are left to the object(s) no longer allocated as they would be invalid when
743  // allocation is undone.
744  void UndoAllocationInNewSpace(Register object, Register scratch);
745 
746 
747  void AllocateTwoByteString(Register result,
748  Register length,
749  Register scratch1,
750  Register scratch2,
751  Register scratch3,
752  Label* gc_required);
753  void AllocateAsciiString(Register result,
754  Register length,
755  Register scratch1,
756  Register scratch2,
757  Register scratch3,
758  Label* gc_required);
760  Register length,
761  Register scratch1,
762  Register scratch2,
763  Label* gc_required);
764  void AllocateAsciiConsString(Register result,
765  Register length,
766  Register scratch1,
767  Register scratch2,
768  Label* gc_required);
770  Register length,
771  Register scratch1,
772  Register scratch2,
773  Label* gc_required);
775  Register length,
776  Register scratch1,
777  Register scratch2,
778  Label* gc_required);
779 
780  // Allocates a heap number or jumps to the gc_required label if the young
781  // space is full and a scavenge is needed. All registers are clobbered also
782  // when control continues at the gc_required label.
783  void AllocateHeapNumber(Register result,
784  Register scratch1,
785  Register scratch2,
786  Register heap_number_map,
787  Label* gc_required,
788  TaggingMode tagging_mode = TAG_RESULT);
790  DwVfpRegister value,
791  Register scratch1,
792  Register scratch2,
793  Register heap_number_map,
794  Label* gc_required);
795 
796  // Copies a fixed number of fields of heap objects from src to dst.
797  void CopyFields(Register dst,
798  Register src,
799  LowDwVfpRegister double_scratch,
800  int field_count);
801 
802  // Copies a number of bytes from src to dst. All registers are clobbered. On
803  // exit src and dst will point to the place just after where the last byte was
804  // read or written and length will be zero.
805  void CopyBytes(Register src,
806  Register dst,
807  Register length,
808  Register scratch);
809 
810  // Initialize fields with filler values. Fields starting at |start_offset|
811  // not including end_offset are overwritten with the value in |filler|. At
812  // the end the loop, |start_offset| takes the value of |end_offset|.
813  void InitializeFieldsWithFiller(Register start_offset,
814  Register end_offset,
815  Register filler);
816 
817  // ---------------------------------------------------------------------------
818  // Support functions.
819 
820  // Try to get function prototype of a function and puts the value in
821  // the result register. Checks that the function really is a
822  // function and jumps to the miss label if the fast checks fail. The
823  // function register will be untouched; the other registers may be
824  // clobbered.
825  void TryGetFunctionPrototype(Register function,
826  Register result,
827  Register scratch,
828  Label* miss,
829  bool miss_on_bound_function = false);
830 
831  // Compare object type for heap object. heap_object contains a non-Smi
832  // whose object type should be compared with the given type. This both
833  // sets the flags and leaves the object type in the type_reg register.
834  // It leaves the map in the map register (unless the type_reg and map register
835  // are the same register). It leaves the heap object in the heap_object
836  // register unless the heap_object register is the same register as one of the
837  // other registers.
838  // Type_reg can be no_reg. In that case ip is used.
839  void CompareObjectType(Register heap_object,
840  Register map,
841  Register type_reg,
842  InstanceType type);
843 
844  // Compare object type for heap object. Branch to false_label if type
845  // is lower than min_type or greater than max_type.
846  // Load map into the register map.
847  void CheckObjectTypeRange(Register heap_object,
848  Register map,
849  InstanceType min_type,
850  InstanceType max_type,
851  Label* false_label);
852 
853  // Compare instance type in a map. map contains a valid map object whose
854  // object type should be compared with the given type. This both
855  // sets the flags and leaves the object type in the type_reg register.
857  Register type_reg,
858  InstanceType type);
859 
860 
861  // Check if a map for a JSObject indicates that the object has fast elements.
862  // Jump to the specified label if it does not.
864  Register scratch,
865  Label* fail);
866 
867  // Check if a map for a JSObject indicates that the object can have both smi
868  // and HeapObject elements. Jump to the specified label if it does not.
870  Register scratch,
871  Label* fail);
872 
873  // Check if a map for a JSObject indicates that the object has fast smi only
874  // elements. Jump to the specified label if it does not.
876  Register scratch,
877  Label* fail);
878 
879  // Check to see if maybe_number can be stored as a double in
880  // FastDoubleElements. If it can, store it at the index specified by key in
881  // the FastDoubleElements array elements. Otherwise jump to fail.
882  void StoreNumberToDoubleElements(Register value_reg,
883  Register key_reg,
884  Register elements_reg,
885  Register scratch1,
886  LowDwVfpRegister double_scratch,
887  Label* fail,
888  int elements_offset = 0);
889 
890  // Compare an object's map with the specified map and its transitioned
891  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
892  // set with result of map compare. If multiple map compares are required, the
893  // compare sequences branches to early_success.
894  void CompareMap(Register obj,
895  Register scratch,
897  Label* early_success);
898 
899  // As above, but the map of the object is already loaded into the register
900  // which is preserved by the code generated.
901  void CompareMap(Register obj_map,
903  Label* early_success);
904 
905  // Check if the map of an object is equal to a specified map and branch to
906  // label if not. Skip the smi check if not required (object is known to be a
907  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
908  // against maps that are ElementsKind transition maps of the specified map.
909  void CheckMap(Register obj,
910  Register scratch,
912  Label* fail,
913  SmiCheckType smi_check_type);
914 
915 
916  void CheckMap(Register obj,
917  Register scratch,
918  Heap::RootListIndex index,
919  Label* fail,
920  SmiCheckType smi_check_type);
921 
922 
923  // Check if the map of an object is equal to a specified map and branch to a
924  // specified target if equal. Skip the smi check if not required (object is
925  // known to be a heap object)
926  void DispatchMap(Register obj,
927  Register scratch,
929  Handle<Code> success,
930  SmiCheckType smi_check_type);
931 
932 
933  // Compare the object in a register to a value from the root list.
934  // Uses the ip register as scratch.
936 
937 
938  // Load and check the instance type of an object for being a string.
939  // Loads the type into the second argument register.
940  // Returns a condition that will be enabled if the object was a string
941  // and the passed-in condition passed. If the passed-in condition failed
942  // then flags remain unchanged.
944  Register type,
945  Condition cond = al) {
946  ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
947  ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
948  tst(type, Operand(kIsNotStringMask), cond);
949  ASSERT_EQ(0, kStringTag);
950  return eq;
951  }
952 
953 
954  // Generates code for reporting that an illegal operation has
955  // occurred.
956  void IllegalOperation(int num_arguments);
957 
958  // Picks out an array index from the hash field.
959  // Register use:
960  // hash - holds the index's hash. Clobbered.
961  // index - holds the overwritten index on exit.
962  void IndexFromHash(Register hash, Register index);
963 
964  // Get the number of least significant bits from a register
965  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
966  void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
967 
968  // Load the value of a smi object into a double register.
969  // The register value must be between d0 and d15.
970  void SmiToDouble(LowDwVfpRegister value, Register smi);
971 
972  // Check if a double can be exactly represented as a signed 32-bit integer.
973  // Z flag set to one if true.
974  void TestDoubleIsInt32(DwVfpRegister double_input,
975  LowDwVfpRegister double_scratch);
976 
977  // Try to convert a double to a signed 32-bit integer.
978  // Z flag set to one and result assigned if the conversion is exact.
979  void TryDoubleToInt32Exact(Register result,
980  DwVfpRegister double_input,
981  LowDwVfpRegister double_scratch);
982 
 983  // Floor a double and write the value to the result register.
984  // Go to exact if the conversion is exact (to be able to test -0),
985  // fall through calling code if an overflow occurred, else go to done.
986  // In return, input_high is loaded with high bits of input.
987  void TryInt32Floor(Register result,
988  DwVfpRegister double_input,
989  Register input_high,
990  LowDwVfpRegister double_scratch,
991  Label* done,
992  Label* exact);
993 
994  // Performs a truncating conversion of a floating point number as used by
995  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
996  // succeeds, otherwise falls through if result is saturated. On return
997  // 'result' either holds answer, or is clobbered on fall through.
998  //
999  // Only public for the test code in test-code-stubs-arm.cc.
1001  DwVfpRegister input,
1002  Label* done);
1003 
1004  // Performs a truncating conversion of a floating point number as used by
1005  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1006  // Exits with 'result' holding the answer.
1007  void TruncateDoubleToI(Register result, DwVfpRegister double_input);
1008 
1009  // Performs a truncating conversion of a heap number as used by
1010  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1011  // must be different registers. Exits with 'result' holding the answer.
1012  void TruncateHeapNumberToI(Register result, Register object);
1013 
1014  // Converts the smi or heap number in object to an int32 using the rules
1015  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1016  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1017  // different registers.
1018  void TruncateNumberToI(Register object,
1019  Register result,
1020  Register heap_number_map,
1021  Register scratch1,
1022  Label* not_int32);
1023 
1024  // Check whether d16-d31 are available on the CPU. The result is given by the
1025  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
1026  void CheckFor32DRegs(Register scratch);
1027 
1028  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
1029  // values to location, saving [d0..(d15|d31)].
1030  void SaveFPRegs(Register location, Register scratch);
1031 
1032  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
1033  // values to location, restoring [d0..(d15|d31)].
1034  void RestoreFPRegs(Register location, Register scratch);
1035 
1036  // ---------------------------------------------------------------------------
1037  // Runtime calls
1038 
1039  // Call a code stub.
1040  void CallStub(CodeStub* stub,
1042  Condition cond = al);
1043 
1044  // Call a code stub.
1045  void TailCallStub(CodeStub* stub, Condition cond = al);
1046 
1047  // Call a runtime routine.
1048  void CallRuntime(const Runtime::Function* f,
1049  int num_arguments,
1050  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1052  const Runtime::Function* function = Runtime::FunctionForId(id);
1053  CallRuntime(function, function->nargs, kSaveFPRegs);
1054  }
1055 
1056  // Convenience function: Same as above, but takes the fid instead.
1058  int num_arguments,
1059  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1060  CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1061  }
1062 
1063  // Convenience function: call an external reference.
1064  void CallExternalReference(const ExternalReference& ext,
1065  int num_arguments);
1066 
1067  // Tail call of a runtime routine (jump).
1068  // Like JumpToExternalReference, but also takes care of passing the number
1069  // of parameters.
1070  void TailCallExternalReference(const ExternalReference& ext,
1071  int num_arguments,
1072  int result_size);
1073 
1074  // Convenience function: tail call a runtime routine (jump).
1076  int num_arguments,
1077  int result_size);
1078 
1079  int CalculateStackPassedWords(int num_reg_arguments,
1080  int num_double_arguments);
1081 
1082  // Before calling a C-function from generated code, align arguments on stack.
1083  // After aligning the frame, non-register arguments must be stored in
1084  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1085  // are word sized. If double arguments are used, this function assumes that
1086  // all double arguments are stored before core registers; otherwise the
1087  // correct alignment of the double values is not guaranteed.
1088  // Some compilers/platforms require the stack to be aligned when calling
1089  // C++ code.
1090  // Needs a scratch register to do some arithmetic. This register will be
1091  // trashed.
1092  void PrepareCallCFunction(int num_reg_arguments,
1093  int num_double_registers,
1094  Register scratch);
1095  void PrepareCallCFunction(int num_reg_arguments,
1096  Register scratch);
1097 
1098  // There are two ways of passing double arguments on ARM, depending on
1099  // whether soft or hard floating point ABI is used. These functions
1100  // abstract parameter passing for the three different ways we call
1101  // C functions from generated code.
1104  void MovToFloatResult(DwVfpRegister src);
1105 
1106  // Calls a C function and cleans up the space for arguments allocated
1107  // by PrepareCallCFunction. The called function is not allowed to trigger a
1108  // garbage collection, since that might move the code and invalidate the
1109  // return address (unless this is somehow accounted for by the called
1110  // function).
1111  void CallCFunction(ExternalReference function, int num_arguments);
1112  void CallCFunction(Register function, int num_arguments);
1113  void CallCFunction(ExternalReference function,
1114  int num_reg_arguments,
1115  int num_double_arguments);
1116  void CallCFunction(Register function,
1117  int num_reg_arguments,
1118  int num_double_arguments);
1119 
1122 
1123  // Calls an API function. Allocates HandleScope, extracts returned value
1124  // from handle and propagates exceptions. Restores context. stack_space
1125  // - space to be unwound on exit (includes the call JS arguments space and
1126  // the additional space allocated for the fast call).
1127  void CallApiFunctionAndReturn(Register function_address,
1128  ExternalReference thunk_ref,
1129  int stack_space,
1130  MemOperand return_value_operand,
1131  MemOperand* context_restore_operand);
1132 
1133  // Jump to a runtime routine.
1134  void JumpToExternalReference(const ExternalReference& builtin);
1135 
1136  // Invoke specified builtin JavaScript function. Adds an entry to
1137  // the unresolved list if the name does not resolve.
1139  InvokeFlag flag,
1140  const CallWrapper& call_wrapper = NullCallWrapper());
1141 
1142  // Store the code object for the given builtin in the target register and
1143  // setup the function in r1.
1145 
1146  // Store the function for the given builtin in the target register.
1148 
1150  ASSERT(!code_object_.is_null());
1151  return code_object_;
1152  }
1153 
1154 
1155  // Emit code for a truncating division by a constant. The dividend register is
1156  // unchanged and ip gets clobbered. Dividend and result must be different.
1157  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
1158 
1159  // ---------------------------------------------------------------------------
1160  // StatsCounter support
1161 
1162  void SetCounter(StatsCounter* counter, int value,
1163  Register scratch1, Register scratch2);
1164  void IncrementCounter(StatsCounter* counter, int value,
1165  Register scratch1, Register scratch2);
1166  void DecrementCounter(StatsCounter* counter, int value,
1167  Register scratch1, Register scratch2);
1168 
1169 
1170  // ---------------------------------------------------------------------------
1171  // Debugging
1172 
1173  // Calls Abort(msg) if the condition cond is not satisfied.
1174  // Use --debug_code to enable.
1175  void Assert(Condition cond, BailoutReason reason);
1176  void AssertFastElements(Register elements);
1177 
1178  // Like Assert(), but always enabled.
1179  void Check(Condition cond, BailoutReason reason);
1180 
1181  // Print a message to stdout and abort execution.
1182  void Abort(BailoutReason msg);
1183 
1184  // Verify restrictions about code generated in stubs.
1185  void set_generating_stub(bool value) { generating_stub_ = value; }
1186  bool generating_stub() { return generating_stub_; }
1187  void set_has_frame(bool value) { has_frame_ = value; }
1188  bool has_frame() { return has_frame_; }
1189  inline bool AllowThisStubCall(CodeStub* stub);
1190 
1191  // EABI variant for double arguments in use.
1193 #ifdef __arm__
1194  return OS::ArmUsingHardFloat();
1195 #elif USE_EABI_HARDFLOAT
1196  return true;
1197 #else
1198  return false;
1199 #endif
1200  }
1201 
1202  // ---------------------------------------------------------------------------
1203  // Number utilities
1204 
1205  // Check whether the value of reg is a power of two and not zero. If not
1206  // control continues at the label not_power_of_two. If reg is a power of two
1207  // the register scratch contains the value of (reg - 1) when control falls
1208  // through.
1210  Register scratch,
1211  Label* not_power_of_two_or_zero);
1212  // Check whether the value of reg is a power of two and not zero.
1213  // Control falls through if it is, with scratch containing the mask
1214  // value (reg - 1).
1215  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1216  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1217  // strictly positive but not a power of two.
1219  Register scratch,
1220  Label* zero_and_neg,
1221  Label* not_power_of_two);
1222 
1223  // ---------------------------------------------------------------------------
1224  // Smi utilities
1225 
  // Tag the integer in |reg| as a smi in place.
  // reg + reg is a one-instruction left-shift-by-one (smi tag size is one
  // bit with a zero tag); pass SetCC to have the V flag report signed
  // overflow, i.e. a value that does not fit in a smi.
  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  // Tag the integer in |src| as a smi and place the result in |dst|.
  // Same doubling trick as the in-place overload; SetCC makes the V flag
  // signal overflow (value too large for a smi).
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }
1232 
 1233  // Try to convert int32 to smi. If the value is too large, preserve
1234  // the original value and jump to not_a_smi. Destroys scratch and
1235  // sets flags.
  // In-place variant: tag |reg| as a smi, or jump to |not_a_smi| with the
  // original value preserved. Delegates to the two-register overload.
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  // Tag |src| as a smi into |reg|, branching to |not_a_smi| if the value
  // overflows the smi range. Tags into the scratch register ip first so
  // that |src| is left untouched on the failure path; the doubling add
  // sets V (vs) on signed overflow.
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);
    mov(reg, ip);
  }
1244 
1245 
  // Untag the smi in |reg| in place, leaving the plain integer value.
  // Operand::SmiUntag encodes the right shift by the smi tag size; SetCC
  // additionally updates the condition flags from the result.
  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  // Untag the smi in |src| into |dst| (dst and src may be the same
  // register). See the in-place overload above for the encoding.
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }
1252 
1253  // Untag the source value into destination and jump if source is a smi.
 1254  // Source and destination can be the same register.
1255  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1256 
1257  // Untag the source value into destination and jump if source is not a smi.
 1258  // Source and destination can be the same register.
1259  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1260 
 1261  // Test if the register contains a smi (Z set / condition eq if true).
  inline void SmiTst(Register value) {
    // AND with the tag mask: a smi has a clear tag bit, so the result is
    // zero and Z is set (condition eq) exactly when |value| is a smi.
    tst(value, Operand(kSmiTagMask));
  }
  // Test for "smi and non-negative" in one instruction: both the tag bit
  // and the sign bit must be clear, so eq (Z set) means value is a smi >= 0.
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
1268  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));  // Z set (eq) when the tag bit is clear.
    b(eq, smi_label);
  }
 1273  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));  // Z clear (ne) when the tag bit is set.
    b(ne, not_smi_label);
  }
1278  // Jump if either of the registers contain a non-smi.
1279  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1280  // Jump if either of the registers contain a smi.
1281  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1282 
1283  // Abort execution if argument is a smi, enabled via --debug-code.
1284  void AssertNotSmi(Register object);
1285  void AssertSmi(Register object);
1286 
1287  // Abort execution if argument is not a string, enabled via --debug-code.
1288  void AssertString(Register object);
1289 
1290  // Abort execution if argument is not a name, enabled via --debug-code.
1291  void AssertName(Register object);
1292 
1293  // Abort execution if argument is not undefined or an AllocationSite, enabled
1294  // via --debug-code.
1295  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1296 
1297  // Abort execution if reg is not the root value with the given index,
1298  // enabled via --debug-code.
1299  void AssertIsRoot(Register reg, Heap::RootListIndex index);
1300 
1301  // ---------------------------------------------------------------------------
1302  // HeapNumber utilities
1303 
1304  void JumpIfNotHeapNumber(Register object,
1305  Register heap_number_map,
1306  Register scratch,
1307  Label* on_not_heap_number);
1308 
1309  // ---------------------------------------------------------------------------
1310  // String utilities
1311 
1312  // Generate code to do a lookup in the number string cache. If the number in
1313  // the register object is found in the cache the generated code falls through
1314  // with the result in the result register. The object and the result register
1315  // can be the same. If the number is not found in the cache the code jumps to
1316  // the label not_found with only the content of register object unchanged.
1317  void LookupNumberStringCache(Register object,
1318  Register result,
1319  Register scratch1,
1320  Register scratch2,
1321  Register scratch3,
1322  Label* not_found);
1323 
1324  // Checks if both objects are sequential ASCII strings and jumps to label
1325  // if either is not. Assumes that neither object is a smi.
1327  Register object2,
1328  Register scratch1,
1329  Register scratch2,
1330  Label* failure);
1331 
1332  // Checks if both objects are sequential ASCII strings and jumps to label
1333  // if either is not.
1335  Register second,
1336  Register scratch1,
1337  Register scratch2,
1338  Label* not_flat_ascii_strings);
1339 
1340  // Checks if both instance types are sequential ASCII strings and jumps to
1341  // label if either is not.
1343  Register first_object_instance_type,
1344  Register second_object_instance_type,
1345  Register scratch1,
1346  Register scratch2,
1347  Label* failure);
1348 
1349  // Check if instance type is sequential ASCII string and jump to label if
1350  // it is not.
1352  Register scratch,
1353  Label* failure);
1354 
1355  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
1356 
1357  void EmitSeqStringSetCharCheck(Register string,
1358  Register index,
1359  Register value,
1360  uint32_t encoding_mask);
1361 
1362  // ---------------------------------------------------------------------------
1363  // Patching helpers.
1364 
1365  // Get the location of a relocated constant (its address in the constant pool)
1366  // from its load site.
1367  void GetRelocatedValueLocation(Register ldr_location,
1368  Register result);
1369 
1370 
1371  void ClampUint8(Register output_reg, Register input_reg);
1372 
1373  void ClampDoubleToUint8(Register result_reg,
1374  DwVfpRegister input_reg,
1375  LowDwVfpRegister double_scratch);
1376 
1377 
1378  void LoadInstanceDescriptors(Register map, Register descriptors);
1379  void EnumLength(Register dst, Register map);
1381 
  // Extract a bit field (described by a BitField-style class providing
  // kShift and kMask) from |reg| in place.
  // NOTE(review): the mask is re-shifted left by kSmiTagSize before the
  // AND, so the decoded field appears to be left in smi form (low tag bit
  // clear) rather than as a raw integer — presumably callers expect a smi
  // result when |reg| held a smi-tagged word; confirm against call sites.
  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = (Field::kMask >> shift) << kSmiTagSize;
    mov(reg, Operand(reg, LSR, shift));
    and_(reg, reg, Operand(mask));
  }
1389 
1390  // Activation support.
1391  void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
1392  // Returns the pc offset at which the frame ends.
1393  int LeaveFrame(StackFrame::Type type);
1394 
1395  // Expects object in r0 and returns map with validated enum cache
1396  // in r0. Assumes that any other register can be used as a scratch.
1397  void CheckEnumCache(Register null_value, Label* call_runtime);
1398 
1399  // AllocationMemento support. Arrays may have an associated
1400  // AllocationMemento object that can be checked for in order to pretransition
1401  // to another type.
1402  // On entry, receiver_reg should point to the array object.
1403  // scratch_reg gets clobbered.
1404  // If allocation info is present, condition flags are set to eq.
1405  void TestJSArrayForAllocationMemento(Register receiver_reg,
1406  Register scratch_reg,
1407  Label* no_memento_found);
1408 
1410  Register scratch_reg,
1411  Label* memento_found) {
1412  Label no_memento_found;
1413  TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1414  &no_memento_found);
1415  b(eq, memento_found);
1416  bind(&no_memento_found);
1417  }
1418 
1419  // Jumps to found label if a prototype map has dictionary elements.
1420  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1421  Register scratch1, Label* found);
1422 
1423  private:
1424  void CallCFunctionHelper(Register function,
1425  int num_reg_arguments,
1426  int num_double_arguments);
1427 
1428  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1429 
1430  // Helper functions for generating invokes.
1431  void InvokePrologue(const ParameterCount& expected,
1432  const ParameterCount& actual,
1433  Handle<Code> code_constant,
1434  Register code_reg,
1435  Label* done,
1436  bool* definitely_mismatches,
1437  InvokeFlag flag,
1438  const CallWrapper& call_wrapper);
1439 
1440  void InitializeNewString(Register string,
1441  Register length,
1442  Heap::RootListIndex map_index,
1443  Register scratch1,
1444  Register scratch2);
1445 
1446  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1447  void InNewSpace(Register object,
1448  Register scratch,
1449  Condition cond, // eq for new space, ne otherwise.
1450  Label* branch);
1451 
1452  // Helper for finding the mark bits for an address. Afterwards, the
1453  // bitmap register points at the word with the mark bits and the mask
1454  // the position of the first bit. Leaves addr_reg unchanged.
1455  inline void GetMarkBits(Register addr_reg,
1456  Register bitmap_reg,
1457  Register mask_reg);
1458 
1459  // Helper for throwing exceptions. Compute a handler address and jump to
1460  // it. See the implementation for register usage.
1461  void JumpToHandlerEntry();
1462 
1463  // Compute memory operands for safepoint stack slots.
1464  static int SafepointRegisterStackIndex(int reg_code);
1465  MemOperand SafepointRegisterSlot(Register reg);
1466  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1467 
1468  // Loads the constant pool pointer (pp) register.
1469  void LoadConstantPoolPointerRegister();
1470 
1471  bool generating_stub_;
1472  bool has_frame_;
1473  // This handle will be patched with the code object on installation.
1474  Handle<Object> code_object_;
1475 
1476  // Needs access to SafepointRegisterStackIndex for compiled frame
1477  // traversal.
1478  friend class StandardFrame;
1479 };
1480 
1481 
1482 // The code patcher is used to patch (typically) small parts of code e.g. for
1483 // debugging and other types of instrumentation. When using the code patcher
1484 // the exact number of bytes specified must be emitted. It is not legal to emit
1485 // relocation information. If any of these constraints are violated it causes
1486 // an assertion to fail.
1488  public:
1492  };
1493 
1494  CodePatcher(byte* address,
1495  int instructions,
1496  FlushICache flush_cache = FLUSH);
1497  virtual ~CodePatcher();
1498 
1499  // Macro assembler to emit code.
1500  MacroAssembler* masm() { return &masm_; }
1501 
1502  // Emit an instruction directly.
1503  void Emit(Instr instr);
1504 
1505  // Emit an address directly.
1506  void Emit(Address addr);
1507 
1508  // Emit the condition part of an instruction leaving the rest of the current
1509  // instruction unchanged.
1510  void EmitCondition(Condition cond);
1511 
1512  private:
1513  byte* address_; // The address of the code being patched.
1514  int size_; // Number of bytes of the expected patch size.
1515  MacroAssembler masm_; // Macro assembler used to generate the code.
1516  FlushICache flush_cache_; // Whether to flush the I cache after patching.
1517 };
1518 
1519 
1521  public:
1523  : masm_(masm),
1524  type_(type),
1525  old_has_frame_(masm->has_frame()),
1526  old_constant_pool_available_(masm->is_constant_pool_available()) {
1527  masm->set_has_frame(true);
1528  masm->set_constant_pool_available(true);
1529  if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
1530  masm->EnterFrame(type, !old_constant_pool_available_);
1531  }
1532  }
1533 
1535  masm_->LeaveFrame(type_);
1536  masm_->set_has_frame(old_has_frame_);
1537  masm_->set_constant_pool_available(old_constant_pool_available_);
1538  }
1539 
1540  // Normally we generate the leave-frame code when this object goes
1541  // out of scope. Sometimes we may need to generate the code somewhere else
1542  // in addition. Calling this will achieve that, but the object stays in
1543  // scope, the MacroAssembler is still marked as being in a frame scope, and
1544  // the code will be generated again when it goes out of scope.
1546  ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
1547  masm_->LeaveFrame(type_);
1548  }
1549 
1550  private:
1551  MacroAssembler* masm_;
1552  StackFrame::Type type_;
1553  bool old_has_frame_;
1554  bool old_constant_pool_available_;
1555 
1556  DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
1557 };
1558 
1559 
 1560 // Class for scoping the unavailability of constant pool access.
1562  public:
1564  : masm_(masm),
1565  old_constant_pool_available_(masm->is_constant_pool_available()) {
1566  if (FLAG_enable_ool_constant_pool) {
1567  masm_->set_constant_pool_available(false);
1568  }
1569  }
1571  if (FLAG_enable_ool_constant_pool) {
1572  masm_->set_constant_pool_available(old_constant_pool_available_);
1573  }
1574  }
1575 
1576  private:
1577  MacroAssembler* masm_;
1578  int old_constant_pool_available_;
1579 
1580  DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
1581 };
1582 
1583 
1584 // -----------------------------------------------------------------------------
1585 // Static helper functions.
1586 
1587 inline MemOperand ContextOperand(Register context, int index) {
1588  return MemOperand(context, Context::SlotOffset(index));
1589 }
1590 
1591 
1594 }
1595 
1596 
1597 #ifdef GENERATED_CODE_COVERAGE
1598 #define CODE_COVERAGE_STRINGIFY(x) #x
1599 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1600 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1601 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1602 #else
1603 #define ACCESS_MASM(masm) masm->
1604 #endif
1605 
1606 
1607 } } // namespace v8::internal
1608 
1609 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
byte * Address
Definition: globals.h:186
void TrySmiTag(Register reg, Label *not_a_smi)
void SmiUntag(Register reg, SBit s=LeaveCC)
void ClampUint8(Register output_reg, Register input_reg)
Isolate * isolate() const
Definition: assembler.h:62
void LoadNumber(Register object, LowDwVfpRegister dst, Register heap_number_map, Register scratch, Label *not_number)
const intptr_t kSmiTagMask
Definition: v8.h:5480
const intptr_t kSmiSignMask
Definition: v8globals.h:41
static int SlotOffset(int index)
Definition: contexts.h:498
void GetRelocatedValueLocation(Register ldr_location, Register result)
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void SmiTag(Register reg, SBit s=LeaveCC)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
CodePatcher(byte *address, int instructions, FlushICache flush_cache=FLUSH)
Condition IsObjectStringType(Register obj, Register type, Condition cond=al)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void mov(Register rd, Register rt)
void IncrementalMarkingRecordWriteHelper(Register object, Register value, Register address)
const Register cp
void LoadNumberAsInt32Double(Register object, DwVfpRegister double_dst, Register heap_number_map, Register scratch, LowDwVfpRegister double_scratch, Label *not_int32)
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Bfc(Register dst, Register src, int lsb, int width, Condition cond=al)
void JumpIfInNewSpace(Register object, Register scratch, Label *branch)
void AssertString(Register object)
const int kRegister_r7_Code
void IsObjectJSStringType(Register object, Register scratch, Label *fail)
static TypeFeedbackId None()
Definition: utils.h:1149
void JumpToExternalReference(const ExternalReference &builtin)
void UntagAndJumpIfSmi(Register dst, Register src, Label *smi_case)
static bool ArmUsingHardFloat()
void LoadInstanceDescriptors(Register map, Register descriptors)
void AllocateAsciiString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void JumpIfNotBothSequentialAsciiStrings(Register first, Register second, Register scratch1, Register scratch2, Label *not_flat_ascii_strings)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register t0, Register t1, Register t2)
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label *found)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type)
void Store(Register src, const MemOperand &dst, Representation r)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void b(int branch_offset, Condition cond=al)
void JumpIfSmi(Register value, Label *smi_label)
void MovFromFloatParameter(DwVfpRegister dst)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
void TryInt32Floor(Register result, DwVfpRegister double_input, Register input_high, LowDwVfpRegister double_scratch, Label *done, Label *exact)
TypeImpl< ZoneTypeConfig > Type
bool AllowThisStubCall(CodeStub *stub)
int int32_t
Definition: unicode.cc:47
void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch, Label *not_power_of_two_or_zero)
void ldrb(Register dst, const MemOperand &src, Condition cond=al)
void SmiToDouble(LowDwVfpRegister value, Register smi)
void EnterFrame(StackFrame::Type type, bool load_constant_pool=false)
void StoreToSafepointRegisterSlot(Register src, Register dst)
void LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context)
void NonNegativeSmiTst(Register value)
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
static const Function * FunctionForId(FunctionId id)
Definition: runtime.cc:15154
void Bfi(Register dst, Register src, Register scratch, int lsb, int width, Condition cond=al)
void Swap(Register reg1, Register reg2, Register scratch=no_reg, Condition cond=al)
void RestoreFPRegs(Register location, Register scratch)
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void ThrowIf(Condition cc, BailoutReason reason)
#define ASSERT(condition)
Definition: checks.h:329
FrameAndConstantPoolScope(MacroAssembler *masm, StackFrame::Type type)
void CompareMap(Register obj, Register scratch, Handle< Map > map, Label *early_success)
void AssertNotSmi(Register object)
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void SmiTag(Register dst, Register src, SBit s=LeaveCC)
void stm(BlockAddrMode am, Register base, RegList src, Condition cond=al)
void PushTryHandler(StackHandler::Kind kind, int handler_index)
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label *no_map_match)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static bool IsMarkedCode(Instr instr, int type)
void NumberOfOwnDescriptors(Register dst, Register map)
void CheckObjectTypeRange(Register heap_object, Register map, InstanceType min_type, InstanceType max_type, Label *false_label)
MemOperand GlobalObjectOperand()
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void MarkCode(NopMarkerTypes type)
void Load(Register dst, const MemOperand &src, Representation r)
MemOperand ContextOperand(Register context, int index)
const Register pp
void AssertSmi(Register object)
void CompareRoot(Register obj, Heap::RootListIndex index)
void JumpIfNotUniqueName(Register reg, Label *not_unique_name)
void Pop(Register src1, Register src2, Register src3, Register src4, Condition cond=al)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst)
void TruncatingDiv(Register result, Register dividend, int32_t divisor)
void EmitSeqStringSetCharCheck(Register string, Register index, Register value, uint32_t encoding_mask)
kInstanceClassNameOffset flag
Definition: objects-inl.h:5115
void Abort(BailoutReason msg)
const Register kRootRegister
void IsObjectNameType(Register object, Register scratch, Label *fail)
uint8_t byte
Definition: globals.h:185
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void Pop(Register src1, Register src2, Condition cond=al)
void MovFromFloatResult(DwVfpRegister dst)
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
const Register sp
void Vmov(const DwVfpRegister dst, const double imm, const Register scratch=no_reg)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
void RecordWriteContextSlot(Register context, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found)
void EnumLength(Register dst, Register map)
static int ActivationFrameAlignment()
void SmiUntag(Register dst, Register src, SBit s=LeaveCC)
void SaveFPRegs(Register location, Register scratch)
void TruncateNumberToI(Register object, Register result, Register heap_number_map, Register scratch1, Label *not_int32)
void MovToFloatResult(DwVfpRegister src)
void CheckFastElements(Register map, Register scratch, Label *fail)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
Definition: flags.cc:665
void TrySmiTag(Register reg, Register src, Label *not_a_smi)
void LoadGlobalFunction(int index, Register function)
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label *condition_met)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
PrologueFrameMode
Definition: frames.h:957
void Emit(Instr instr)
const Register ip
void CallStub(CodeStub *stub, TypeFeedbackId ast_id=TypeFeedbackId::None(), Condition cond=al)
void CallCFunction(ExternalReference function, int num_arguments)
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void CheckFastSmiElements(Register map, Register scratch, Label *fail)
const int kHeapObjectTag
Definition: v8.h:5473
void Jump(Register target, Condition cond=al)
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void JumpIfDataObject(Register value, Register scratch, Label *not_data_object)
void TruncateHeapNumberToI(Register result, Register object)
void Allocate(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void VmovLow(Register dst, DwVfpRegister src)
void Throw(Register value)
void Move(Register dst, Handle< Object > value)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void VFPEnsureFPSCRState(Register scratch)
const int kRegister_r8_Code
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void str(Register src, const MemOperand &dst, Condition cond=al)
void Push(Register src1, Register src2, Register src3, Condition cond=al)
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond=al)
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits)
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
Definition: objects.h:598
void LoadContext(Register dst, int context_chain_length)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
static int CallSize(Register target, Condition cond=al)
void CallApiFunctionAndReturn(Register function_address, ExternalReference thunk_ref, int stack_space, MemOperand return_value_operand, MemOperand *context_restore_operand)
void AssertFastElements(Register elements)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits)
void CheckMapDeprecated(Handle< Map > map, Register scratch, Label *if_deprecated)
void JumpIfNotBothSmi(Register reg1, Register reg2, Label *on_not_both_smi)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
void ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void Drop(int count, Condition cond=al)
const int kRegister_r10_Code
InvokeFlag
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void IllegalOperation(int num_arguments)
AllocationFlags
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void CopyFields(Register dst, Register src, LowDwVfpRegister double_scratch, int field_count)
void Ldrd(Register dst1, Register dst2, const MemOperand &src, Condition cond=al)
static const int kMapOffset
Definition: objects.h:1890
bool is(Register reg) const
void VmovHigh(Register dst, DwVfpRegister src)
void LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label *not_found)
void TruncateDoubleToI(Register result, DwVfpRegister double_input)
const uint32_t kIsNotStringMask
Definition: objects.h:597
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2=no_reg, Register reg3=no_reg, Register reg4=no_reg, Register reg5=no_reg, Register reg6=no_reg)
void VFPCompareAndLoadFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Register fpscr_flags, const Condition cond=al)
void CallRuntimeSaveDoubles(Runtime::FunctionId id)
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch, Label *zero_and_neg, Label *not_power_of_two)
void PushFixedFrame(Register marker_reg=no_reg)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
void ThrowUncatchable(Register value)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
MemOperand FieldMemOperand(Register object, int offset)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, Register scratch)
void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2)
void LoadNumberAsInt32(Register object, Register dst, Register heap_number_map, Register scratch, DwVfpRegister double_scratch0, LowDwVfpRegister double_scratch1, Label *not_int32)
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void GetNumberHash(Register t0, Register scratch)
void CallRuntime(const Runtime::Function *f, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
const int kSmiTagSize
Definition: v8.h:5479
static int GetCodeMarker(Instr instr)
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1, Register object2, Register scratch1, Register scratch2, Label *failure)
void EmitCondition(Condition cond)
void VFPCanonicalizeNaN(const DwVfpRegister value, const Condition cond=al)
void JumpIfBothInstanceTypesAreNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
void MovToFloatParameter(DwVfpRegister src)
void UndoAllocationInNewSpace(Register object, Register scratch)
void Push(Register src1, Register src2, Register src3, Register src4, Condition cond=al)
void Prologue(PrologueFrameMode frame_mode)
void LoadFromSafepointRegisterSlot(Register dst, Register src)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, Register scratch_reg, Label *memento_found)
void CheckFor32DRegs(Register scratch)
void Call(Register target, Condition cond=al)
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void TryDoubleToInt32Exact(Register result, DwVfpRegister double_input, LowDwVfpRegister double_scratch)
void Check(Condition cond, BailoutReason reason)
void CallRuntime(Runtime::FunctionId id, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, Label *done)
void Assert(Condition cond, BailoutReason reason)
static int CallSizeNotPredictableCodeSize(Address target, RelocInfo::Mode rmode, Condition cond=al)
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void TestDoubleIsInt32(DwVfpRegister double_input, LowDwVfpRegister double_scratch)
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, LowDwVfpRegister double_scratch, Label *fail, int elements_offset=0)
void Pop(Register src1, Register src2, Register src3, Condition cond=al)
void TailCallStub(CodeStub *stub, Condition cond=al)
HeapObject * obj
void UntagAndJumpIfNotSmi(Register dst, Register src, Label *non_smi_case)
void AssertIsRoot(Register reg, Heap::RootListIndex index)
void AssertName(Register object)
const Register no_reg
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
int LeaveFrame(StackFrame::Type type)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void PopFixedFrame(Register marker_reg=no_reg)
void IndexFromHash(Register hash, Register index)
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
void EnterExitFrame(bool save_doubles, int stack_space=0)
void InitializeFieldsWithFiller(Register start_offset, Register end_offset, Register filler)
void set_constant_pool_available(bool available)
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
void Usat(Register dst, int satpos, const Operand &src, Condition cond=al)
void Push(Register src1, Register src2, Condition cond=al)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
void tst(Register src1, const Operand &src2, Condition cond=al)
void Strd(Register src1, Register src2, const MemOperand &dst, Condition cond=al)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
void CheckEnumCache(Register null_value, Label *call_runtime)
static const int kInstanceTypeOffset
Definition: objects.h:6459
void AssertUndefinedOrAllocationSite(Register object, Register scratch)
void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required)