v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
macro-assembler-arm.h
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
29 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
30 
31 #include "assembler.h"
32 #include "frames.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // ----------------------------------------------------------------------------
39 // Static helper functions
40 
41 // Generate a MemOperand for loading a field from an object.
42 inline MemOperand FieldMemOperand(Register object, int offset) {
43  return MemOperand(object, offset - kHeapObjectTag);
44 }
45 
46 
// Build an operand that untags a Smi on the fly: an arithmetic shift right
// by kSmiTagSize recovers the integer value from a tagged Smi register.
inline Operand SmiUntagOperand(Register object) {
  return Operand(object, ASR, kSmiTagSize);
}
50 
51 
52 
// Give alias names to registers with fixed roles in the JS calling
// convention. Register is a POD struct, hence the aggregate initializers.
const Register cp = { 8 };  // JavaScript context pointer.
const Register kRootRegister = { 10 };  // Roots array pointer.
56 
57 // Flags used for the AllocateInNewSpace functions.
59  // No special flags.
61  // Return the pointer to the allocated already tagged as a heap object.
62  TAG_OBJECT = 1 << 0,
63  // The content of the result register already contains the allocation top in
64  // new space.
66  // Specify that the requested size of the space to allocate is specified in
67  // words instead of bytes.
68  SIZE_IN_WORDS = 1 << 2
69 };
70 
71 // Flags used for AllocateHeapNumber
73  // Tag the result.
75  // Don't tag
77 };
78 
79 // Flags used for the ObjectToDoubleVFPRegister function.
81  // No special flags.
83  // Object is known to be a non smi.
84  OBJECT_NOT_SMI = 1 << 0,
85  // Don't load NaNs or infinities, branch to the non number case instead.
87 };
88 
89 
93 
94 
#ifdef DEBUG
// Debug-only helper: reports whether any two of the given registers refer
// to the same physical register, so callers can assert that scratch/temp
// registers are distinct. NOTE(review): presumably arguments left at the
// no_reg default are excluded from the check — verify in the definition.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg);
#endif
103 
104 
108 };
109 
110 // MacroAssembler implements a collection of frequently used macros.
111 class MacroAssembler: public Assembler {
112  public:
113  // The isolate parameter can be NULL if the macro assembler should
114  // not use isolate-dependent functionality. In this case, it's the
115  // responsibility of the caller to never invoke such function on the
116  // macro assembler.
117  MacroAssembler(Isolate* isolate, void* buffer, int size);
118 
119  // Jump, Call, and Ret pseudo instructions implementing inter-working.
120  void Jump(Register target, Condition cond = al);
121  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
122  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
123  static int CallSize(Register target, Condition cond = al);
124  void Call(Register target, Condition cond = al);
125  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
126  static int CallSizeNotPredictableCodeSize(Address target,
127  RelocInfo::Mode rmode,
128  Condition cond = al);
129  void Call(Address target, RelocInfo::Mode rmode,
130  Condition cond = al,
132  int CallSize(Handle<Code> code,
133  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
135  Condition cond = al);
136  void Call(Handle<Code> code,
137  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
139  Condition cond = al,
141  void Ret(Condition cond = al);
142 
143  // Emit code to discard a non-negative number of pointer-sized elements
144  // from the stack, clobbering only the sp register.
145  void Drop(int count, Condition cond = al);
146 
147  void Ret(int drop, Condition cond = al);
148 
149  // Swap two registers. If the scratch register is omitted then a slightly
150  // less efficient form using xor instead of mov is emitted.
151  void Swap(Register reg1,
152  Register reg2,
153  Register scratch = no_reg,
154  Condition cond = al);
155 
156 
157  void And(Register dst, Register src1, const Operand& src2,
158  Condition cond = al);
159  void Ubfx(Register dst, Register src, int lsb, int width,
160  Condition cond = al);
161  void Sbfx(Register dst, Register src, int lsb, int width,
162  Condition cond = al);
163  // The scratch register is not used for ARMv7.
164  // scratch can be the same register as src (in which case it is trashed), but
165  // not the same as dst.
166  void Bfi(Register dst,
167  Register src,
168  Register scratch,
169  int lsb,
170  int width,
171  Condition cond = al);
172  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
173  void Usat(Register dst, int satpos, const Operand& src,
174  Condition cond = al);
175 
176  void Call(Label* target);
177 
178  // Register move. May do nothing if the registers are identical.
179  void Move(Register dst, Handle<Object> value);
180  void Move(Register dst, Register src, Condition cond = al);
181  void Move(DoubleRegister dst, DoubleRegister src);
182 
183  // Load an object from the root table.
184  void LoadRoot(Register destination,
185  Heap::RootListIndex index,
186  Condition cond = al);
187  // Store an object to the root table.
188  void StoreRoot(Register source,
189  Heap::RootListIndex index,
190  Condition cond = al);
191 
192  void LoadHeapObject(Register dst, Handle<HeapObject> object);
193 
194  void LoadObject(Register result, Handle<Object> object) {
195  if (object->IsHeapObject()) {
196  LoadHeapObject(result, Handle<HeapObject>::cast(object));
197  } else {
198  Move(result, object);
199  }
200  }
201 
202  // ---------------------------------------------------------------------------
203  // GC Support
204 
206  Register value,
207  Register address);
208 
212  };
213 
214  // Record in the remembered set the fact that we have a pointer to new space
215  // at the address pointed to by the addr register. Only works if addr is not
216  // in new space.
217  void RememberedSetHelper(Register object, // Used for debug code.
218  Register addr,
219  Register scratch,
220  SaveFPRegsMode save_fp,
221  RememberedSetFinalAction and_then);
222 
223  void CheckPageFlag(Register object,
224  Register scratch,
225  int mask,
226  Condition cc,
227  Label* condition_met);
228 
229  // Check if object is in new space. Jumps if the object is not in new space.
230  // The register scratch can be object itself, but scratch will be clobbered.
232  Register scratch,
233  Label* branch) {
234  InNewSpace(object, scratch, ne, branch);
235  }
236 
237  // Check if object is in new space. Jumps if the object is in new space.
238  // The register scratch can be object itself, but it will be clobbered.
240  Register scratch,
241  Label* branch) {
242  InNewSpace(object, scratch, eq, branch);
243  }
244 
245  // Check if an object has a given incremental marking color.
246  void HasColor(Register object,
247  Register scratch0,
248  Register scratch1,
249  Label* has_color,
250  int first_bit,
251  int second_bit);
252 
253  void JumpIfBlack(Register object,
254  Register scratch0,
255  Register scratch1,
256  Label* on_black);
257 
258  // Checks the color of an object. If the object is already grey or black
259  // then we just fall through, since it is already live. If it is white and
260  // we can determine that it doesn't need to be scanned, then we just mark it
261  // black and fall through. For the rest we jump to the label so the
262  // incremental marker can fix its assumptions.
263  void EnsureNotWhite(Register object,
264  Register scratch1,
265  Register scratch2,
266  Register scratch3,
267  Label* object_is_white_and_not_data);
268 
269  // Detects conservatively whether an object is data-only, i.e. it does need to
270  // be scanned by the garbage collector.
271  void JumpIfDataObject(Register value,
272  Register scratch,
273  Label* not_data_object);
274 
275  // Notify the garbage collector that we wrote a pointer into an object.
276  // |object| is the object being stored into, |value| is the object being
277  // stored. value and scratch registers are clobbered by the operation.
278  // The offset is the offset from the start of the object, not the offset from
279  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
280  void RecordWriteField(
281  Register object,
282  int offset,
283  Register value,
284  Register scratch,
285  LinkRegisterStatus lr_status,
286  SaveFPRegsMode save_fp,
287  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
288  SmiCheck smi_check = INLINE_SMI_CHECK);
289 
290  // As above, but the offset has the tag presubtracted. For use with
291  // MemOperand(reg, off).
293  Register context,
294  int offset,
295  Register value,
296  Register scratch,
297  LinkRegisterStatus lr_status,
298  SaveFPRegsMode save_fp,
299  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
300  SmiCheck smi_check = INLINE_SMI_CHECK) {
301  RecordWriteField(context,
302  offset + kHeapObjectTag,
303  value,
304  scratch,
305  lr_status,
306  save_fp,
307  remembered_set_action,
308  smi_check);
309  }
310 
311  // For a given |object| notify the garbage collector that the slot |address|
312  // has been written. |value| is the object being stored. The value and
313  // address registers are clobbered by the operation.
314  void RecordWrite(
315  Register object,
316  Register address,
317  Register value,
318  LinkRegisterStatus lr_status,
319  SaveFPRegsMode save_fp,
320  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
321  SmiCheck smi_check = INLINE_SMI_CHECK);
322 
323  // Push a handle.
324  void Push(Handle<Object> handle);
325 
326  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));  // Pushing the same register twice is a bug.
    if (src1.code() > src2.code()) {
      // Descending register codes allow a single stm: stm stores the
      // lowest-numbered register at the lowest address, so src2 ends up
      // on top of the stack, matching the two-str sequence below.
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      // Codes are not descending; emit two individual pre-decrement stores.
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }
336 
337  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    // All three registers must be pairwise distinct.
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        // Strictly descending codes: one stm stores all three with src1 at
        // the highest address and src3 on top of the stack.
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        // src3 breaks the ordering; store src1/src2 together, then push
        // src3 on top.
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      // src1 must land at the highest address, so push it first and
      // delegate the remaining pair to the two-register overload.
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }
354 
355  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1,
            Register src2,
            Register src3,
            Register src4,
            Condition cond = al) {
    // All four registers must be pairwise distinct.
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    ASSERT(!src1.is(src4));
    ASSERT(!src2.is(src4));
    ASSERT(!src3.is(src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          // Strictly descending codes: a single stm stores all four, with
          // src1 at the highest address and src4 on top of the stack.
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          // src4 breaks the ordering; store the first three together,
          // then push src4 on top.
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        // src3 breaks the ordering; store src1/src2 together, then
        // delegate the remaining pair.
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      // src1 must land at the highest address, so push it first and
      // delegate the remaining three.
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }
387 
388  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));  // Popping into the same register twice is a bug.
    if (src1.code() > src2.code()) {
      // ldm loads the lowest-numbered register from the lowest address, so
      // src2 is taken from the top of the stack — the inverse of Push.
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      // Codes are not descending; pop individually, top of stack first.
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }
398 
399  // Pop three registers. Pops rightmost register first (from lower address).
400  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
401  ASSERT(!src1.is(src2));
402  ASSERT(!src2.is(src3));
403  ASSERT(!src1.is(src3));
404  if (src1.code() > src2.code()) {
405  if (src2.code() > src3.code()) {
406  ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
407  } else {
408  ldr(src3, MemOperand(sp, 4, PostIndex), cond);
409  ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
410  }
411  } else {
412  Pop(src2, src3, cond);
413  str(src1, MemOperand(sp, 4, PostIndex), cond);
414  }
415  }
416 
417  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1,
           Register src2,
           Register src3,
           Register src4,
           Condition cond = al) {
    // All four registers must be pairwise distinct.
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    ASSERT(!src1.is(src4));
    ASSERT(!src2.is(src4));
    ASSERT(!src3.is(src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          // Strictly descending codes: one ldm restores all four (ldm
          // loads the lowest-numbered register from the lowest address,
          // so src4 comes off the top of the stack).
          ldm(ia_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          // src4 cannot join the ldm register list; pop it separately
          // first, then restore the remaining three with one ldm.
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        // Pop the top pair first, then restore src1/src2 with one ldm.
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      // src1 comes off the stack last (it was pushed first).
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }
449 
450  // Push and pop the registers that can hold pointers, as defined by the
451  // RegList constant kSafepointSavedRegisters.
452  void PushSafepointRegisters();
453  void PopSafepointRegisters();
456  // Store value in register src in the safepoint stack slot for
457  // register dst.
460  // Load the value of the src register from its safepoint stack slot
461  // into register dst.
463 
464  // Load two consecutive registers with two consecutive memory locations.
465  void Ldrd(Register dst1,
466  Register dst2,
467  const MemOperand& src,
468  Condition cond = al);
469 
470  // Store two consecutive registers to two consecutive memory locations.
471  void Strd(Register src1,
472  Register src2,
473  const MemOperand& dst,
474  Condition cond = al);
475 
476  // Clear specified FPSCR bits.
477  void ClearFPSCRBits(const uint32_t bits_to_clear,
478  const Register scratch,
479  const Condition cond = al);
480 
481  // Compare double values and move the result to the normal condition flags.
482  void VFPCompareAndSetFlags(const DwVfpRegister src1,
483  const DwVfpRegister src2,
484  const Condition cond = al);
485  void VFPCompareAndSetFlags(const DwVfpRegister src1,
486  const double src2,
487  const Condition cond = al);
488 
489  // Compare double values and then load the fpscr flags to a register.
490  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
491  const DwVfpRegister src2,
492  const Register fpscr_flags,
493  const Condition cond = al);
494  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
495  const double src2,
496  const Register fpscr_flags,
497  const Condition cond = al);
498 
499  void Vmov(const DwVfpRegister dst,
500  const double imm,
501  const Register scratch = no_reg,
502  const Condition cond = al);
503 
504  // Enter exit frame.
505  // stack_space - extra stack space, used for alignment before call to C.
506  void EnterExitFrame(bool save_doubles, int stack_space = 0);
507 
508  // Leave the current exit frame. Expects the return value in r0.
509  // Expect the number of values, pushed prior to the exit frame, to
510  // remove in a register (or no_reg, if there is nothing to remove).
511  void LeaveExitFrame(bool save_doubles, Register argument_count);
512 
513  // Get the actual activation frame alignment for target environment.
514  static int ActivationFrameAlignment();
515 
516  void LoadContext(Register dst, int context_chain_length);
517 
518  // Conditionally load the cached Array transitioned map of type
519  // transitioned_kind from the native context if the map in register
520  // map_in_out is the cached Array map in the native context of
521  // expected_kind.
523  ElementsKind expected_kind,
524  ElementsKind transitioned_kind,
525  Register map_in_out,
526  Register scratch,
527  Label* no_map_match);
528 
529  // Load the initial map for new Arrays from a JSFunction.
530  void LoadInitialArrayMap(Register function_in,
531  Register scratch,
532  Register map_out,
533  bool can_have_holes);
534 
535  void LoadGlobalFunction(int index, Register function);
536 
537  // Load the initial map from the global function. The registers
538  // function and map can be the same, function is then overwritten.
540  Register map,
541  Register scratch);
542 
544  ExternalReference roots_array_start =
545  ExternalReference::roots_array_start(isolate());
546  mov(kRootRegister, Operand(roots_array_start));
547  }
548 
549  // ---------------------------------------------------------------------------
550  // JavaScript invokes
551 
552  // Set up call kind marking in ecx. The method takes ecx as an
553  // explicit first parameter to make the code more readable at the
554  // call sites.
555  void SetCallKind(Register dst, CallKind kind);
556 
557  // Invoke the JavaScript function code by either calling or jumping.
558  void InvokeCode(Register code,
559  const ParameterCount& expected,
560  const ParameterCount& actual,
562  const CallWrapper& call_wrapper,
563  CallKind call_kind);
564 
565  void InvokeCode(Handle<Code> code,
566  const ParameterCount& expected,
567  const ParameterCount& actual,
568  RelocInfo::Mode rmode,
570  CallKind call_kind);
571 
572  // Invoke the JavaScript function in the given register. Changes the
573  // current context to the context in the function before invoking.
574  void InvokeFunction(Register function,
575  const ParameterCount& actual,
577  const CallWrapper& call_wrapper,
578  CallKind call_kind);
579 
580  void InvokeFunction(Handle<JSFunction> function,
581  const ParameterCount& actual,
583  const CallWrapper& call_wrapper,
584  CallKind call_kind);
585 
586  void IsObjectJSObjectType(Register heap_object,
587  Register map,
588  Register scratch,
589  Label* fail);
590 
592  Register scratch,
593  Label* fail);
594 
595  void IsObjectJSStringType(Register object,
596  Register scratch,
597  Label* fail);
598 
599 #ifdef ENABLE_DEBUGGER_SUPPORT
600  // ---------------------------------------------------------------------------
601  // Debugger Support
602 
603  void DebugBreak();
604 #endif
605 
606  // ---------------------------------------------------------------------------
607  // Exception handling
608 
609  // Push a new try handler and link into try handler chain.
610  void PushTryHandler(StackHandler::Kind kind, int handler_index);
611 
612  // Unlink the stack handler on top of the stack from the try handler chain.
613  // Must preserve the result register.
614  void PopTryHandler();
615 
616  // Passes thrown value to the handler of top of the try handler chain.
617  void Throw(Register value);
618 
619  // Propagates an uncatchable exception to the top of the current JS stack's
620  // handler chain.
621  void ThrowUncatchable(Register value);
622 
623  // ---------------------------------------------------------------------------
624  // Inline caching support
625 
626  // Generate code for checking access rights - used for security checks
627  // on access to global objects across environments. The holder register
628  // is left untouched, whereas both scratch registers are clobbered.
629  void CheckAccessGlobalProxy(Register holder_reg,
630  Register scratch,
631  Label* miss);
632 
633  void GetNumberHash(Register t0, Register scratch);
634 
635  void LoadFromNumberDictionary(Label* miss,
636  Register elements,
637  Register key,
638  Register result,
639  Register t0,
640  Register t1,
641  Register t2);
642 
643 
  // Emit a marker nop (encoded as "mov r<type>, r<type>") so that later
  // code inspection can recognize special locations; see IsMarkedCode().
  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }
647 
  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
  // These instructions are generated to mark special locations in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    // Only IC-marker types are valid queries.
    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }
656 
657 
658  static inline int GetCodeMarker(Instr instr) {
659  int dst_reg_offset = 12;
660  int dst_mask = 0xf << dst_reg_offset;
661  int src_mask = 0xf;
662  int dst_reg = (instr & dst_mask) >> dst_reg_offset;
663  int src_reg = instr & src_mask;
664  uint32_t non_register_mask = ~(dst_mask | src_mask);
665  uint32_t mov_mask = al | 13 << 21;
666 
667  // Return <n> if we have a mov rn rn, else return -1.
668  int type = ((instr & non_register_mask) == mov_mask) &&
669  (dst_reg == src_reg) &&
670  (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
671  ? src_reg
672  : -1;
673  ASSERT((type == -1) ||
674  ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
675  return type;
676  }
677 
678 
679  // ---------------------------------------------------------------------------
680  // Allocation support
681 
682  // Allocate an object in new space. The object_size is specified
683  // either in bytes or in words if the allocation flag SIZE_IN_WORDS
684  // is passed. If the new space is exhausted control continues at the
685  // gc_required label. The allocated object is returned in result. If
686  // the flag tag_allocated_object is true the result is tagged as as
687  // a heap object. All registers are clobbered also when control
688  // continues at the gc_required label.
689  void AllocateInNewSpace(int object_size,
690  Register result,
691  Register scratch1,
692  Register scratch2,
693  Label* gc_required,
695  void AllocateInNewSpace(Register object_size,
696  Register result,
697  Register scratch1,
698  Register scratch2,
699  Label* gc_required,
701 
702  // Undo allocation in new space. The object passed and objects allocated after
703  // it will no longer be allocated. The caller must make sure that no pointers
704  // are left to the object(s) no longer allocated as they would be invalid when
705  // allocation is undone.
706  void UndoAllocationInNewSpace(Register object, Register scratch);
707 
708 
709  void AllocateTwoByteString(Register result,
710  Register length,
711  Register scratch1,
712  Register scratch2,
713  Register scratch3,
714  Label* gc_required);
715  void AllocateAsciiString(Register result,
716  Register length,
717  Register scratch1,
718  Register scratch2,
719  Register scratch3,
720  Label* gc_required);
722  Register length,
723  Register scratch1,
724  Register scratch2,
725  Label* gc_required);
726  void AllocateAsciiConsString(Register result,
727  Register length,
728  Register scratch1,
729  Register scratch2,
730  Label* gc_required);
732  Register length,
733  Register scratch1,
734  Register scratch2,
735  Label* gc_required);
737  Register length,
738  Register scratch1,
739  Register scratch2,
740  Label* gc_required);
741 
742  // Allocates a heap number or jumps to the gc_required label if the young
743  // space is full and a scavenge is needed. All registers are clobbered also
744  // when control continues at the gc_required label.
745  void AllocateHeapNumber(Register result,
746  Register scratch1,
747  Register scratch2,
748  Register heap_number_map,
749  Label* gc_required,
750  TaggingMode tagging_mode = TAG_RESULT);
752  DwVfpRegister value,
753  Register scratch1,
754  Register scratch2,
755  Register heap_number_map,
756  Label* gc_required);
757 
758  // Copies a fixed number of fields of heap objects from src to dst.
759  void CopyFields(Register dst, Register src, RegList temps, int field_count);
760 
761  // Copies a number of bytes from src to dst. All registers are clobbered. On
762  // exit src and dst will point to the place just after where the last byte was
763  // read or written and length will be zero.
764  void CopyBytes(Register src,
765  Register dst,
766  Register length,
767  Register scratch);
768 
769  // Initialize fields with filler values. Fields starting at |start_offset|
770  // not including end_offset are overwritten with the value in |filler|. At
771  // the end the loop, |start_offset| takes the value of |end_offset|.
772  void InitializeFieldsWithFiller(Register start_offset,
773  Register end_offset,
774  Register filler);
775 
776  // ---------------------------------------------------------------------------
777  // Support functions.
778 
779  // Try to get function prototype of a function and puts the value in
780  // the result register. Checks that the function really is a
781  // function and jumps to the miss label if the fast checks fail. The
782  // function register will be untouched; the other registers may be
783  // clobbered.
784  void TryGetFunctionPrototype(Register function,
785  Register result,
786  Register scratch,
787  Label* miss,
788  bool miss_on_bound_function = false);
789 
790  // Compare object type for heap object. heap_object contains a non-Smi
791  // whose object type should be compared with the given type. This both
792  // sets the flags and leaves the object type in the type_reg register.
793  // It leaves the map in the map register (unless the type_reg and map register
794  // are the same register). It leaves the heap object in the heap_object
795  // register unless the heap_object register is the same register as one of the
796  // other registers.
797  void CompareObjectType(Register heap_object,
798  Register map,
799  Register type_reg,
800  InstanceType type);
801 
802  // Compare instance type in a map. map contains a valid map object whose
803  // object type should be compared with the given type. This both
804  // sets the flags and leaves the object type in the type_reg register.
805  void CompareInstanceType(Register map,
806  Register type_reg,
807  InstanceType type);
808 
809 
810  // Check if a map for a JSObject indicates that the object has fast elements.
811  // Jump to the specified label if it does not.
812  void CheckFastElements(Register map,
813  Register scratch,
814  Label* fail);
815 
816  // Check if a map for a JSObject indicates that the object can have both smi
817  // and HeapObject elements. Jump to the specified label if it does not.
819  Register scratch,
820  Label* fail);
821 
822  // Check if a map for a JSObject indicates that the object has fast smi only
823  // elements. Jump to the specified label if it does not.
825  Register scratch,
826  Label* fail);
827 
828  // Check to see if maybe_number can be stored as a double in
829  // FastDoubleElements. If it can, store it at the index specified by key in
830  // the FastDoubleElements array elements. Otherwise jump to fail, in which
831  // case scratch2, scratch3 and scratch4 are unmodified.
832  void StoreNumberToDoubleElements(Register value_reg,
833  Register key_reg,
834  Register receiver_reg,
835  // All regs below here overwritten.
836  Register elements_reg,
837  Register scratch1,
838  Register scratch2,
839  Register scratch3,
840  Register scratch4,
841  Label* fail);
842 
843  // Compare an object's map with the specified map and its transitioned
844  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
845  // set with result of map compare. If multiple map compares are required, the
846  // compare sequences branches to early_success.
847  void CompareMap(Register obj,
848  Register scratch,
849  Handle<Map> map,
850  Label* early_success,
852 
853  // As above, but the map of the object is already loaded into the register
854  // which is preserved by the code generated.
855  void CompareMap(Register obj_map,
856  Handle<Map> map,
857  Label* early_success,
859 
860  // Check if the map of an object is equal to a specified map and branch to
861  // label if not. Skip the smi check if not required (object is known to be a
862  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
863  // against maps that are ElementsKind transition maps of the specified map.
864  void CheckMap(Register obj,
865  Register scratch,
866  Handle<Map> map,
867  Label* fail,
868  SmiCheckType smi_check_type,
870 
871 
872  void CheckMap(Register obj,
873  Register scratch,
874  Heap::RootListIndex index,
875  Label* fail,
876  SmiCheckType smi_check_type);
877 
878 
879  // Check if the map of an object is equal to a specified map and branch to a
880  // specified target if equal. Skip the smi check if not required (object is
881  // known to be a heap object)
882  void DispatchMap(Register obj,
883  Register scratch,
884  Handle<Map> map,
885  Handle<Code> success,
886  SmiCheckType smi_check_type);
887 
888 
889  // Compare the object in a register to a value from the root list.
890  // Uses the ip register as scratch.
891  void CompareRoot(Register obj, Heap::RootListIndex index);
892 
893 
894  // Load and check the instance type of an object for being a string.
895  // Loads the type into the second argument register.
896  // Returns a condition that will be enabled if the object was a string.
898  Register type) {
901  tst(type, Operand(kIsNotStringMask));
902  ASSERT_EQ(0, kStringTag);
903  return eq;
904  }
905 
906 
907  // Generates code for reporting that an illegal operation has
908  // occurred.
909  void IllegalOperation(int num_arguments);
910 
911  // Picks out an array index from the hash field.
912  // Register use:
913  // hash - holds the index's hash. Clobbered.
914  // index - holds the overwritten index on exit.
915  void IndexFromHash(Register hash, Register index);
916 
917  // Get the number of least significant bits from a register
918  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
919  void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
920 
921  // Uses VFP instructions to Convert a Smi to a double.
923  Register outHighReg,
924  Register outLowReg);
925 
926  // Load the value of a number object into a VFP double register. If the object
927  // is not a number a jump to the label not_number is performed and the VFP
928  // double register is unchanged.
930  Register object,
931  DwVfpRegister value,
932  Register scratch1,
933  Register scratch2,
934  Register heap_number_map,
935  SwVfpRegister scratch3,
936  Label* not_number,
938 
939  // Load the value of a smi object into a VFP double register. The register
940  // scratch1 can be the same register as smi in which case smi will hold the
941  // untagged value afterwards.
943  DwVfpRegister value,
944  Register scratch1,
945  SwVfpRegister scratch2);
946 
947  // Convert the HeapNumber pointed to by source to a 32bits signed integer
948  // dest. If the HeapNumber does not fit into a 32bits signed integer branch
949  // to not_int32 label. If VFP3 is available double_scratch is used but not
950  // scratch2.
951  void ConvertToInt32(Register source,
952  Register dest,
953  Register scratch,
954  Register scratch2,
955  DwVfpRegister double_scratch,
956  Label *not_int32);
957 
958  // Truncates a double using a specific rounding mode, and writes the value
959  // to the result register.
960  // Clears the z flag (ne condition) if an overflow occurs.
961  // If kCheckForInexactConversion is passed, the z flag is also cleared if the
962  // conversion was inexact, i.e. if the double value could not be converted
963  // exactly to a 32-bit integer.
964  void EmitVFPTruncate(VFPRoundingMode rounding_mode,
965  Register result,
966  DwVfpRegister double_input,
967  Register scratch,
968  DwVfpRegister double_scratch,
971 
972  // Helper for EmitECMATruncate.
973  // This will truncate a floating-point value outside of the signed 32bit
974  // integer range to a 32bit signed integer.
975  // Expects the double value loaded in input_high and input_low.
976  // Exits with the answer in 'result'.
977  // Note that this code does not work for values in the 32bit range!
979  Register input_high,
980  Register input_low,
981  Register scratch);
982 
983  // Performs a truncating conversion of a floating point number as used by
984  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
985  // Exits with 'result' holding the answer and all other registers clobbered.
986  void EmitECMATruncate(Register result,
987  DwVfpRegister double_input,
988  SwVfpRegister single_scratch,
989  Register scratch,
990  Register scratch2,
991  Register scratch3);
992 
993  // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
994  // instruction. On pre-ARM5 hardware this routine gives the wrong answer
995  // for 0 (31 instead of 32). Source and scratch can be the same in which case
996  // the source is clobbered. Source and zeros can also be the same in which
997  // case scratch should be a different register.
998  void CountLeadingZeros(Register zeros,
999  Register source,
1000  Register scratch);
1001 
1002  // ---------------------------------------------------------------------------
1003  // Runtime calls
1004 
1005  // Call a code stub.
1006  void CallStub(CodeStub* stub, Condition cond = al);
1007 
1008  // Call a code stub.
1009  void TailCallStub(CodeStub* stub, Condition cond = al);
1010 
1011  // Call a runtime routine.
1012  void CallRuntime(const Runtime::Function* f, int num_arguments);
1014 
1015  // Convenience function: Same as above, but takes the fid instead.
1016  void CallRuntime(Runtime::FunctionId fid, int num_arguments);
1017 
1018  // Convenience function: call an external reference.
1019  void CallExternalReference(const ExternalReference& ext,
1020  int num_arguments);
1021 
1022  // Tail call of a runtime routine (jump).
1023  // Like JumpToExternalReference, but also takes care of passing the number
1024  // of parameters.
1025  void TailCallExternalReference(const ExternalReference& ext,
1026  int num_arguments,
1027  int result_size);
1028 
1029  // Convenience function: tail call a runtime routine (jump).
1031  int num_arguments,
1032  int result_size);
1033 
1034  int CalculateStackPassedWords(int num_reg_arguments,
1035  int num_double_arguments);
1036 
1037  // Before calling a C-function from generated code, align arguments on stack.
1038  // After aligning the frame, non-register arguments must be stored in
1039  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
1040  // are word sized. If double arguments are used, this function assumes that
1041  // all double arguments are stored before core registers; otherwise the
1042  // correct alignment of the double values is not guaranteed.
1043  // Some compilers/platforms require the stack to be aligned when calling
1044  // C++ code.
1045  // Needs a scratch register to do some arithmetic. This register will be
1046  // trashed.
1047  void PrepareCallCFunction(int num_reg_arguments,
1048  int num_double_registers,
1049  Register scratch);
1050  void PrepareCallCFunction(int num_reg_arguments,
1051  Register scratch);
1052 
1053  // There are two ways of passing double arguments on ARM, depending on
1054  // whether soft or hard floating point ABI is used. These functions
1055  // abstract parameter passing for the three different ways we call
1056  // C functions from generated code.
1060 
1061  // Calls a C function and cleans up the space for arguments allocated
1062  // by PrepareCallCFunction. The called function is not allowed to trigger a
1063  // garbage collection, since that might move the code and invalidate the
1064  // return address (unless this is somehow accounted for by the called
1065  // function).
1066  void CallCFunction(ExternalReference function, int num_arguments);
1067  void CallCFunction(Register function, int num_arguments);
1068  void CallCFunction(ExternalReference function,
1069  int num_reg_arguments,
1070  int num_double_arguments);
1071  void CallCFunction(Register function,
1072  int num_reg_arguments,
1073  int num_double_arguments);
1074 
1075  void GetCFunctionDoubleResult(const DoubleRegister dst);
1076 
1077  // Calls an API function. Allocates HandleScope, extracts returned value
1078  // from handle and propagates exceptions. Restores context. stack_space
1079  // - space to be unwound on exit (includes the call JS arguments space and
1080  // the additional space allocated for the fast call).
1081  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
1082 
1083  // Jump to a runtime routine.
1084  void JumpToExternalReference(const ExternalReference& builtin);
1085 
1086  // Invoke specified builtin JavaScript function. Adds an entry to
1087  // the unresolved list if the name does not resolve.
1089  InvokeFlag flag,
1090  const CallWrapper& call_wrapper = NullCallWrapper());
1091 
1092  // Store the code object for the given builtin in the target register and
1093  // setup the function in r1.
1095 
1096  // Store the function for the given builtin in the target register.
1098 
1100  ASSERT(!code_object_.is_null());
1101  return code_object_;
1102  }
1103 
1104 
1105  // ---------------------------------------------------------------------------
1106  // StatsCounter support
1107 
1108  void SetCounter(StatsCounter* counter, int value,
1109  Register scratch1, Register scratch2);
1110  void IncrementCounter(StatsCounter* counter, int value,
1111  Register scratch1, Register scratch2);
1112  void DecrementCounter(StatsCounter* counter, int value,
1113  Register scratch1, Register scratch2);
1114 
1115 
1116  // ---------------------------------------------------------------------------
1117  // Debugging
1118 
1119  // Calls Abort(msg) if the condition cond is not satisfied.
1120  // Use --debug_code to enable.
1121  void Assert(Condition cond, const char* msg);
1123  void AssertFastElements(Register elements);
1124 
1125  // Like Assert(), but always enabled.
1126  void Check(Condition cond, const char* msg);
1127 
1128  // Print a message to stdout and abort execution.
1129  void Abort(const char* msg);
1130 
1131  // Verify restrictions about code generated in stubs.
1132  void set_generating_stub(bool value) { generating_stub_ = value; }
      // True while code for a stub is being generated.
1133  bool generating_stub() { return generating_stub_; }
1134  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
      // True when this assembler is permitted to emit calls to code stubs.
1135  bool allow_stub_calls() { return allow_stub_calls_; }
1136  void set_has_frame(bool value) { has_frame_ = value; }
      // Presumably tracks whether a frame has been entered (see EnterFrame)
      // -- confirm against the .cc file.
1137  bool has_frame() { return has_frame_; }
      // Whether calling 'stub' is allowed under the current restrictions;
      // defined out of line.
1138  inline bool AllowThisStubCall(CodeStub* stub);
1139 
1140  // EABI variant for double arguments in use.
1142 #if USE_EABI_HARDFLOAT
1143  return true;
1144 #else
1145  return false;
1146 #endif
1147  }
1148 
1149  // ---------------------------------------------------------------------------
1150  // Number utilities
1151 
1152  // Check whether the value of reg is a power of two and not zero. If not
1153  // control continues at the label not_power_of_two. If reg is a power of two
1154  // the register scratch contains the value of (reg - 1) when control falls
1155  // through.
1157  Register scratch,
1158  Label* not_power_of_two_or_zero);
1159  // Check whether the value of reg is a power of two and not zero.
1160  // Control falls through if it is, with scratch containing the mask
1161  // value (reg - 1).
1162  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1163  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1164  // strictly positive but not a power of two.
1166  Register scratch,
1167  Label* zero_and_neg,
1168  Label* not_power_of_two);
1169 
1170  // ---------------------------------------------------------------------------
1171  // Smi utilities
1172 
      // Tag the integer in 'reg' as a smi, in place.
1173  void SmiTag(Register reg, SBit s = LeaveCC) {
      // reg + reg == reg << 1, i.e. the smi encoding (smis have tag bits 0,
      // as JumpIfSmi's eq-branch on tst with kSmiTagMask shows).
      // 's' = SetCC makes the add update the condition flags.
1174  add(reg, reg, Operand(reg), s);
1175  }
      // Tag the integer in 'src' as a smi and write the result to 'dst'.
1176  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
      // src + src == src << 1: same tagging as the in-place overload.
1177  add(dst, src, Operand(src), s);
1178  }
1179 
1180  // Try to convert int32 to smi. If the value is too large, preserve
1181  // the original value and jump to not_a_smi. Destroys scratch and
1182  // sets flags.
1183  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
      // Work on a copy so 'reg' survives if tagging overflows.
1184  mov(scratch, reg);
      // SmiTag doubles the value; SetCC makes the add set the overflow flag.
1185  SmiTag(scratch, SetCC);
      // On signed overflow (vs) the value does not fit in a smi; 'reg' is
      // still the original value at this point.
1186  b(vs, not_a_smi);
      // Commit the tagged value.
1187  mov(reg, scratch);
1188  }
1189 
      // Untag the smi in 'reg', in place.
1190  void SmiUntag(Register reg, SBit s = LeaveCC) {
      // Arithmetic shift right by the tag size preserves the value's sign.
1191  mov(reg, Operand(reg, ASR, kSmiTagSize), s);
1192  }
      // Untag the smi in 'src' and write the integer result to 'dst'.
1193  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
      // Arithmetic (sign-preserving) shift right by the tag size.
1194  mov(dst, Operand(src, ASR, kSmiTagSize), s);
1195  }
1196 
1197  // Untag the source value into destination and jump if source is a smi.
1198  // Source and destination can be the same register.
1199  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1200 
1201  // Untag the source value into destination and jump if source is not a smi.
1202  // Source and destination can be the same register.
1203  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1204 
1205  // Jump if the register contains a smi.
1206  inline void JumpIfSmi(Register value, Label* smi_label) {
      // A smi has its tag bit(s) clear, so ANDing with kSmiTagMask yields 0.
1207  tst(value, Operand(kSmiTagMask));
      // Z set (eq) => value is a smi.
1208  b(eq, smi_label);
1209  }
1210  // Jump if the register contains a non-smi.
1211  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
      // Non-zero tag bit(s) (ne) mean the value is a heap object, not a smi.
1212  tst(value, Operand(kSmiTagMask));
1213  b(ne, not_smi_label);
1214  }
1215  // Jump if either of the registers contain a non-smi.
1216  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1217  // Jump if either of the registers contain a smi.
1218  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1219 
1220  // Abort execution if argument is a smi, enabled via --debug-code.
1221  void AssertNotSmi(Register object);
1222  void AssertSmi(Register object);
1223 
1224  // Abort execution if argument is a string, enabled via --debug-code.
1225  void AssertString(Register object);
1226 
1227  // Abort execution if argument is not the root value with the given index,
1228  // enabled via --debug-code.
1229  void AssertRootValue(Register src,
1230  Heap::RootListIndex root_value_index,
1231  const char* message);
1232 
1233  // ---------------------------------------------------------------------------
1234  // HeapNumber utilities
1235 
1236  void JumpIfNotHeapNumber(Register object,
1237  Register heap_number_map,
1238  Register scratch,
1239  Label* on_not_heap_number);
1240 
1241  // ---------------------------------------------------------------------------
1242  // String utilities
1243 
1244  // Checks if both objects are sequential ASCII strings and jumps to label
1245  // if either is not. Assumes that neither object is a smi.
1247  Register object2,
1248  Register scratch1,
1249  Register scratch2,
1250  Label* failure);
1251 
1252  // Checks if both objects are sequential ASCII strings and jumps to label
1253  // if either is not.
1255  Register second,
1256  Register scratch1,
1257  Register scratch2,
1258  Label* not_flat_ascii_strings);
1259 
1260  // Checks if both instance types are sequential ASCII strings and jumps to
1261  // label if either is not.
1263  Register first_object_instance_type,
1264  Register second_object_instance_type,
1265  Register scratch1,
1266  Register scratch2,
1267  Label* failure);
1268 
1269  // Check if instance type is sequential ASCII string and jump to label if
1270  // it is not.
1272  Register scratch,
1273  Label* failure);
1274 
1275 
1276  // ---------------------------------------------------------------------------
1277  // Patching helpers.
1278 
1279  // Get the location of a relocated constant (its address in the constant pool)
1280  // from its load site.
1281  void GetRelocatedValueLocation(Register ldr_location,
1282  Register result);
1283 
1284 
1285  void ClampUint8(Register output_reg, Register input_reg);
1286 
1287  void ClampDoubleToUint8(Register result_reg,
1288  DoubleRegister input_reg,
1289  DoubleRegister temp_double_reg);
1290 
1291 
1292  void LoadInstanceDescriptors(Register map, Register descriptors);
1293  void EnumLength(Register dst, Register map);
1294  void NumberOfOwnDescriptors(Register dst, Register map);
1295 
      // Decode a BitField-encoded field in place: shift the field down and
      // mask off the surrounding bits.
1296  template<typename Field>
1297  void DecodeField(Register reg) {
1298  static const int shift = Field::kShift;
      // NOTE(review): the mask is pre-shifted left by kSmiTagSize, so the
      // low tag-size bit(s) of the shifted value are cleared -- this looks
      // intended to leave the result directly usable as a smi; confirm
      // against the BitField layout before relying on it.
1299  static const int mask = (Field::kMask >> shift) << kSmiTagSize;
1300  mov(reg, Operand(reg, LSR, shift));
1301  and_(reg, reg, Operand(mask));
1302  }
1303 
1304  // Activation support.
1305  void EnterFrame(StackFrame::Type type);
1306  void LeaveFrame(StackFrame::Type type);
1307 
1308  // Expects object in r0 and returns map with validated enum cache
1309  // in r0. Assumes that any other register can be used as a scratch.
1310  void CheckEnumCache(Register null_value, Label* call_runtime);
1311 
1312  private:
1313  void CallCFunctionHelper(Register function,
1314  int num_reg_arguments,
1315  int num_double_arguments);
1316 
1317  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
1318 
1319  // Helper functions for generating invokes.
1320  void InvokePrologue(const ParameterCount& expected,
1321  const ParameterCount& actual,
1322  Handle<Code> code_constant,
1323  Register code_reg,
1324  Label* done,
1325  bool* definitely_mismatches,
1326  InvokeFlag flag,
1327  const CallWrapper& call_wrapper,
1328  CallKind call_kind);
1329 
1330  void InitializeNewString(Register string,
1331  Register length,
1332  Heap::RootListIndex map_index,
1333  Register scratch1,
1334  Register scratch2);
1335 
1336  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1337  void InNewSpace(Register object,
1338  Register scratch,
1339  Condition cond, // eq for new space, ne otherwise.
1340  Label* branch);
1341 
1342  // Helper for finding the mark bits for an address. Afterwards, the
1343  // bitmap register points at the word with the mark bits and the mask
1344  // the position of the first bit. Leaves addr_reg unchanged.
1345  inline void GetMarkBits(Register addr_reg,
1346  Register bitmap_reg,
1347  Register mask_reg);
1348 
1349  // Helper for throwing exceptions. Compute a handler address and jump to
1350  // it. See the implementation for register usage.
1351  void JumpToHandlerEntry();
1352 
1353  // Compute memory operands for safepoint stack slots.
1354  static int SafepointRegisterStackIndex(int reg_code);
1355  MemOperand SafepointRegisterSlot(Register reg);
1356  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1357 
1358  bool generating_stub_;
1359  bool allow_stub_calls_;
1360  bool has_frame_;
1361  // This handle will be patched with the code object on installation.
1362  Handle<Object> code_object_;
1363 
1364  // Needs access to SafepointRegisterStackIndex for optimized frame
1365  // traversal.
1366  friend class OptimizedFrame;
1367 };
1368 
1369 
1370 // The code patcher is used to patch (typically) small parts of code e.g. for
1371 // debugging and other types of instrumentation. When using the code patcher
1372 // the exact number of bytes specified must be emitted. It is not legal to emit
1373 // relocation information. If any of these constraints are violated it causes
1374 // an assertion to fail.
1376  public:
1377  CodePatcher(byte* address, int instructions);
1378  virtual ~CodePatcher();
1379 
1380  // Macro assembler to emit code.
1381  MacroAssembler* masm() { return &masm_; }
1382 
1383  // Emit an instruction directly.
1384  void Emit(Instr instr);
1385 
1386  // Emit an address directly.
1387  void Emit(Address addr);
1388 
1389  // Emit the condition part of an instruction leaving the rest of the current
1390  // instruction unchanged.
1391  void EmitCondition(Condition cond);
1392 
1393  private:
1394  byte* address_; // The address of the code being patched.
1395  int instructions_; // Number of instructions of the expected patch size.
1396  int size_; // Number of bytes of the expected patch size.
1397  MacroAssembler masm_; // Macro assembler used to generate the code.
1398 };
1399 
1400 
1401 // -----------------------------------------------------------------------------
1402 // Static helper functions.
1403 
1404 inline MemOperand ContextOperand(Register context, int index) {
1405  return MemOperand(context, Context::SlotOffset(index));
1406 }
1407 
1408 
1411 }
1412 
1413 
1414 #ifdef GENERATED_CODE_COVERAGE
1415 #define CODE_COVERAGE_STRINGIFY(x) #x
1416 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1417 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1418 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1419 #else
1420 #define ACCESS_MASM(masm) masm->
1421 #endif
1422 
1423 
1424 } } // namespace v8::internal
1425 
1426 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
byte * Address
Definition: globals.h:157
void CallRuntime(const Runtime::Function *f, int num_arguments)
void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg, DoubleRegister temp_double_reg)
void Push(Handle< Object > handle)
void SmiUntag(Register reg, SBit s=LeaveCC)
void IntegerToDoubleConversionWithVFP3(Register inReg, Register outHighReg, Register outLowReg)
void ClampUint8(Register output_reg, Register input_reg)
Isolate * isolate() const
Definition: assembler.h:61
const intptr_t kSmiTagMask
Definition: v8.h:4016
void Assert(Condition cond, const char *msg)
static int SlotOffset(int index)
Definition: contexts.h:425
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 instructions(ARM only)") DEFINE_bool(enable_vfp2
void GetRelocatedValueLocation(Register ldr_location, Register result)
void GetCFunctionDoubleResult(const DoubleRegister dst)
void SmiTag(Register reg, SBit s=LeaveCC)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void mov(Register rd, Register rt)
void IncrementalMarkingRecordWriteHelper(Register object, Register value, Register address)
const Register cp
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Bfc(Register dst, Register src, int lsb, int width, Condition cond=al)
void JumpIfInNewSpace(Register object, Register scratch, Label *branch)
void LeaveExitFrame(bool save_doubles, Register argument_count)
void AssertString(Register object)
void IsObjectJSStringType(Register object, Register scratch, Label *fail)
static TypeFeedbackId None()
Definition: utils.h:999
void JumpToExternalReference(const ExternalReference &builtin)
void UntagAndJumpIfSmi(Register dst, Register src, Label *smi_case)
void LoadInstanceDescriptors(Register map, Register descriptors)
void ObjectToDoubleVFPRegister(Register object, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, SwVfpRegister scratch3, Label *not_number, ObjectToDoubleFlags flags=NO_OBJECT_TO_DOUBLE_FLAGS)
void AllocateAsciiString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void TrySmiTag(Register reg, Label *not_a_smi, Register scratch)
void JumpIfNotBothSequentialAsciiStrings(Register first, Register second, Register scratch1, Register scratch2, Label *not_flat_ascii_strings)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register t0, Register t1, Register t2)
void SetCallCDoubleArguments(DoubleRegister dreg)
void CountLeadingZeros(Register zeros, Register source, Register scratch)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void b(int branch_offset, Condition cond=al)
void JumpIfSmi(Register value, Label *smi_label)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
bool AllowThisStubCall(CodeStub *stub)
uint32_t RegList
Definition: frames.h:38
void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch, Label *not_power_of_two_or_zero)
void ldrb(Register dst, const MemOperand &src, Condition cond=al)
void StoreToSafepointRegisterSlot(Register src, Register dst)
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
void Bfi(Register dst, Register src, Register scratch, int lsb, int width, Condition cond=al)
void Swap(Register reg1, Register reg2, Register scratch=no_reg, Condition cond=al)
bool AreAliased(Register r1, Register r2, Register r3, Register r4)
#define ASSERT(condition)
Definition: checks.h:270
void AssertNotSmi(Register object)
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void SmiTag(Register dst, Register src, SBit s=LeaveCC)
void stm(BlockAddrMode am, Register base, RegList src, Condition cond=al)
void PushTryHandler(StackHandler::Kind kind, int handler_index)
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label *no_map_match)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static bool IsMarkedCode(Instr instr, int type)
void NumberOfOwnDescriptors(Register dst, Register map)
MemOperand GlobalObjectOperand()
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Abort(const char *msg)
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void MarkCode(NopMarkerTypes type)
MemOperand ContextOperand(Register context, int index)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type, CompareMapMode mode=REQUIRE_EXACT_MAP)
void CopyFields(Register dst, Register src, RegList temps, int field_count)
void AssertSmi(Register object)
void CompareRoot(Register obj, Heap::RootListIndex index)
void Pop(Register src1, Register src2, Register src3, Register src4, Condition cond=al)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst)
const Register kRootRegister
uint8_t byte
Definition: globals.h:156
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void Pop(Register src1, Register src2, Condition cond=al)
void EmitVFPTruncate(VFPRoundingMode rounding_mode, Register result, DwVfpRegister double_input, Register scratch, DwVfpRegister double_scratch, CheckForInexactConversion check=kDontCheckForInexactConversion)
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
const Register sp
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void RecordWriteContextSlot(Register context, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void EnumLength(Register dst, Register map)
static int ActivationFrameAlignment()
void SmiUntag(Register dst, Register src, SBit s=LeaveCC)
void LeaveFrame(StackFrame::Type type)
void CheckFastElements(Register map, Register scratch, Label *fail)
void LoadGlobalFunction(int index, Register function)
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label *condition_met)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
void Emit(Instr instr)
void EmitECMATruncate(Register result, DwVfpRegister double_input, SwVfpRegister single_scratch, Register scratch, Register scratch2, Register scratch3)
void CallCFunction(ExternalReference function, int num_arguments)
Condition IsObjectStringType(Register obj, Register type)
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void CheckFastSmiElements(Register map, Register scratch, Label *fail)
const int kHeapObjectTag
Definition: v8.h:4009
void Jump(Register target, Condition cond=al)
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void JumpIfDataObject(Register value, Register scratch, Label *not_data_object)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void LoadHeapObject(Register dst, Handle< HeapObject > object)
void Throw(Register value)
void ConvertToInt32(Register source, Register dest, Register scratch, Register scratch2, DwVfpRegister double_scratch, Label *not_int32)
void Move(Register dst, Handle< Object > value)
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void str(Register src, const MemOperand &dst, Condition cond=al)
void Push(Register src1, Register src2, Register src3, Condition cond=al)
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond=al)
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper, CallKind call_kind)
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits)
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
Definition: objects.h:456
void LoadContext(Register dst, int context_chain_length)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
static int CallSize(Register target, Condition cond=al)
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register receiver_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Label *fail)
void AssertFastElements(Register elements)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits)
void JumpIfNotBothSmi(Register reg1, Register reg2, Label *on_not_both_smi)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void Drop(int count, Condition cond=al)
InvokeFlag
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void IllegalOperation(int num_arguments)
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void CallApiFunctionAndReturn(ExternalReference function, int stack_space)
void Ldrd(Register dst1, Register dst2, const MemOperand &src, Condition cond=al)
static const int kMapOffset
Definition: objects.h:1261
bool is(Register reg) const
const uint32_t kIsNotStringMask
Definition: objects.h:455
void LoadObject(Register result, Handle< Object > object)
void ClearFPSCRBits(const uint32_t bits_to_clear, const Register scratch, const Condition cond=al)
void VFPCompareAndLoadFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Register fpscr_flags, const Condition cond=al)
void CallRuntimeSaveDoubles(Runtime::FunctionId id)
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch, Label *zero_and_neg, Label *not_power_of_two)
bool is_null() const
Definition: handles.h:87
void ThrowUncatchable(Register value)
void SmiToDoubleVFPRegister(Register smi, DwVfpRegister value, Register scratch1, SwVfpRegister scratch2)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
void AllocateInNewSpace(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
MemOperand FieldMemOperand(Register object, int offset)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, Register scratch)
void CompareMap(Register obj, Register scratch, Handle< Map > map, Label *early_success, CompareMapMode mode=REQUIRE_EXACT_MAP)
void EmitOutOfInt32RangeTruncate(Register result, Register input_high, Register input_low, Register scratch)
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void GetNumberHash(Register t0, Register scratch)
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper, CallKind call_kind)
const int kSmiTagSize
Definition: v8.h:4015
static int GetCodeMarker(Instr instr)
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1, Register object2, Register scratch1, Register scratch2, Label *failure)
void EmitCondition(Condition cond)
void JumpIfBothInstanceTypesAreNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
void UndoAllocationInNewSpace(Register object, Register scratch)
void Push(Register src1, Register src2, Register src3, Register src4, Condition cond=al)
void LoadFromSafepointRegisterSlot(Register dst, Register src)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
void Call(Register target, Condition cond=al)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin 
natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in the Print usage message
Definition: flags.cc:495
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Check(Condition cond, const char *msg)
void LoadInitialArrayMap(Register function_in, Register scratch, Register map_out, bool can_have_holes)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void AssertRootValue(Register src, Heap::RootListIndex root_value_index, const char *message)
static int CallSizeNotPredictableCodeSize(Address target, RelocInfo::Mode rmode, Condition cond=al)
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void Pop(Register src1, Register src2, Register src3, Condition cond=al)
void TailCallStub(CodeStub *stub, Condition cond=al)
CodePatcher(byte *address, int instructions)
void UntagAndJumpIfNotSmi(Register dst, Register src, Label *non_smi_case)
const Register no_reg
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
Operand SmiUntagOperand(Register object)
int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void CallStub(CodeStub *stub, Condition cond=al)
void IndexFromHash(Register hash, Register index)
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
void EnterExitFrame(bool save_doubles, int stack_space=0)
void InitializeFieldsWithFiller(Register start_offset, Register end_offset, Register filler)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin 
natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in the Print usage including flags
Definition: flags.cc:495
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
void SetCallKind(Register dst, CallKind kind)
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void check(i::Vector< const char > string)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
void Usat(Register dst, int satpos, const Operand &src, Condition cond=al)
void Push(Register src1, Register src2, Condition cond=al)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
void tst(Register src1, const Operand &src2, Condition cond=al)
void Strd(Register src1, Register src2, const MemOperand &dst, Condition cond=al)
void Vmov(const DwVfpRegister dst, const double imm, const Register scratch=no_reg, const Condition cond=al)
void EnterFrame(StackFrame::Type type)
void CheckEnumCache(Register null_value, Label *call_runtime)
static const int kInstanceTypeOffset
Definition: objects.h:5158
void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required)
kPropertyAccessorsOffset kNamedPropertyHandlerOffset kInstanceTemplateOffset kAccessCheckInfoOffset kEvalFrominstructionsOffsetOffset kInstanceClassNameOffset flag
Definition: objects-inl.h:3923