V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
macro-assembler-ia32.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
29 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
30 
31 #include "assembler.h"
32 #include "frames.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // Convenience for platform-independent signatures. We do not normally
39 // distinguish memory operands from other operands on ia32.
40 typedef Operand MemOperand;
41 
42 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
43 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
44 
45 
46 enum RegisterValueType {
47  REGISTER_VALUE_IS_SMI,
48  REGISTER_VALUE_IS_INT32
49 };
50 
51 
52 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
53 
54 
55 // MacroAssembler implements a collection of frequently used macros.
56 class MacroAssembler: public Assembler {
57  public:
58  // The isolate parameter can be NULL if the macro assembler should
59  // not use isolate-dependent functionality. In this case, it's the
60  // responsibility of the caller to never invoke such a function on the
61  // macro assembler.
62  MacroAssembler(Isolate* isolate, void* buffer, int size);
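  // Editorial example (not part of the original header; buffer management is
  // simplified): an isolate-independent assembler writing into a caller-owned
  // buffer, as permitted by the comment above.
  //   byte buffer[256];
  //   MacroAssembler masm(NULL, buffer, sizeof(buffer));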
63 
64  void Load(Register dst, const Operand& src, Representation r);
65  void Store(Register src, const Operand& dst, Representation r);
66 
67  // Operations on roots in the root-array.
68  void LoadRoot(Register destination, Heap::RootListIndex index);
69  void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
70  void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
71  // These methods can only be used with constant roots (i.e. non-writable
72  // and not in new space).
73  void CompareRoot(Register with, Heap::RootListIndex index);
74  void CompareRoot(const Operand& with, Heap::RootListIndex index);
75 
76  // ---------------------------------------------------------------------------
77  // GC Support
78  enum RememberedSetFinalAction {
79  kReturnAtEnd,
80  kFallThroughAtEnd
81  };
82 
83  // Record in the remembered set the fact that we have a pointer to new space
84  // at the address pointed to by the addr register. Only works if addr is not
85  // in new space.
86  void RememberedSetHelper(Register object, // Used for debug code.
87  Register addr,
88  Register scratch,
89  SaveFPRegsMode save_fp,
90  RememberedSetFinalAction and_then);
91 
92  void CheckPageFlag(Register object,
93  Register scratch,
94  int mask,
95  Condition cc,
96  Label* condition_met,
97  Label::Distance condition_met_distance = Label::kFar);
98 
99  void CheckPageFlagForMap(
100  Handle<Map> map,
101  int mask,
102  Condition cc,
103  Label* condition_met,
104  Label::Distance condition_met_distance = Label::kFar);
105 
106  void CheckMapDeprecated(Handle<Map> map,
107  Register scratch,
108  Label* if_deprecated);
109 
110  // Check if object is in new space. Jumps if the object is not in new space.
111  // The register scratch can be object itself, but scratch will be clobbered.
112  void JumpIfNotInNewSpace(Register object,
113  Register scratch,
114  Label* branch,
115  Label::Distance distance = Label::kFar) {
116  InNewSpace(object, scratch, zero, branch, distance);
117  }
118 
119  // Check if object is in new space. Jumps if the object is in new space.
120  // The register scratch can be object itself, but it will be clobbered.
121  void JumpIfInNewSpace(Register object,
122  Register scratch,
123  Label* branch,
124  Label::Distance distance = Label::kFar) {
125  InNewSpace(object, scratch, not_zero, branch, distance);
126  }
127 
128  // Check if an object has a given incremental marking color. Also uses ecx!
129  void HasColor(Register object,
130  Register scratch0,
131  Register scratch1,
132  Label* has_color,
133  Label::Distance has_color_distance,
134  int first_bit,
135  int second_bit);
136 
137  void JumpIfBlack(Register object,
138  Register scratch0,
139  Register scratch1,
140  Label* on_black,
141  Label::Distance on_black_distance = Label::kFar);
142 
143  // Checks the color of an object. If the object is already grey or black
144  // then we just fall through, since it is already live. If it is white and
145  // we can determine that it doesn't need to be scanned, then we just mark it
146  // black and fall through. For the rest we jump to the label so the
147  // incremental marker can fix its assumptions.
148  void EnsureNotWhite(Register object,
149  Register scratch1,
150  Register scratch2,
151  Label* object_is_white_and_not_data,
152  Label::Distance distance);
153 
154  // Notify the garbage collector that we wrote a pointer into an object.
155  // |object| is the object being stored into, |value| is the object being
156  // stored. value and scratch registers are clobbered by the operation.
157  // The offset is the offset from the start of the object, not the offset from
158  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
159  void RecordWriteField(
160  Register object,
161  int offset,
162  Register value,
163  Register scratch,
164  SaveFPRegsMode save_fp,
165  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
166  SmiCheck smi_check = INLINE_SMI_CHECK);
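  // Editorial example (illustrative only; the register choices are arbitrary):
  // a typical field store followed by its write barrier.
  //   __ mov(FieldOperand(edx, JSObject::kPropertiesOffset), eax);
  //   __ RecordWriteField(edx, JSObject::kPropertiesOffset, eax, ecx,
  //                       kDontSaveFPRegs);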
167 
168  // As above, but the offset has the tag presubtracted. For use with
169  // Operand(reg, off).
170  void RecordWriteContextSlot(
171  Register context,
172  int offset,
173  Register value,
174  Register scratch,
175  SaveFPRegsMode save_fp,
176  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
177  SmiCheck smi_check = INLINE_SMI_CHECK) {
178  RecordWriteField(context,
179  offset + kHeapObjectTag,
180  value,
181  scratch,
182  save_fp,
183  remembered_set_action,
184  smi_check);
185  }
186 
187  // Notify the garbage collector that we wrote a pointer into a fixed array.
188  // |array| is the array being stored into, |value| is the
189  // object being stored. |index| is the array index represented as a
190  // Smi. All registers are clobbered by the operation RecordWriteArray
191  // filters out smis so it does not update the write barrier if the
192  // value is a smi.
193  void RecordWriteArray(
194  Register array,
195  Register value,
196  Register index,
197  SaveFPRegsMode save_fp,
198  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
199  SmiCheck smi_check = INLINE_SMI_CHECK);
200 
201  // For page containing |object| mark region covering |address|
202  // dirty. |object| is the object being stored into, |value| is the
203  // object being stored. The address and value registers are clobbered by the
204  // operation. RecordWrite filters out smis so it does not update the
205  // write barrier if the value is a smi.
206  void RecordWrite(
207  Register object,
208  Register address,
209  Register value,
210  SaveFPRegsMode save_fp,
211  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
212  SmiCheck smi_check = INLINE_SMI_CHECK);
213 
214  // For page containing |object| mark the region covering the object's map
215  // dirty. |object| is the object being stored into, |map| is the Map object
216  // that was stored.
217  void RecordWriteForMap(
218  Register object,
219  Handle<Map> map,
220  Register scratch1,
221  Register scratch2,
222  SaveFPRegsMode save_fp);
223 
224 #ifdef ENABLE_DEBUGGER_SUPPORT
225  // ---------------------------------------------------------------------------
226  // Debugger Support
227 
228  void DebugBreak();
229 #endif
230 
231  // Generates function and stub prologue code.
232  void Prologue(PrologueFrameMode frame_mode);
233 
234  // Enter specific kind of exit frame. Expects the number of
235  // arguments in register eax and sets up the number of arguments in
236  // register edi and the pointer to the first argument in register
237  // esi.
238  void EnterExitFrame(bool save_doubles);
239 
240  void EnterApiExitFrame(int argc);
241 
242  // Leave the current exit frame. Expects the return value in
243  // register eax:edx (untouched) and the pointer to the first
244  // argument in register esi.
245  void LeaveExitFrame(bool save_doubles);
246 
247  // Leave the current exit frame. Expects the return value in
248  // register eax (untouched).
249  void LeaveApiExitFrame(bool restore_context);
250 
251  // Find the function context up the context chain.
252  void LoadContext(Register dst, int context_chain_length);
253 
254  // Conditionally load the cached Array transitioned map of type
255  // transitioned_kind from the native context if the map in register
256  // map_in_out is the cached Array map in the native context of
257  // expected_kind.
258  void LoadTransitionedArrayMapConditional(
259  ElementsKind expected_kind,
260  ElementsKind transitioned_kind,
261  Register map_in_out,
262  Register scratch,
263  Label* no_map_match);
264 
265  // Load the global function with the given index.
266  void LoadGlobalFunction(int index, Register function);
267 
268  // Load the initial map from the global function. The registers
269  // function and map can be the same.
270  void LoadGlobalFunctionInitialMap(Register function, Register map);
271 
272  // Push and pop the registers that can hold pointers.
273  void PushSafepointRegisters() { pushad(); }
274  void PopSafepointRegisters() { popad(); }
275  // Store the value in register/immediate src in the safepoint
276  // register stack slot for register dst.
277  void StoreToSafepointRegisterSlot(Register dst, Register src);
278  void StoreToSafepointRegisterSlot(Register dst, Immediate src);
279  void LoadFromSafepointRegisterSlot(Register dst, Register src);
280 
281  void LoadHeapObject(Register result, Handle<HeapObject> object);
282  void CmpHeapObject(Register reg, Handle<HeapObject> object);
283  void PushHeapObject(Handle<HeapObject> object);
284 
285  void LoadObject(Register result, Handle<Object> object) {
286  AllowDeferredHandleDereference heap_object_check;
287  if (object->IsHeapObject()) {
288  LoadHeapObject(result, Handle<HeapObject>::cast(object));
289  } else {
290  Move(result, Immediate(object));
291  }
292  }
293 
294  void CmpObject(Register reg, Handle<Object> object) {
295  AllowDeferredHandleDereference heap_object_check;
296  if (object->IsHeapObject()) {
297  CmpHeapObject(reg, Handle<HeapObject>::cast(object));
298  } else {
299  cmp(reg, Immediate(object));
300  }
301  }
302 
303  // ---------------------------------------------------------------------------
304  // JavaScript invokes
305 
306  // Invoke the JavaScript function code by either calling or jumping.
307  void InvokeCode(Register code,
308  const ParameterCount& expected,
309  const ParameterCount& actual,
310  InvokeFlag flag,
311  const CallWrapper& call_wrapper) {
312  InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
313  }
314 
315  void InvokeCode(const Operand& code,
316  const ParameterCount& expected,
317  const ParameterCount& actual,
318  InvokeFlag flag,
319  const CallWrapper& call_wrapper);
320 
321  // Invoke the JavaScript function in the given register. Changes the
322  // current context to the context in the function before invoking.
323  void InvokeFunction(Register function,
324  const ParameterCount& actual,
325  InvokeFlag flag,
326  const CallWrapper& call_wrapper);
327 
328  void InvokeFunction(Register function,
329  const ParameterCount& expected,
330  const ParameterCount& actual,
331  InvokeFlag flag,
332  const CallWrapper& call_wrapper);
333 
334  void InvokeFunction(Handle<JSFunction> function,
335  const ParameterCount& expected,
336  const ParameterCount& actual,
337  InvokeFlag flag,
338  const CallWrapper& call_wrapper);
339 
340  // Invoke specified builtin JavaScript function. Adds an entry to
341  // the unresolved list if the name does not resolve.
342  void InvokeBuiltin(Builtins::JavaScript id,
343  InvokeFlag flag,
344  const CallWrapper& call_wrapper = NullCallWrapper());
345 
346  // Store the function for the given builtin in the target register.
347  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
348 
349  // Store the code object for the given builtin in the target register.
350  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
351 
352  // Expression support
353  // cvtsi2sd instruction only writes to the low 64 bits of the dst register, which
354  // hinders register renaming and makes dependence chains longer. So we use
355  // xorps to clear the dst register before cvtsi2sd to solve this issue.
356  void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
357  void Cvtsi2sd(XMMRegister dst, const Operand& src);
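  // Editorial note (not from the original header): per the comment above, the
  // expansion in the matching .cc file is expected to be essentially
  //   xorps(dst, dst);     // break the false dependence on dst's old value
  //   cvtsi2sd(dst, src);  // then perform the actual int -> double conversion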
358 
359  // Support for constant splitting.
360  bool IsUnsafeImmediate(const Immediate& x);
361  void SafeMove(Register dst, const Immediate& x);
362  void SafePush(const Immediate& x);
363 
364  // Compare object type for heap object.
365  // Incoming register is heap_object and outgoing register is map.
366  void CmpObjectType(Register heap_object, InstanceType type, Register map);
367 
368  // Compare instance type for map.
369  void CmpInstanceType(Register map, InstanceType type);
370 
371  // Check if a map for a JSObject indicates that the object has fast elements.
372  // Jump to the specified label if it does not.
373  void CheckFastElements(Register map,
374  Label* fail,
375  Label::Distance distance = Label::kFar);
376 
377  // Check if a map for a JSObject indicates that the object can have both smi
378  // and HeapObject elements. Jump to the specified label if it does not.
379  void CheckFastObjectElements(Register map,
380  Label* fail,
381  Label::Distance distance = Label::kFar);
382 
383  // Check if a map for a JSObject indicates that the object has fast smi only
384  // elements. Jump to the specified label if it does not.
385  void CheckFastSmiElements(Register map,
386  Label* fail,
387  Label::Distance distance = Label::kFar);
388 
389  // Check to see if maybe_number can be stored as a double in
390  // FastDoubleElements. If it can, store it at the index specified by key in
391  // the FastDoubleElements array elements, otherwise jump to fail.
392  void StoreNumberToDoubleElements(Register maybe_number,
393  Register elements,
394  Register key,
395  Register scratch1,
396  XMMRegister scratch2,
397  Label* fail,
398  bool specialize_for_processor,
399  int offset = 0);
400 
401  // Compare an object's map with the specified map.
402  void CompareMap(Register obj, Handle<Map> map);
403 
404  // Check if the map of an object is equal to a specified map and branch to
405  // label if not. Skip the smi check if not required (object is known to be a
406  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
407  // against maps that are ElementsKind transition maps of the specified map.
408  void CheckMap(Register obj,
409  Handle<Map> map,
410  Label* fail,
411  SmiCheckType smi_check_type);
412 
413  // Check if the map of an object is equal to a specified map and branch to a
414  // specified target if equal. Skip the smi check if not required (object is
415  // known to be a heap object)
416  void DispatchMap(Register obj,
417  Register unused,
418  Handle<Map> map,
419  Handle<Code> success,
420  SmiCheckType smi_check_type);
421 
422  // Check if the object in register heap_object is a string. Afterwards the
423  // register map contains the object map and the register instance_type
424  // contains the instance_type. The registers map and instance_type can be the
425  // same in which case it contains the instance type afterwards. Either of the
426  // registers map and instance_type can be the same as heap_object.
427  Condition IsObjectStringType(Register heap_object,
428  Register map,
429  Register instance_type);
430 
431  // Check if the object in register heap_object is a name. Afterwards the
432  // register map contains the object map and the register instance_type
433  // contains the instance_type. The registers map and instance_type can be the
434  // same in which case it contains the instance type afterwards. Either of the
435  // registers map and instance_type can be the same as heap_object.
436  Condition IsObjectNameType(Register heap_object,
437  Register map,
438  Register instance_type);
439 
440  // Check if a heap object's type is in the JSObject range, not including
441  // JSFunction. The object's map will be loaded in the map register.
442  // Any or all of the three registers may be the same.
443  // The contents of the scratch register will always be overwritten.
444  void IsObjectJSObjectType(Register heap_object,
445  Register map,
446  Register scratch,
447  Label* fail);
448 
449  // The contents of the scratch register will be overwritten.
450  void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
451 
452  // FCmp is similar to integer cmp, but requires unsigned
453  // jcc instructions (je, ja, jae, jb, jbe, and jz).
454  void FCmp();
455 
456  void ClampUint8(Register reg);
457 
458  void ClampDoubleToUint8(XMMRegister input_reg,
459  XMMRegister scratch_reg,
460  Register result_reg);
461 
462  void SlowTruncateToI(Register result_reg, Register input_reg,
463  int offset = HeapNumber::kValueOffset - kHeapObjectTag);
464 
465  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
466  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
467  void TruncateX87TOSToI(Register result_reg);
468 
469  void DoubleToI(Register result_reg, XMMRegister input_reg,
470  XMMRegister scratch, MinusZeroMode minus_zero_mode,
471  Label* conversion_failed, Label::Distance dst = Label::kFar);
472  void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
473  Label* conversion_failed, Label::Distance dst = Label::kFar);
474 
475  void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
476  MinusZeroMode minus_zero_mode, Label* lost_precision);
477 
478  // Smi tagging support.
479  void SmiTag(Register reg) {
480  STATIC_ASSERT(kSmiTag == 0);
481  STATIC_ASSERT(kSmiTagSize == 1);
482  add(reg, reg);
483  }
484  void SmiUntag(Register reg) {
485  sar(reg, kSmiTagSize);
486  }
487 
488  // Modifies the register even if it does not contain a Smi!
489  void SmiUntag(Register reg, Label* is_smi) {
490  STATIC_ASSERT(kSmiTagSize == 1);
491  sar(reg, kSmiTagSize);
492  STATIC_ASSERT(kSmiTag == 0);
493  j(not_carry, is_smi);
494  }
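  // Editorial example (illustrative; the register choice is arbitrary): with
  // the ia32 smi encoding assumed throughout this file (kSmiTag == 0,
  // kSmiTagSize == 1),
  //   masm->SmiTag(eax);    // eax: 5  -> 10  (5 << 1, low bit 0 marks a smi)
  //   masm->SmiUntag(eax);  // eax: 10 -> 5   (arithmetic shift right by 1)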
495 
496  void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
497  void LoadUint32NoSSE2(Register src);
498 
499  // Jump if the register contains a smi.
500  inline void JumpIfSmi(Register value,
501  Label* smi_label,
502  Label::Distance distance = Label::kFar) {
503  test(value, Immediate(kSmiTagMask));
504  j(zero, smi_label, distance);
505  }
506  // Jump if the operand is a smi.
507  inline void JumpIfSmi(Operand value,
508  Label* smi_label,
509  Label::Distance distance = Label::kFar) {
510  test(value, Immediate(kSmiTagMask));
511  j(zero, smi_label, distance);
512  }
513  // Jump if the register contains a non-smi.
514  inline void JumpIfNotSmi(Register value,
515  Label* not_smi_label,
516  Label::Distance distance = Label::kFar) {
517  test(value, Immediate(kSmiTagMask));
518  j(not_zero, not_smi_label, distance);
519  }
520 
521  void LoadInstanceDescriptors(Register map, Register descriptors);
522  void EnumLength(Register dst, Register map);
523  void NumberOfOwnDescriptors(Register dst, Register map);
524 
525  template<typename Field>
526  void DecodeField(Register reg) {
527  static const int shift = Field::kShift;
528  static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
529  sar(reg, shift);
530  and_(reg, Immediate(mask));
531  }
532  void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
533 
534  // Abort execution if argument is not a number, enabled via --debug-code.
535  void AssertNumber(Register object);
536 
537  // Abort execution if argument is not a smi, enabled via --debug-code.
538  void AssertSmi(Register object);
539 
540  // Abort execution if argument is a smi, enabled via --debug-code.
541  void AssertNotSmi(Register object);
542 
543  // Abort execution if argument is not a string, enabled via --debug-code.
544  void AssertString(Register object);
545 
546  // Abort execution if argument is not a name, enabled via --debug-code.
547  void AssertName(Register object);
548 
549  // Abort execution if argument is not undefined or an AllocationSite, enabled
550  // via --debug-code.
551  void AssertUndefinedOrAllocationSite(Register object);
552 
553  // ---------------------------------------------------------------------------
554  // Exception handling
555 
556  // Push a new try handler and link it into try handler chain.
557  void PushTryHandler(StackHandler::Kind kind, int handler_index);
558 
559  // Unlink the stack handler on top of the stack from the try handler chain.
560  void PopTryHandler();
561 
562  // Throw to the top handler in the try handler chain.
563  void Throw(Register value);
564 
565  // Throw past all JS frames to the top JS entry frame.
566  void ThrowUncatchable(Register value);
567 
568  // Throw a message string as an exception.
569  void Throw(BailoutReason reason);
570 
571  // Throw a message string as an exception if a condition is not true.
572  void ThrowIf(Condition cc, BailoutReason reason);
573 
574  // ---------------------------------------------------------------------------
575  // Inline caching support
576 
577  // Generate code for checking access rights - used for security checks
578  // on access to global objects across environments. The holder register
579  // is left untouched, but the scratch register is clobbered.
580  void CheckAccessGlobalProxy(Register holder_reg,
581  Register scratch1,
582  Register scratch2,
583  Label* miss);
584 
585  void GetNumberHash(Register r0, Register scratch);
586 
587  void LoadFromNumberDictionary(Label* miss,
588  Register elements,
589  Register key,
590  Register r0,
591  Register r1,
592  Register r2,
593  Register result);
594 
595 
596  // ---------------------------------------------------------------------------
597  // Allocation support
598 
599  // Allocate an object in new space or old pointer space. If the given space
600  // is exhausted control continues at the gc_required label. The allocated
601  // object is returned in result and end of the new object is returned in
602  // result_end. The register scratch can be passed as no_reg in which case
603  // an additional object reference will be added to the reloc info. The
604  // returned pointers in result and result_end have not yet been tagged as
605  // heap objects. If result_contains_top_on_entry is true the content of
606  // result is known to be the allocation top on entry (could be result_end
607  // from a previous call). If result_contains_top_on_entry is true scratch
608  // should be no_reg as it is never used.
609  void Allocate(int object_size,
610  Register result,
611  Register result_end,
612  Register scratch,
613  Label* gc_required,
614  AllocationFlags flags);
615 
616  void Allocate(int header_size,
617  ScaleFactor element_size,
618  Register element_count,
619  RegisterValueType element_count_type,
620  Register result,
621  Register result_end,
622  Register scratch,
623  Label* gc_required,
624  AllocationFlags flags);
625 
626  void Allocate(Register object_size,
627  Register result,
628  Register result_end,
629  Register scratch,
630  Label* gc_required,
631  AllocationFlags flags);
632 
633  // Undo allocation in new space. The object passed and objects allocated after
634  // it will no longer be allocated. Make sure that no pointers are left to the
635  // object(s) no longer allocated as they would be invalid when allocation is
636  // un-done.
637  void UndoAllocationInNewSpace(Register object);
638 
639  // Allocate a heap number in new space with undefined value. The
640  // register scratch2 can be passed as no_reg; the others must be
641  // valid registers. Returns tagged pointer in result register, or
642  // jumps to gc_required if new space is full.
643  void AllocateHeapNumber(Register result,
644  Register scratch1,
645  Register scratch2,
646  Label* gc_required);
647 
648  // Allocate a sequential string. All the header fields of the string object
649  // are initialized.
650  void AllocateTwoByteString(Register result,
651  Register length,
652  Register scratch1,
653  Register scratch2,
654  Register scratch3,
655  Label* gc_required);
656  void AllocateAsciiString(Register result,
657  Register length,
658  Register scratch1,
659  Register scratch2,
660  Register scratch3,
661  Label* gc_required);
662  void AllocateAsciiString(Register result,
663  int length,
664  Register scratch1,
665  Register scratch2,
666  Label* gc_required);
667 
668  // Allocate a raw cons string object. Only the map field of the result is
669  // initialized.
670  void AllocateTwoByteConsString(Register result,
671  Register scratch1,
672  Register scratch2,
673  Label* gc_required);
674  void AllocateAsciiConsString(Register result,
675  Register scratch1,
676  Register scratch2,
677  Label* gc_required);
678 
679  // Allocate a raw sliced string object. Only the map field of the result is
680  // initialized.
681  void AllocateTwoByteSlicedString(Register result,
682  Register scratch1,
683  Register scratch2,
684  Label* gc_required);
685  void AllocateAsciiSlicedString(Register result,
686  Register scratch1,
687  Register scratch2,
688  Label* gc_required);
689 
690  // Copy memory, byte-by-byte, from source to destination. Not optimized for
691  // long or aligned copies.
692  // The contents of index and scratch are destroyed.
693  void CopyBytes(Register source,
694  Register destination,
695  Register length,
696  Register scratch);
697 
698  // Initialize fields with filler values. Fields starting at |start_offset|
699  // not including end_offset are overwritten with the value in |filler|. At
700  // the end of the loop, |start_offset| takes the value of |end_offset|.
701  void InitializeFieldsWithFiller(Register start_offset,
702  Register end_offset,
703  Register filler);
704 
705  // ---------------------------------------------------------------------------
706  // Support functions.
707 
708  // Check a boolean-bit of a Smi field.
709  void BooleanBitTest(Register object, int field_offset, int bit_index);
710 
711  // Check if result is zero and op is negative.
712  void NegativeZeroTest(Register result, Register op, Label* then_label);
713 
714  // Check if result is zero and any of op1 and op2 are negative.
715  // Register scratch is destroyed, and it must be different from op2.
716  void NegativeZeroTest(Register result, Register op1, Register op2,
717  Register scratch, Label* then_label);
718 
719  // Try to get the function prototype of a function and put the value in
720  // the result register. Checks that the function really is a
721  // function and jumps to the miss label if the fast checks fail. The
722  // function register will be untouched; the other registers may be
723  // clobbered.
724  void TryGetFunctionPrototype(Register function,
725  Register result,
726  Register scratch,
727  Label* miss,
728  bool miss_on_bound_function = false);
729 
730  // Generates code for reporting that an illegal operation has
731  // occurred.
732  void IllegalOperation(int num_arguments);
733 
734  // Picks out an array index from the hash field.
735  // Register use:
736  // hash - holds the index's hash. Clobbered.
737  // index - holds the overwritten index on exit.
738  void IndexFromHash(Register hash, Register index);
739 
740  // ---------------------------------------------------------------------------
741  // Runtime calls
742 
743  // Call a code stub. Generate the code if necessary.
744  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
745 
746  // Tail call a code stub (jump). Generate the code if necessary.
747  void TailCallStub(CodeStub* stub);
748 
749  // Return from a code stub after popping its arguments.
750  void StubReturn(int argc);
751 
752  // Call a runtime routine.
753  void CallRuntime(const Runtime::Function* f,
754  int num_arguments,
755  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
756  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
757  const Runtime::Function* function = Runtime::FunctionForId(id);
758  CallRuntime(function, function->nargs, kSaveFPRegs);
759  }
760 
761  // Convenience function: Same as above, but takes the fid instead.
762  void CallRuntime(Runtime::FunctionId id,
763  int num_arguments,
764  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
765  CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
766  }
767 
768  // Convenience function: call an external reference.
769  void CallExternalReference(ExternalReference ref, int num_arguments);
770 
771  // Tail call of a runtime routine (jump).
772  // Like JumpToExternalReference, but also takes care of passing the number
773  // of parameters.
774  void TailCallExternalReference(const ExternalReference& ext,
775  int num_arguments,
776  int result_size);
777 
778  // Convenience function: tail call a runtime routine (jump).
779  void TailCallRuntime(Runtime::FunctionId fid,
780  int num_arguments,
781  int result_size);
782 
783  // Before calling a C-function from generated code, align arguments on stack.
784  // After aligning the frame, arguments must be stored in esp[0], esp[4],
785  // etc., not pushed. The argument count assumes all arguments are word sized.
786  // Some compilers/platforms require the stack to be aligned when calling
787  // C++ code.
788  // Needs a scratch register to do some arithmetic. This register will be
789  // trashed.
790  void PrepareCallCFunction(int num_arguments, Register scratch);
791 
792  // Calls a C function and cleans up the space for arguments allocated
793  // by PrepareCallCFunction. The called function is not allowed to trigger a
794  // garbage collection, since that might move the code and invalidate the
795  // return address (unless this is somehow accounted for by the called
796  // function).
797  void CallCFunction(ExternalReference function, int num_arguments);
798  void CallCFunction(Register function, int num_arguments);
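  // Editorial example (illustrative; ExternalReference::some_function is a
  // placeholder, registers are arbitrary): calling a two-argument C function
  // with the protocol described above.
  //   __ PrepareCallCFunction(2, eax);              // align, reserve two slots
  //   __ mov(Operand(esp, 0 * kPointerSize), edx);  // first argument
  //   __ mov(Operand(esp, 1 * kPointerSize), ecx);  // second argument
  //   __ CallCFunction(ExternalReference::some_function(isolate()), 2);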
799 
800  // Prepares stack to put arguments (aligns and so on). Reserves
801  // space for return value if needed (assumes the return value is a handle).
802  // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
803  // etc. Saves context (esi). If space was reserved for return value then
804  // stores the pointer to the reserved slot into esi.
805  void PrepareCallApiFunction(int argc);
806 
807  // Calls an API function. Allocates HandleScope, extracts returned value
808  // from handle and propagates exceptions. Clobbers ebx, edi and
809  // caller-save registers. Restores context. On return removes
810  // stack_space * kPointerSize (GCed).
811  void CallApiFunctionAndReturn(Register function_address,
812  Address thunk_address,
813  Operand thunk_last_arg,
814  int stack_space,
815  Operand return_value_operand,
816  Operand* context_restore_operand);
817 
818  // Jump to a runtime routine.
819  void JumpToExternalReference(const ExternalReference& ext);
820 
821  // ---------------------------------------------------------------------------
822  // Utilities
823 
824  void Ret();
825 
826  // Return and drop arguments from stack, where the number of arguments
827  // may be bigger than 2^16 - 1. Requires a scratch register.
828  void Ret(int bytes_dropped, Register scratch);
829 
830  // Emit code to discard a non-negative number of pointer-sized elements
831  // from the stack, clobbering only the esp register.
832  void Drop(int element_count);
833 
834  void Call(Label* target) { call(target); }
835  void Push(Register src) { push(src); }
836  void Pop(Register dst) { pop(dst); }
837 
838  // Emit call to the code we are currently generating.
839  void CallSelf() {
840  Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
841  call(self, RelocInfo::CODE_TARGET);
842  }
843 
844  // Move if the registers are not identical.
845  void Move(Register target, Register source);
846 
847  // Move a constant into a destination using the most efficient encoding.
848  void Move(Register dst, const Immediate& x);
849  void Move(const Operand& dst, const Immediate& x);
850 
851  // Move an immediate into an XMM register.
852  void Move(XMMRegister dst, double val);
853 
854  // Push a handle value.
855  void Push(Handle<Object> handle) { push(Immediate(handle)); }
856  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
857 
858  Handle<Object> CodeObject() {
859  ASSERT(!code_object_.is_null());
860  return code_object_;
861  }
862 
863  // Insert code to verify that the x87 stack has the specified depth (0-7)
864  void VerifyX87StackDepth(uint32_t depth);
865 
866  // Emit code for a truncating division by a constant. The dividend register is
867  // unchanged, the result is in edx, and eax gets clobbered.
868  void TruncatingDiv(Register dividend, int32_t divisor);
869 
870  // ---------------------------------------------------------------------------
871  // StatsCounter support
872 
873  void SetCounter(StatsCounter* counter, int value);
874  void IncrementCounter(StatsCounter* counter, int value);
875  void DecrementCounter(StatsCounter* counter, int value);
876  void IncrementCounter(Condition cc, StatsCounter* counter, int value);
877  void DecrementCounter(Condition cc, StatsCounter* counter, int value);
878 
879 
880  // ---------------------------------------------------------------------------
881  // Debugging
882 
883  // Calls Abort(msg) if the condition cc is not satisfied.
884  // Use --debug_code to enable.
885  void Assert(Condition cc, BailoutReason reason);
886 
887  void AssertFastElements(Register elements);
888 
889  // Like Assert(), but always enabled.
890  void Check(Condition cc, BailoutReason reason);
891 
892  // Print a message to stdout and abort execution.
893  void Abort(BailoutReason reason);
894 
895  // Check that the stack is aligned.
896  void CheckStackAlignment();
897 
898  // Verify restrictions about code generated in stubs.
899  void set_generating_stub(bool value) { generating_stub_ = value; }
900  bool generating_stub() { return generating_stub_; }
901  void set_has_frame(bool value) { has_frame_ = value; }
902  bool has_frame() { return has_frame_; }
903  inline bool AllowThisStubCall(CodeStub* stub);
904 
905  // ---------------------------------------------------------------------------
906  // String utilities.
907 
908  // Generate code to do a lookup in the number string cache. If the number in
909  // the register object is found in the cache the generated code falls through
910  // with the result in the result register. The object and the result register
911  // can be the same. If the number is not found in the cache the code jumps to
912  // the label not_found with only the content of register object unchanged.
913  void LookupNumberStringCache(Register object,
914  Register result,
915  Register scratch1,
916  Register scratch2,
917  Label* not_found);
918 
919  // Check whether the instance type represents a flat ASCII string. Jump to the
920  // label if not. If the instance type can be scratched specify same register
921  // for both instance type and scratch.
922  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
923  Register scratch,
924  Label* on_not_flat_ascii_string);
925 
926  // Checks if both objects are sequential ASCII strings, and jumps to label
927  // if either is not.
928  void JumpIfNotBothSequentialAsciiStrings(Register object1,
929  Register object2,
930  Register scratch1,
931  Register scratch2,
932  Label* on_not_flat_ascii_strings);
933 
934  // Checks if the given register or operand is a unique name
935  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
936  Label::Distance distance = Label::kFar) {
937  JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
938  }
939 
940  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
941  Label::Distance distance = Label::kFar);
942 
943  void EmitSeqStringSetCharCheck(Register string,
944  Register index,
945  Register value,
946  uint32_t encoding_mask);
947 
948  static int SafepointRegisterStackIndex(Register reg) {
949  return SafepointRegisterStackIndex(reg.code());
950  }
951 
952  // Activation support.
953  void EnterFrame(StackFrame::Type type);
954  void LeaveFrame(StackFrame::Type type);
955 
956  // Expects object in eax and returns map with validated enum cache
957  // in eax. Assumes that any other register can be used as a scratch.
958  void CheckEnumCache(Label* call_runtime);
959 
960  // AllocationMemento support. Arrays may have an associated
961  // AllocationMemento object that can be checked for in order to pretransition
962  // to another type.
963  // On entry, receiver_reg should point to the array object.
964  // scratch_reg gets clobbered.
965  // If allocation info is present, conditional code is set to equal.
966  void TestJSArrayForAllocationMemento(Register receiver_reg,
967  Register scratch_reg,
968  Label* no_memento_found);
969 
970  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
971  Register scratch_reg,
972  Label* memento_found) {
973  Label no_memento_found;
974  TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
975  &no_memento_found);
976  j(equal, memento_found);
977  bind(&no_memento_found);
978  }
979 
980  // Jumps to found label if a prototype map has dictionary elements.
981  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
982  Register scratch1, Label* found);
983 
984  private:
985  bool generating_stub_;
986  bool has_frame_;
987  // This handle will be patched with the code object on installation.
988  Handle<Object> code_object_;
989 
990  // Helper functions for generating invokes.
991  void InvokePrologue(const ParameterCount& expected,
992  const ParameterCount& actual,
993  Handle<Code> code_constant,
994  const Operand& code_operand,
995  Label* done,
996  bool* definitely_mismatches,
997  InvokeFlag flag,
998  Label::Distance done_distance,
999  const CallWrapper& call_wrapper = NullCallWrapper());
1000 
1001  void EnterExitFramePrologue();
1002  void EnterExitFrameEpilogue(int argc, bool save_doubles);
1003 
1004  void LeaveExitFrameEpilogue(bool restore_context);
1005 
1006  // Allocation support helpers.
1007  void LoadAllocationTopHelper(Register result,
1008  Register scratch,
1009  AllocationFlags flags);
1010 
1011  void UpdateAllocationTopHelper(Register result_end,
1012  Register scratch,
1013  AllocationFlags flags);
1014 
1015  // Helper for PopHandleScope. Allowed to perform a GC and returns
1016  // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
1017  // possibly returns a failure object indicating an allocation failure.
1018  MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
1019  Register scratch,
1020  bool gc_allowed);
1021 
1022  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1023  void InNewSpace(Register object,
1024  Register scratch,
1025  Condition cc,
1026  Label* condition_met,
1027  Label::Distance condition_met_distance = Label::kFar);
1028 
1029  // Helper for finding the mark bits for an address. Afterwards, the
1030  // bitmap register points at the word with the mark bits and the mask
1031  // register holds the position of the first bit. Uses ecx as scratch and leaves addr_reg
1032  // unchanged.
1033  inline void GetMarkBits(Register addr_reg,
1034  Register bitmap_reg,
1035  Register mask_reg);
1036 
1037  // Helper for throwing exceptions. Compute a handler address and jump to
1038  // it. See the implementation for register usage.
1039  void JumpToHandlerEntry();
1040 
1041  // Compute memory operands for safepoint stack slots.
1042  Operand SafepointRegisterSlot(Register reg);
1043  static int SafepointRegisterStackIndex(int reg_code);
1044 
1045  // Needs access to SafepointRegisterStackIndex for compiled frame
1046  // traversal.
1047  friend class StandardFrame;
1048 };
1049 
1050 
1051 // The code patcher is used to patch (typically) small parts of code e.g. for
1052 // debugging and other types of instrumentation. When using the code patcher
1053 // the exact number of bytes specified must be emitted. It is not legal to emit
1054 // relocation information. If any of these constraints are violated it causes
1055 // an assertion failure.
1056 class CodePatcher {
1057  public:
1058  CodePatcher(byte* address, int size);
1059  virtual ~CodePatcher();
1060 
1061  // Macro assembler to emit code.
1062  MacroAssembler* masm() { return &masm_; }
1063 
1064  private:
1065  byte* address_; // The address of the code being patched.
1066  int size_; // Expected size of the patch, in bytes.
1067  MacroAssembler masm_; // Macro assembler used to generate the code.
1068 };
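// Editorial usage sketch (illustrative; the patch site is hypothetical):
// exactly the requested number of bytes must be emitted, as stated above.
//   CodePatcher patcher(address, 1);
//   patcher.masm()->int3();  // emits exactly one byte, so the size check holds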
1069 
1070 
1071 // -----------------------------------------------------------------------------
1072 // Static helper functions.
1073 
1074 // Generate an Operand for loading a field from an object.
1075 inline Operand FieldOperand(Register object, int offset) {
1076  return Operand(object, offset - kHeapObjectTag);
1077 }
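// Editorial example (illustrative): the -kHeapObjectTag adjustment above lets
// a tagged pointer be dereferenced directly, e.g. loading an object's map:
//   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));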
1078 
1079 
1080 // Generate an Operand for loading an indexed field from an object.
1081 inline Operand FieldOperand(Register object,
1082  Register index,
1083  ScaleFactor scale,
1084  int offset) {
1085  return Operand(object, index, scale, offset - kHeapObjectTag);
1086 }
1087 
1088 
1089 inline Operand FixedArrayElementOperand(Register array,
1090  Register index_as_smi,
1091  int additional_offset = 0) {
1092  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
1093  return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
1094 }
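// Editorial note: index_as_smi is the element index already shifted left by
// one (the smi encoding), so scaling it by times_half_pointer_size (2 on ia32)
// yields index * kPointerSize, the byte offset of the element in the array.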
1095 
1096 
1097 inline Operand ContextOperand(Register context, int index) {
1098  return Operand(context, Context::SlotOffset(index));
1099 }
1100 
1101 
1102 inline Operand GlobalObjectOperand() {
1103  return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
1104 }
1105 
1106 
1107 // Generates an Operand for saving parameters after PrepareCallApiFunction.
1108 Operand ApiParameterOperand(int index);
1109 
1110 
1111 #ifdef GENERATED_CODE_COVERAGE
1112 extern void LogGeneratedCodeCoverage(const char* file_line);
1113 #define CODE_COVERAGE_STRINGIFY(x) #x
1114 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1115 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1116 #define ACCESS_MASM(masm) { \
1117  byte* ia32_coverage_function = \
1118  reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
1119  masm->pushfd(); \
1120  masm->pushad(); \
1121  masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
1122  masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
1123  masm->pop(eax); \
1124  masm->popad(); \
1125  masm->popfd(); \
1126  } \
1127  masm->
1128 #else
1129 #define ACCESS_MASM(masm) masm->
1130 #endif
1131 
1132 
1133 } } // namespace v8::internal
1134 
1135 #endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
byte * Address
Definition: globals.h:186
void cmp(Register src1, const Operand &src2, Condition cond=al)
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch)
void Push(Handle< Object > handle)
void SlowTruncateToI(Register result_reg, Register input_reg, int offset=HeapNumber::kValueOffset-kHeapObjectTag)
void ClampUint8(Register output_reg, Register input_reg)
void LoadPowerOf2(XMMRegister dst, Register scratch, int power)
void SmiUntag(Register reg, Label *is_smi)
Isolate * isolate() const
Definition: assembler.h:62
const intptr_t kSmiTagMask
Definition: v8.h:5480
static int SlotOffset(int index)
Definition: contexts.h:498
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void RecordWriteContextSlot(Register context, int offset, Register value, Register scratch, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
const Register r3
void TruncateX87TOSToI(Register result_reg)
void Cvtsi2sd(XMMRegister dst, Register src)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
CodePatcher(byte *address, int instructions, FlushICache flush_cache=FLUSH)
void CmpHeapObject(Register reg, Handle< HeapObject > object)
Condition IsObjectStringType(Register obj, Register type, Condition cond=al)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void LoadUint32NoSSE2(Register src)
void AssertString(Register object)
static TypeFeedbackId None()
Definition: utils.h:1149
void RecordWriteArray(Register array, Register value, Register index, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void JumpToExternalReference(const ExternalReference &builtin)
void LoadInstanceDescriptors(Register map, Register descriptors)
void AllocateAsciiString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
STATIC_ASSERT((reg_zero==(reg_not_zero^1))&&(reg_bit_clear==(reg_bit_set^1))&&(always==(never^1)))
void JumpIfNotSmi(Register value, Label *not_smi_label, Label::Distance distance=Label::kFar)
void JumpIfNotUniqueName(Register reg, Label *not_unique_name, Label::Distance distance=Label::kFar)
void JumpIfNotBothSequentialAsciiStrings(Register first, Register second, Register scratch1, Register scratch2, Label *not_flat_ascii_strings)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register t0, Register t1, Register t2)
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label *found)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type)
void Store(Register src, const MemOperand &dst, Representation r)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
TypeImpl< ZoneTypeConfig > Type
bool AllowThisStubCall(CodeStub *stub)
int int32_t
Definition: unicode.cc:47
void EnterFrame(StackFrame::Type type, bool load_constant_pool=false)
void StoreToSafepointRegisterSlot(Register src, Register dst)
void LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context)
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
static const Function * FunctionForId(FunctionId id)
Definition: runtime.cc:15154
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void ThrowIf(Condition cc, BailoutReason reason)
#define ASSERT(condition)
Definition: checks.h:329
void RecordWriteForMap(Register object, Handle< Map > map, Register scratch1, Register scratch2, SaveFPRegsMode save_fp)
void CompareMap(Register obj, Register scratch, Handle< Map > map, Label *early_success)
void AssertNotSmi(Register object)
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void PushTryHandler(StackHandler::Kind kind, int handler_index)
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label *no_map_match)
void CheckPageFlagForMap(Handle< Map > map, int mask, Condition cc, Label *condition_met, Label::Distance condition_met_distance=Label::kFar)
void NumberOfOwnDescriptors(Register dst, Register map)
MemOperand GlobalObjectOperand()
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
const Register r2
void sar(Register dst, uint8_t imm8)
void Load(Register dst, const MemOperand &src, Representation r)
MemOperand ContextOperand(Register context, int index)
void AssertSmi(Register object)
void CompareRoot(Register obj, Heap::RootListIndex index)
void PushHeapObject(Handle< HeapObject > object)
void JumpIfNotUniqueName(Register reg, Label *not_unique_name)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
static int SafepointRegisterStackIndex(Register reg)
void TruncatingDiv(Register result, Register dividend, int32_t divisor)
void EmitSeqStringSetCharCheck(Register string, Register index, Register value, uint32_t encoding_mask)
kInstanceClassNameOffset flag
Definition: objects-inl.h:5115
void Abort(BailoutReason msg)
void IsObjectNameType(Register object, Register scratch, Label *fail)
void JumpIfInNewSpace(Register object, Register scratch, Label *branch, Label::Distance distance=Label::kFar)
uint8_t byte
Definition: globals.h:185
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void LeaveApiExitFrame(bool restore_context)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found)
void EnumLength(Register dst, Register map)
void CheckFastElements(Register map, Register scratch, Label *fail)
#define MUST_USE_RESULT
Definition: globals.h:381
static const int kValueOffset
Definition: objects.h:1971
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a 
stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
Definition: flags.cc:665
void LoadGlobalFunction(int index, Register function)
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label *condition_met)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
PrologueFrameMode
Definition: frames.h:957
const int kPointerSize
Definition: globals.h:268
void CallStub(CodeStub *stub, TypeFeedbackId ast_id=TypeFeedbackId::None(), Condition cond=al)
void CallCFunction(ExternalReference function, int num_arguments)
void SafePush(const Immediate &x)
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
Operand FieldOperand(Register object, int offset)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void CheckFastSmiElements(Register map, Register scratch, Label *fail)
const int kHeapObjectTag
Definition: v8.h:5473
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void TruncateHeapNumberToI(Register result, Register object)
bool IsUnsafeImmediate(const Immediate &x)
void Allocate(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void LoadHeapObject(Register dst, Handle< HeapObject > object)
void Throw(Register value)
void Move(Register dst, Handle< Object > value)
void EnterApiExitFrame(int argc)
void PrepareCallApiFunction(int argc)
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
const Register r0
void NegativeZeroTest(Register result, Register op, Label *then_label)
MacroAssembler(Isolate *isolate, void *buffer, int size)
Operand FixedArrayElementOperand(Register array, Register index_as_smi, int additional_offset=0)
void LoadContext(Register dst, int context_chain_length)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
void CallApiFunctionAndReturn(Register function_address, ExternalReference thunk_ref, int stack_space, MemOperand return_value_operand, MemOperand *context_restore_operand)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch, Label::Distance distance=Label::kFar)
void AssertFastElements(Register elements)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void CheckMapDeprecated(Handle< Map > map, Register scratch, Label *if_deprecated)
void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode, Label *conversion_failed, Label::Distance dst=Label::kFar)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
void ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Drop(int count, Condition cond=al)
InvokeFlag
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
static const int kHeaderSize
Definition: objects.h:3016
void IllegalOperation(int num_arguments)
AllocationFlags
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label *not_found)
void DoubleToI(Register result_reg, XMMRegister input_reg, XMMRegister scratch, MinusZeroMode minus_zero_mode, Label *conversion_failed, Label::Distance dst=Label::kFar)
void TruncateDoubleToI(Register result, DwVfpRegister double_input)
void LoadObject(Register result, Handle< Object > object)
void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp, MinusZeroMode minus_zero_mode, Label *lost_precision)
const Register r1
void CallRuntimeSaveDoubles(Runtime::FunctionId id)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
void ThrowUncatchable(Register value)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, Register scratch)
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void GetNumberHash(Register t0, Register scratch)
void CallRuntime(const Runtime::Function *f, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void JumpIfSmi(Operand value, Label *smi_label, Label::Distance distance=Label::kFar)
Operand ApiParameterOperand(int index)
const int kSmiTagSize
Definition: v8.h:5479
void UndoAllocationInNewSpace(Register object, Register scratch)
void Prologue(PrologueFrameMode frame_mode)
void LoadFromSafepointRegisterSlot(Register dst, Register src)
void test(Register reg, const Immediate &imm)
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, Register scratch_reg, Label *memento_found)
const Register esi
void VerifyX87StackDepth(uint32_t depth)
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
const int kSmiTag
Definition: v8.h:5478
void Check(Condition cond, BailoutReason reason)
void CallRuntime(Runtime::FunctionId id, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void Assert(Condition cond, BailoutReason reason)
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, LowDwVfpRegister double_scratch, Label *fail, int elements_offset=0)
void TailCallStub(CodeStub *stub, Condition cond=al)
void CmpObject(Register reg, Handle< Object > object)
HeapObject *obj
void BooleanBitTest(Register object, int field_offset, int bit_index)
void AssertName(Register object)
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
int LeaveFrame(StackFrame::Type type)
void IndexFromHash(Register hash, Register index)
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
void EnterExitFrame(bool save_doubles, int stack_space=0)
void InitializeFieldsWithFiller(Register start_offset, Register end_offset, Register filler)
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
void SafeMove(Register dst, const Immediate &x)
void CmpObjectType(Register heap_object, InstanceType type, Register map)
void CmpInstanceType(Register map, InstanceType type)
void JumpIfSmi(Register value, Label *smi_label, Label::Distance distance=Label::kFar)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
void CheckEnumCache(Register null_value, Label *call_runtime)
void AssertUndefinedOrAllocationSite(Register object, Register scratch)
void AssertNumber(Register object)
const Register r4