V8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
macro-assembler-x64.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29 #define V8_X64_MACRO_ASSEMBLER_X64_H_
30 
31 #include "assembler.h"
32 #include "frames.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // Flags used for the AllocateInNewSpace functions.
39 enum AllocationFlags {
40  // No special flags.
41  NO_ALLOCATION_FLAGS = 0,
42  // Return the pointer to the allocated object already tagged as a heap object.
43  TAG_OBJECT = 1 << 0,
44  // The content of the result register already contains the allocation top in
45  // new space.
46  RESULT_CONTAINS_TOP = 1 << 1
47 };
48 
49 
50 // Default scratch register used by MacroAssembler (and other code that needs
51 // a spare register). The register isn't callee saved, and is not used by the
52 // function calling convention.
53 const Register kScratchRegister = { 10 }; // r10.
54 const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
55 const Register kRootRegister = { 13 }; // r13 (callee save).
56 // Value of smi in kSmiConstantRegister.
57 const int kSmiConstantRegisterValue = 1;
58 // Actual value of root register is offset from the root array's start
59 // to take advantage of negative 8-bit displacement values.
60 const int kRootRegisterBias = 128;
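The bias exists for encoding density; a short sketch of the resulting addressing arithmetic (illustrative, not a line from this header):

    // With kRootRegister pointing kRootRegisterBias bytes past the start of
    // the root array (see InitializeRootRegister() below), a root at byte
    // offset `off` from the array's start is reached as
    //   Operand(kRootRegister, off - kRootRegisterBias)
    // which keeps offsets in [-128, 127] encodable as 8-bit displacements.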
61 
62 // Convenience for platform-independent signatures.
63 typedef Operand MemOperand;
64 
65 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
66 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
67 
68 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
69 
70 // Forward declaration.
71 class JumpTarget;
72 
73 struct SmiIndex {
74  SmiIndex(Register index_register, ScaleFactor scale)
75  : reg(index_register),
76  scale(scale) {}
77  Register reg;
78  ScaleFactor scale;
79 };
80 
81 
82 // MacroAssembler implements a collection of frequently used macros.
83 class MacroAssembler: public Assembler {
84  public:
85  // The isolate parameter can be NULL if the macro assembler should
86  // not use isolate-dependent functionality. In this case, it's the
87  // responsibility of the caller to never invoke such a function on the
88  // macro assembler.
89  MacroAssembler(Isolate* isolate, void* buffer, int size);
90 
91  // Prevent the use of the RootArray during the lifetime of this
92  // scope object.
93  class NoRootArrayScope BASE_EMBEDDED {
94  public:
95  explicit NoRootArrayScope(MacroAssembler* assembler)
96  : variable_(&assembler->root_array_available_),
97  old_value_(assembler->root_array_available_) {
98  assembler->root_array_available_ = false;
99  }
100  ~NoRootArrayScope() {
101  *variable_ = old_value_;
102  }
103  private:
104  bool* variable_;
105  bool old_value_;
106  };
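A minimal usage sketch, assuming `masm` is a MacroAssembler*:

    {
      MacroAssembler::NoRootArrayScope no_roots(masm);
      // Code emitted here must not address memory via kRootRegister,
      // e.g. while the root register is itself being (re)initialized.
    }  // root_array_available_ is restored on scope exit.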
107 
108  // Operand pointing to an external reference.
109  // May emit code to set up the scratch register. The operand is
110  // only guaranteed to be correct as long as the scratch register
111  // isn't changed.
112  // If the operand is used more than once, use a scratch register
113  // that is guaranteed not to be clobbered.
114  Operand ExternalOperand(ExternalReference reference,
115  Register scratch = kScratchRegister);
116  // Loads and stores the value of an external reference.
117  // Special case code for load and store to take advantage of
118  // load_rax/store_rax if possible/necessary.
119  // For other operations, just use:
120  // Operand operand = ExternalOperand(extref);
121  // operation(operand, ..);
122  void Load(Register destination, ExternalReference source);
123  void Store(ExternalReference destination, Register source);
124  // Loads the address of the external reference into the destination
125  // register.
126  void LoadAddress(Register destination, ExternalReference source);
127  // Returns the size of the code generated by LoadAddress.
128  // Used by CallSize(ExternalReference) to find the size of a call.
129  int LoadAddressSize(ExternalReference source);
130  // Pushes the address of the external reference onto the stack.
131  void PushAddress(ExternalReference source);
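The Load/Store pair supports a simple read-modify-write of an external cell; a hedged sketch (`counter_ref` is a hypothetical ExternalReference):

    masm->Load(rax, counter_ref);    // may use the load_rax fast path
    masm->addq(rax, Immediate(1));
    masm->Store(counter_ref, rax);   // may use the store_rax fast path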
132 
133  // Operations on roots in the root-array.
134  void LoadRoot(Register destination, Heap::RootListIndex index);
135  void StoreRoot(Register source, Heap::RootListIndex index);
136  // Load a root value where the index (or part of it) is variable.
137  // The variable_offset register is added to the fixed_offset value
138  // to get the index into the root-array.
139  void LoadRootIndexed(Register destination,
140  Register variable_offset,
141  int fixed_offset);
142  void CompareRoot(Register with, Heap::RootListIndex index);
143  void CompareRoot(const Operand& with, Heap::RootListIndex index);
144  void PushRoot(Heap::RootListIndex index);
145 
146  // These functions do not arrange the registers in any particular order so
147  // they are not useful for calls that can cause a GC. The caller can
148  // exclude up to 3 registers that do not need to be saved and restored.
149  void PushCallerSaved(SaveFPRegsMode fp_mode,
150  Register exclusion1 = no_reg,
151  Register exclusion2 = no_reg,
152  Register exclusion3 = no_reg);
153  void PopCallerSaved(SaveFPRegsMode fp_mode,
154  Register exclusion1 = no_reg,
155  Register exclusion2 = no_reg,
156  Register exclusion3 = no_reg);
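A hedged sketch of the intended pairing (kDontSaveFPRegs is the SaveFPRegsMode that skips the XMM registers):

    masm->PushCallerSaved(kDontSaveFPRegs, rax);  // rax excluded from the save set
    // ... emit code that may clobber caller-saved registers ...
    masm->PopCallerSaved(kDontSaveFPRegs, rax);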
157 
158 // ---------------------------------------------------------------------------
159 // GC Support
160 
161 
162  enum RememberedSetFinalAction {
163  kReturnAtEnd,
164  kFallThroughAtEnd
165  };
166 
167  // Record in the remembered set the fact that we have a pointer to new space
168  // at the address pointed to by the addr register. Only works if addr is not
169  // in new space.
170  void RememberedSetHelper(Register object, // Used for debug code.
171  Register addr,
172  Register scratch,
173  SaveFPRegsMode save_fp,
174  RememberedSetFinalAction and_then);
175 
176  void CheckPageFlag(Register object,
177  Register scratch,
178  int mask,
179  Condition cc,
180  Label* condition_met,
181  Label::Distance condition_met_distance = Label::kFar);
182 
183  // Check if object is in new space. Jumps if the object is not in new space.
184  // The register scratch can be object itself, but scratch will be clobbered.
185  void JumpIfNotInNewSpace(Register object,
186  Register scratch,
187  Label* branch,
188  Label::Distance distance = Label::kFar) {
189  InNewSpace(object, scratch, not_equal, branch, distance);
190  }
191 
192  // Check if object is in new space. Jumps if the object is in new space.
193  // The register scratch can be object itself, but it will be clobbered.
194  void JumpIfInNewSpace(Register object,
195  Register scratch,
196  Label* branch,
197  Label::Distance distance = Label::kFar) {
198  InNewSpace(object, scratch, equal, branch, distance);
199  }
200 
201  // Check if an object has the black incremental marking color. Also uses rcx!
202  void JumpIfBlack(Register object,
203  Register scratch0,
204  Register scratch1,
205  Label* on_black,
206  Label::Distance on_black_distance = Label::kFar);
207 
208  // Detects conservatively whether an object is data-only, i.e. it does not
209  // need to be scanned by the garbage collector.
210  void JumpIfDataObject(Register value,
211  Register scratch,
212  Label* not_data_object,
213  Label::Distance not_data_object_distance);
214 
215  // Checks the color of an object. If the object is already grey or black
216  // then we just fall through, since it is already live. If it is white and
217  // we can determine that it doesn't need to be scanned, then we just mark it
218  // black and fall through. For the rest we jump to the label so the
219  // incremental marker can fix its assumptions.
220  void EnsureNotWhite(Register object,
221  Register scratch1,
222  Register scratch2,
223  Label* object_is_white_and_not_data,
224  Label::Distance distance);
225 
226  // Notify the garbage collector that we wrote a pointer into an object.
227  // |object| is the object being stored into, |value| is the object being
228  // stored. value and scratch registers are clobbered by the operation.
229  // The offset is the offset from the start of the object, not the offset from
230  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
231  void RecordWriteField(
232  Register object,
233  int offset,
234  Register value,
235  Register scratch,
236  SaveFPRegsMode save_fp,
237  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
238  SmiCheck smi_check = INLINE_SMI_CHECK);
239 
240  // As above, but the offset has the tag presubtracted. For use with
241  // Operand(reg, off).
242  void RecordWriteContextSlot(
243  Register context,
244  int offset,
245  Register value,
246  Register scratch,
247  SaveFPRegsMode save_fp,
248  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
249  SmiCheck smi_check = INLINE_SMI_CHECK) {
250  RecordWriteField(context,
251  offset + kHeapObjectTag,
252  value,
253  scratch,
254  save_fp,
255  remembered_set_action,
256  smi_check);
257  }
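The `offset + kHeapObjectTag` adjustment is the inverse of the one baked into FieldOperand at the bottom of this header; illustratively (`obj` and `off` are placeholders, kHeapObjectTag == 1):

    // Two equivalent ways to address the same field of a tagged object:
    Operand a = FieldOperand(obj, off);              // subtracts the tag
    Operand b = Operand(obj, off - kHeapObjectTag);  // manual form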
258 
259  // Notify the garbage collector that we wrote a pointer into a fixed array.
260  // |array| is the array being stored into, |value| is the
261  // object being stored. |index| is the array index represented as a non-smi.
262  // All registers are clobbered by the operation. RecordWriteArray
263  // filters out smis, so it does not update the write barrier if the
264  // value is a smi.
265  void RecordWriteArray(
266  Register array,
267  Register value,
268  Register index,
269  SaveFPRegsMode save_fp,
270  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
271  SmiCheck smi_check = INLINE_SMI_CHECK);
272 
273  // For page containing |object| mark region covering |address|
274  // dirty. |object| is the object being stored into, |value| is the
275  // object being stored. The address and value registers are clobbered by the
276  // operation. RecordWrite filters out smis so it does not update
277  // the write barrier if the value is a smi.
278  void RecordWrite(
279  Register object,
280  Register address,
281  Register value,
282  SaveFPRegsMode save_fp,
283  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
284  SmiCheck smi_check = INLINE_SMI_CHECK);
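A typical store-plus-write-barrier sequence built from these helpers (a sketch; JSObject::kPropertiesOffset comes from objects.h, and rbx is assumed free as scratch):

    masm->movq(FieldOperand(rcx, JSObject::kPropertiesOffset), rax);
    masm->RecordWriteField(rcx, JSObject::kPropertiesOffset, rax, rbx,
                           kDontSaveFPRegs);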
285 
286 #ifdef ENABLE_DEBUGGER_SUPPORT
287  // ---------------------------------------------------------------------------
288  // Debugger Support
289 
290  void DebugBreak();
291 #endif
292 
293  // Enter specific kind of exit frame; either in normal or
294  // debug mode. Expects the number of arguments in register rax and
295  // sets up the number of arguments in register rdi and the pointer
296  // to the first argument in register rsi.
297  //
298  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
299  // accessible via StackSpaceOperand.
300  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
301 
302  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
303  // memory (not GCed) on the stack accessible via StackSpaceOperand.
304  void EnterApiExitFrame(int arg_stack_space);
305 
306  // Leave the current exit frame. Expects/provides the return value in
307  // register rax:rdx (untouched) and the pointer to the first
308  // argument in register rsi.
309  void LeaveExitFrame(bool save_doubles = false);
310 
311  // Leave the current exit frame. Expects/provides the return value in
312  // register rax (untouched).
313  void LeaveApiExitFrame();
314 
315  // Push and pop the registers that can hold pointers.
316  void PushSafepointRegisters() { Pushad(); }
317  void PopSafepointRegisters() { Popad(); }
318  // Store the value in register src in the safepoint register stack
319  // slot for register dst.
320  void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
321  void StoreToSafepointRegisterSlot(Register dst, Register src);
322  void LoadFromSafepointRegisterSlot(Register dst, Register src);
323 
324  void InitializeRootRegister() {
325  ExternalReference roots_array_start =
326  ExternalReference::roots_array_start(isolate());
327  movq(kRootRegister, roots_array_start);
328  addq(kRootRegister, Immediate(kRootRegisterBias));
329  }
330 
331  // ---------------------------------------------------------------------------
332  // JavaScript invokes
333 
334  // Set up call kind marking in rcx. The method takes rcx as an
335  // explicit first parameter to make the code more readable at the
336  // call sites.
337  void SetCallKind(Register dst, CallKind kind);
338 
339  // Invoke the JavaScript function code by either calling or jumping.
340  void InvokeCode(Register code,
341  const ParameterCount& expected,
342  const ParameterCount& actual,
343  InvokeFlag flag,
344  const CallWrapper& call_wrapper,
345  CallKind call_kind);
346 
347  void InvokeCode(Handle<Code> code,
348  const ParameterCount& expected,
349  const ParameterCount& actual,
350  RelocInfo::Mode rmode,
351  InvokeFlag flag,
352  const CallWrapper& call_wrapper,
353  CallKind call_kind);
354 
355  // Invoke the JavaScript function in the given register. Changes the
356  // current context to the context in the function before invoking.
357  void InvokeFunction(Register function,
358  const ParameterCount& actual,
359  InvokeFlag flag,
360  const CallWrapper& call_wrapper,
361  CallKind call_kind);
362 
363  void InvokeFunction(Handle<JSFunction> function,
364  const ParameterCount& actual,
365  InvokeFlag flag,
366  const CallWrapper& call_wrapper,
367  CallKind call_kind);
368 
369  // Invoke specified builtin JavaScript function. Adds an entry to
370  // the unresolved list if the name does not resolve.
371  void InvokeBuiltin(Builtins::JavaScript id,
372  InvokeFlag flag,
373  const CallWrapper& call_wrapper = NullCallWrapper());
374 
375  // Store the function for the given builtin in the target register.
376  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
377 
378  // Store the code object for the given builtin in the target register.
379  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
380 
381 
382  // ---------------------------------------------------------------------------
383  // Smi tagging, untagging and operations on tagged smis.
384 
385  void InitializeSmiConstantRegister() {
386  movq(kSmiConstantRegister,
387  reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
388  RelocInfo::NONE);
389  }
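For orientation: on x64, V8 stores a smi's 32-bit payload in the upper word of the tagged 64-bit value (kSmiShift == kSmiTagSize + kSmiShiftSize == 32; see the private section below). A self-contained sketch of the tagging arithmetic, independent of V8:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiShift = 32;  // x64 smi shift
      int32_t value = 1;
      // Tag: payload moves to the high word, low word is all zero.
      uint64_t tagged = static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;
      assert(tagged == 0x100000000ull);  // Smi::FromInt(1)
      // Untag: shifting back down recovers the payload (cf. SmiToInteger32).
      assert(static_cast<int32_t>(tagged >> kSmiShift) == value);
      return 0;
    }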
390 
391  // Conversions between tagged smi values and non-tagged integer values.
392 
393  // Tag an integer value. The result must be known to be a valid smi value.
394  // Only uses the low 32 bits of the src register. Sets the N and Z flags
395  // based on the value of the resulting smi.
396  void Integer32ToSmi(Register dst, Register src);
397 
398  // Stores an integer32 value into a memory field that already holds a smi.
399  void Integer32ToSmiField(const Operand& dst, Register src);
400 
401  // Adds constant to src and tags the result as a smi.
402  // Result must be a valid smi.
403  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
404 
405  // Convert smi to 32-bit integer. I.e., not sign extended into
406  // high 32 bits of destination.
407  void SmiToInteger32(Register dst, Register src);
408  void SmiToInteger32(Register dst, const Operand& src);
409 
410  // Convert smi to 64-bit integer (sign extended if necessary).
411  void SmiToInteger64(Register dst, Register src);
412  void SmiToInteger64(Register dst, const Operand& src);
413 
414  // Multiply a positive smi's integer value by a power of two.
415  // Provides result as 64-bit integer value.
416  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
417  Register src,
418  int power);
419 
420  // Divide a positive smi's integer value by a power of two.
421  // Provides result as 32-bit integer value.
422  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
423  Register src,
424  int power);
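These have cheap encodings because the payload sits at bit 32: for a positive smi, untagging and multiplying by a power of two can collapse into a single shift (a property of the representation, not a quote of the implementation):

    // tagged == value << 32, so value * 2^power == tagged >> (32 - power)
    // for 0 <= power <= 32 and non-negative value.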
425 
426  // Perform the logical or of two smi values and return a smi value.
427  // If either argument is not a smi, jump to on_not_smis and retain
428  // the original values of source registers. The destination register
429  // may be changed if it's not one of the source registers.
430  void SmiOrIfSmis(Register dst,
431  Register src1,
432  Register src2,
433  Label* on_not_smis,
434  Label::Distance near_jump = Label::kFar);
435 
436 
437  // Simple comparison of smis. Both sides must be known smis to use these,
438  // otherwise use Cmp.
439  void SmiCompare(Register smi1, Register smi2);
440  void SmiCompare(Register dst, Smi* src);
441  void SmiCompare(Register dst, const Operand& src);
442  void SmiCompare(const Operand& dst, Register src);
443  void SmiCompare(const Operand& dst, Smi* src);
444  // Compare the int32 in src register to the value of the smi stored at dst.
445  void SmiCompareInteger32(const Operand& dst, Register src);
446  // Sets sign and zero flags depending on value of smi in register.
447  void SmiTest(Register src);
448 
449  // Functions performing a check on a known or potential smi. Returns
450  // a condition that is satisfied if the check is successful.
451 
452  // Is the value a tagged smi.
453  Condition CheckSmi(Register src);
454  Condition CheckSmi(const Operand& src);
455 
456  // Is the value a non-negative tagged smi.
457  Condition CheckNonNegativeSmi(Register src);
458 
459  // Are both values tagged smis.
460  Condition CheckBothSmi(Register first, Register second);
461 
462  // Are both values non-negative tagged smis.
463  Condition CheckBothNonNegativeSmi(Register first, Register second);
464 
465  // Is either value a tagged smi.
466  Condition CheckEitherSmi(Register first,
467  Register second,
468  Register scratch = kScratchRegister);
469 
470  // Is the value the minimum smi value (since we are using
471  // two's complement numbers, negating the value is known to yield
472  // a non-smi value).
473  Condition CheckIsMinSmi(Register src);
474 
475  // Checks whether a 32-bit integer value is valid for conversion
476  // to a smi.
477  Condition CheckInteger32ValidSmiValue(Register src);
478 
479  // Checks whether a 32-bit unsigned integer value is valid for
480  // conversion to a smi.
481  Condition CheckUInteger32ValidSmiValue(Register src);
482 
483  // Check whether src is a Smi, and set dst to zero if it is a smi,
484  // and to one if it isn't.
485  void CheckSmiToIndicator(Register dst, Register src);
486  void CheckSmiToIndicator(Register dst, const Operand& src);
487 
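The Check* helpers return a Condition to feed the assembler's conditional jump; a brief sketch:

    Label smi_case;
    Condition is_smi = masm->CheckSmi(rax);
    masm->j(is_smi, &smi_case);  // j(Condition, Label*) is an Assembler primitive
    // ... non-smi path ...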
488  // Test-and-jump functions. Typically combines a check function
489  // above with a conditional jump.
490 
491  // Jump if the value cannot be represented by a smi.
492  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
493  Label::Distance near_jump = Label::kFar);
494 
495  // Jump if the unsigned integer value cannot be represented by a smi.
496  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
497  Label::Distance near_jump = Label::kFar);
498 
499  // Jump to label if the value is a tagged smi.
500  void JumpIfSmi(Register src,
501  Label* on_smi,
502  Label::Distance near_jump = Label::kFar);
503 
504  // Jump to label if the value is not a tagged smi.
505  void JumpIfNotSmi(Register src,
506  Label* on_not_smi,
507  Label::Distance near_jump = Label::kFar);
508 
509  // Jump to label if the value is not a non-negative tagged smi.
510  void JumpUnlessNonNegativeSmi(Register src,
511  Label* on_not_smi,
512  Label::Distance near_jump = Label::kFar);
513 
514  // Jump to label if the value, which must be a tagged smi, has value equal
515  // to the constant.
516  void JumpIfSmiEqualsConstant(Register src,
517  Smi* constant,
518  Label* on_equals,
519  Label::Distance near_jump = Label::kFar);
520 
521  // Jump if either or both registers are not smi values.
522  void JumpIfNotBothSmi(Register src1,
523  Register src2,
524  Label* on_not_both_smi,
525  Label::Distance near_jump = Label::kFar);
526 
527  // Jump if either or both registers are not non-negative smi values.
528  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
529  Label* on_not_both_smi,
530  Label::Distance near_jump = Label::kFar);
531 
532  // Operations on tagged smi values.
533 
534  // Smis represent a subset of integers. The subset is always equivalent to
535  // a two's complement interpretation of a fixed number of bits.
536 
537  // Optimistically adds an integer constant to a supposed smi.
538  // If the src is not a smi, or the result is not a smi, jump to
539  // the label.
540  void SmiTryAddConstant(Register dst,
541  Register src,
542  Smi* constant,
543  Label* on_not_smi_result,
544  Label::Distance near_jump = Label::kFar);
545 
546  // Add an integer constant to a tagged smi, giving a tagged smi as result.
547  // No overflow testing on the result is done.
548  void SmiAddConstant(Register dst, Register src, Smi* constant);
549 
550  // Add an integer constant to a tagged smi, giving a tagged smi as result.
551  // No overflow testing on the result is done.
552  void SmiAddConstant(const Operand& dst, Smi* constant);
553 
554  // Add an integer constant to a tagged smi, giving a tagged smi as result,
555  // or jumping to a label if the result cannot be represented by a smi.
556  void SmiAddConstant(Register dst,
557  Register src,
558  Smi* constant,
559  Label* on_not_smi_result,
560  Label::Distance near_jump = Label::kFar);
561 
562  // Subtract an integer constant from a tagged smi, giving a tagged smi as
563  // result. No testing on the result is done. Sets the N and Z flags
564  // based on the value of the resulting integer.
565  void SmiSubConstant(Register dst, Register src, Smi* constant);
566 
567  // Subtract an integer constant from a tagged smi, giving a tagged smi as
568  // result, or jumping to a label if the result cannot be represented by a smi.
569  void SmiSubConstant(Register dst,
570  Register src,
571  Smi* constant,
572  Label* on_not_smi_result,
573  Label::Distance near_jump = Label::kFar);
574 
575  // Negating a smi can give a negative zero or a too-large positive value.
576  // NOTICE: This operation jumps on success, not failure!
577  void SmiNeg(Register dst,
578  Register src,
579  Label* on_smi_result,
580  Label::Distance near_jump = Label::kFar);
581 
582  // Adds smi values and returns the result as a smi.
583  // If dst is src1, then src1 will be destroyed, even if
584  // the operation is unsuccessful.
585  void SmiAdd(Register dst,
586  Register src1,
587  Register src2,
588  Label* on_not_smi_result,
589  Label::Distance near_jump = Label::kFar);
590  void SmiAdd(Register dst,
591  Register src1,
592  const Operand& src2,
593  Label* on_not_smi_result,
594  Label::Distance near_jump = Label::kFar);
595 
596  void SmiAdd(Register dst,
597  Register src1,
598  Register src2);
599 
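A sketch of the checked variant in use (an assumed, typical fast-path shape):

    Label slow;
    // rax := rax + rbx; both inputs must already be smis. Branches to
    // `slow` if the sum is not representable as a smi.
    masm->SmiAdd(rax, rax, rbx, &slow, Label::kNear);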
600  // Subtracts smi values and returns the result as a smi.
601  // If dst is src1, then src1 will be destroyed, even if
602  // the operation is unsuccessful.
603  void SmiSub(Register dst,
604  Register src1,
605  Register src2,
606  Label* on_not_smi_result,
607  Label::Distance near_jump = Label::kFar);
608 
609  void SmiSub(Register dst,
610  Register src1,
611  Register src2);
612 
613  void SmiSub(Register dst,
614  Register src1,
615  const Operand& src2,
616  Label* on_not_smi_result,
617  Label::Distance near_jump = Label::kFar);
618 
619  void SmiSub(Register dst,
620  Register src1,
621  const Operand& src2);
622 
623  // Multiplies smi values and returns the result as a smi,
624  // if possible.
625  // If dst is src1, then src1 will be destroyed, even if
626  // the operation is unsuccessful.
627  void SmiMul(Register dst,
628  Register src1,
629  Register src2,
630  Label* on_not_smi_result,
631  Label::Distance near_jump = Label::kFar);
632 
633  // Divides one smi by another and returns the quotient.
634  // Clobbers rax and rdx registers.
635  void SmiDiv(Register dst,
636  Register src1,
637  Register src2,
638  Label* on_not_smi_result,
639  Label::Distance near_jump = Label::kFar);
640 
641  // Divides one smi by another and returns the remainder.
642  // Clobbers rax and rdx registers.
643  void SmiMod(Register dst,
644  Register src1,
645  Register src2,
646  Label* on_not_smi_result,
647  Label::Distance near_jump = Label::kFar);
648 
649  // Bitwise operations.
650  void SmiNot(Register dst, Register src);
651  void SmiAnd(Register dst, Register src1, Register src2);
652  void SmiOr(Register dst, Register src1, Register src2);
653  void SmiXor(Register dst, Register src1, Register src2);
654  void SmiAndConstant(Register dst, Register src1, Smi* constant);
655  void SmiOrConstant(Register dst, Register src1, Smi* constant);
656  void SmiXorConstant(Register dst, Register src1, Smi* constant);
657 
658  void SmiShiftLeftConstant(Register dst,
659  Register src,
660  int shift_value);
661  void SmiShiftLogicalRightConstant(Register dst,
662  Register src,
663  int shift_value,
664  Label* on_not_smi_result,
665  Label::Distance near_jump = Label::kFar);
666  void SmiShiftArithmeticRightConstant(Register dst,
667  Register src,
668  int shift_value);
669 
670  // Shifts a smi value to the left, and returns the result if that is a smi.
671  // Uses and clobbers rcx, so dst may not be rcx.
672  void SmiShiftLeft(Register dst,
673  Register src1,
674  Register src2);
675  // Shifts a smi value to the right, shifting in zero bits at the top, and
676  // returns the unsigned interpretation of the result if that is a smi.
677  // Uses and clobbers rcx, so dst may not be rcx.
678  void SmiShiftLogicalRight(Register dst,
679  Register src1,
680  Register src2,
681  Label* on_not_smi_result,
682  Label::Distance near_jump = Label::kFar);
683  // Shifts a smi value to the right, sign extending the top, and
684  // returns the signed interpretation of the result. That will always
685  // be a valid smi value, since it's numerically smaller than the
686  // original.
687  // Uses and clobbers rcx, so dst may not be rcx.
688  void SmiShiftArithmeticRight(Register dst,
689  Register src1,
690  Register src2);
691 
692  // Specialized operations
693 
694  // Select the non-smi register of two registers where exactly one is a
695  // smi. If neither is a smi, jump to the failure label.
696  void SelectNonSmi(Register dst,
697  Register src1,
698  Register src2,
699  Label* on_not_smis,
700  Label::Distance near_jump = Label::kFar);
701 
702  // Converts, if necessary, a smi to a combination of number and
703  // multiplier to be used as a scaled index.
704  // The src register contains a *positive* smi value. The shift is the
705  // power of two to multiply the index value by (e.g.
706  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
707  // The returned index register may be either src or dst, depending
708  // on what is most efficient. If src and dst are different registers,
709  // src is always unchanged.
710  SmiIndex SmiToIndex(Register dst, Register src, int shift);
711 
712  // Converts a positive smi to a negative index.
713  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
714 
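An illustrative sketch of the returned SmiIndex feeding a scaled memory operand (register choices are arbitrary):

    // Index an array of pointers by a positive smi held in rax.
    SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
    masm->movq(rcx, Operand(rdx, index.reg, index.scale, 0));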
715  // Add the value of a smi in memory to an int32 register.
716  // Sets flags as a normal add.
717  void AddSmiField(Register dst, const Operand& src);
718 
719  // Basic Smi operations.
720  void Move(Register dst, Smi* source) {
721  LoadSmiConstant(dst, source);
722  }
723 
724  void Move(const Operand& dst, Smi* source) {
725  Register constant = GetSmiConstant(source);
726  movq(dst, constant);
727  }
728 
729  void Push(Smi* smi);
730  void Test(const Operand& dst, Smi* source);
731 
732 
733  // ---------------------------------------------------------------------------
734  // String macros.
735 
736  // If object is a string, its map is loaded into object_map.
737  void JumpIfNotString(Register object,
738  Register object_map,
739  Label* not_string,
740  Label::Distance near_jump = Label::kFar);
741 
742 
743  void JumpIfNotBothSequentialAsciiStrings(
744  Register first_object,
745  Register second_object,
746  Register scratch1,
747  Register scratch2,
748  Label* on_not_both_flat_ascii,
749  Label::Distance near_jump = Label::kFar);
750 
751  // Check whether the instance type represents a flat ASCII string. Jump to the
752  // label if not. If the instance type can be scratched specify same register
753  // for both instance type and scratch.
754  void JumpIfInstanceTypeIsNotSequentialAscii(
755  Register instance_type,
756  Register scratch,
757  Label* on_not_flat_ascii_string,
758  Label::Distance near_jump = Label::kFar);
759 
760  void JumpIfBothInstanceTypesAreNotSequentialAscii(
761  Register first_object_instance_type,
762  Register second_object_instance_type,
763  Register scratch1,
764  Register scratch2,
765  Label* on_fail,
766  Label::Distance near_jump = Label::kFar);
767 
768  // ---------------------------------------------------------------------------
769  // Macro instructions.
770 
771  // Load a register with a long value as efficiently as possible.
772  void Set(Register dst, int64_t x);
773  void Set(const Operand& dst, int64_t x);
774 
775  // Move if the registers are not identical.
776  void Move(Register target, Register source);
777 
778  // Support for constant splitting.
779  bool IsUnsafeInt(const int x);
780  void SafeMove(Register dst, Smi* src);
781  void SafePush(Smi* src);
782 
783  // Bit-field support.
784  void TestBit(const Operand& dst, int bit_index);
785 
786  // Handle support
787  void Move(Register dst, Handle<Object> source);
788  void Move(const Operand& dst, Handle<Object> source);
789  void Cmp(Register dst, Handle<Object> source);
790  void Cmp(const Operand& dst, Handle<Object> source);
791  void Cmp(Register dst, Smi* src);
792  void Cmp(const Operand& dst, Smi* src);
793  void Push(Handle<Object> source);
794 
795  // Load a heap object and handle the case of new-space objects by
796  // indirecting via a global cell.
797  void LoadHeapObject(Register result, Handle<HeapObject> object);
798  void PushHeapObject(Handle<HeapObject> object);
799 
800  void LoadObject(Register result, Handle<Object> object) {
801  if (object->IsHeapObject()) {
802  LoadHeapObject(result, Handle<HeapObject>::cast(object));
803  } else {
804  Move(result, object);
805  }
806  }
807 
808  // Load a global cell into a register.
809  void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
810 
811  // Emit code to discard a non-negative number of pointer-sized elements
812  // from the stack, clobbering only the rsp register.
813  void Drop(int stack_elements);
814 
815  void Call(Label* target) { call(target); }
816 
817  // Control Flow
818  void Jump(Address destination, RelocInfo::Mode rmode);
819  void Jump(ExternalReference ext);
820  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
821 
822  void Call(Address destination, RelocInfo::Mode rmode);
823  void Call(ExternalReference ext);
824  void Call(Handle<Code> code_object,
825  RelocInfo::Mode rmode,
826  TypeFeedbackId ast_id = TypeFeedbackId::None());
827 
828  // The size of the code generated for different call instructions.
829  int CallSize(Address destination, RelocInfo::Mode rmode) {
830  return kCallInstructionLength;
831  }
832  int CallSize(ExternalReference ext);
833  int CallSize(Handle<Code> code_object) {
834  // Code calls use 32-bit relative addressing.
835  return kShortCallInstructionLength;
836  }
837  int CallSize(Register target) {
838  // Opcode: REX_opt FF /2 m64
839  return (target.high_bit() != 0) ? 3 : 2;
840  }
841  int CallSize(const Operand& target) {
842  // Opcode: REX_opt FF /2 m64
843  return (target.requires_rex() ? 2 : 1) + target.operand_size();
844  }
845 
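For orientation, the sizes above match the standard x86-64 encodings (worked example, not code from this header):

    // call rax  ->  FF D0       (2 bytes; high_bit() == 0)
    // call r8   ->  41 FF D0    (3 bytes; REX.B prefix needed, high_bit() != 0)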
846  // Emit call to the code we are currently generating.
847  void CallSelf() {
848  Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
849  Call(self, RelocInfo::CODE_TARGET);
850  }
851 
852  // Non-x64 instructions.
853  // Push/pop all general purpose registers.
854  // Does not push rsp/rbp nor any of the assembler's special purpose registers
855  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
856  void Pushad();
857  void Popad();
858  // Sets the stack as after performing Popad, without actually loading the
859  // registers.
860  void Dropad();
861 
862  // Compare object type for heap object.
863  // Always use unsigned comparisons: above and below, not less and greater.
864  // Incoming register is heap_object and outgoing register is map.
865  // They may be the same register, and may be kScratchRegister.
866  void CmpObjectType(Register heap_object, InstanceType type, Register map);
867 
868  // Compare instance type for map.
869  // Always use unsigned comparisons: above and below, not less and greater.
870  void CmpInstanceType(Register map, InstanceType type);
871 
872  // Check if a map for a JSObject indicates that the object has fast elements.
873  // Jump to the specified label if it does not.
874  void CheckFastElements(Register map,
875  Label* fail,
876  Label::Distance distance = Label::kFar);
877 
878  // Check if a map for a JSObject indicates that the object can have both smi
879  // and HeapObject elements. Jump to the specified label if it does not.
880  void CheckFastObjectElements(Register map,
881  Label* fail,
882  Label::Distance distance = Label::kFar);
883 
884  // Check if a map for a JSObject indicates that the object has fast smi only
885  // elements. Jump to the specified label if it does not.
886  void CheckFastSmiElements(Register map,
887  Label* fail,
888  Label::Distance distance = Label::kFar);
889 
890  // Check to see if maybe_number can be stored as a double in
891  // FastDoubleElements. If it can, store it at the index specified by index in
892  // the FastDoubleElements array elements, otherwise jump to fail. Note that
893  // index must not be smi-tagged.
894  void StoreNumberToDoubleElements(Register maybe_number,
895  Register elements,
896  Register index,
897  XMMRegister xmm_scratch,
898  Label* fail);
899 
900  // Compare an object's map with the specified map and its transitioned
901  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
902  // result of map compare. If multiple map compares are required, the compare
903  // sequences branches to early_success.
904  void CompareMap(Register obj,
905  Handle<Map> map,
906  Label* early_success,
907  CompareMapMode mode = REQUIRE_EXACT_MAP);
908 
909  // Check if the map of an object is equal to a specified map and branch to
910  // label if not. Skip the smi check if not required (object is known to be a
911  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
912  // against maps that are ElementsKind transition maps of the specified map.
913  void CheckMap(Register obj,
914  Handle<Map> map,
915  Label* fail,
916  SmiCheckType smi_check_type,
917  CompareMapMode mode = REQUIRE_EXACT_MAP);
918 
919  // Check if the map of an object is equal to a specified map and branch to a
920  // specified target if equal. Skip the smi check if not required (object is
921  // known to be a heap object)
922  void DispatchMap(Register obj,
923  Handle<Map> map,
924  Handle<Code> success,
925  SmiCheckType smi_check_type);
926 
927  // Check if the object in register heap_object is a string. Afterwards the
928  // register map contains the object map and the register instance_type
929  // contains the instance_type. The registers map and instance_type can be the
930  // same in which case it contains the instance type afterwards. Either of the
931  // registers map and instance_type can be the same as heap_object.
932  Condition IsObjectStringType(Register heap_object,
933  Register map,
934  Register instance_type);
935 
936  // FCmp compares and pops the two values on top of the FPU stack.
937  // The flag results are similar to integer cmp, but require unsigned
938  // jcc instructions (je, ja, jae, jb, jbe, and jz).
939  void FCmp();
940 
941  void ClampUint8(Register reg);
942 
943  void ClampDoubleToUint8(XMMRegister input_reg,
944  XMMRegister temp_xmm_reg,
945  Register result_reg);
946 
947  void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
948 
949  void LoadInstanceDescriptors(Register map, Register descriptors);
950  void EnumLength(Register dst, Register map);
952 
953  template<typename Field>
954  void DecodeField(Register reg) {
955  static const int shift = Field::kShift + kSmiShift;
956  static const int mask = Field::kMask >> Field::kShift;
957  shr(reg, Immediate(shift));
958  and_(reg, Immediate(mask));
959  shl(reg, Immediate(kSmiShift));
960  }
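A worked example with a hypothetical BitField-style Field where Field::kShift == 3 and Field::kMask == 0xF8 (a 5-bit payload), operating on a smi-tagged word:

    // shr reg, 35    ; drop the smi shift (32) and the field shift (3) together
    // and reg, 0x1F  ; keep the 5 payload bits (0xF8 >> 3)
    // shl reg, 32    ; re-tag the decoded field as a smi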
961 
962  // Abort execution if argument is not a number, enabled via --debug-code.
963  void AssertNumber(Register object);
964 
965  // Abort execution if argument is a smi, enabled via --debug-code.
966  void AssertNotSmi(Register object);
967 
968  // Abort execution if argument is not a smi, enabled via --debug-code.
969  void AssertSmi(Register object);
970  void AssertSmi(const Operand& object);
971 
972  // Abort execution if a 64 bit register containing a 32 bit payload does not
973  // have zeros in the top 32 bits, enabled via --debug-code.
974  void AssertZeroExtended(Register reg);
975 
976  // Abort execution if argument is not a string, enabled via --debug-code.
977  void AssertString(Register object);
978 
979  // Abort execution if argument is not the root value with the given index,
980  // enabled via --debug-code.
981  void AssertRootValue(Register src,
982  Heap::RootListIndex root_value_index,
983  const char* message);
984 
985  // ---------------------------------------------------------------------------
986  // Exception handling
987 
988  // Push a new try handler and link it into try handler chain.
989  void PushTryHandler(StackHandler::Kind kind, int handler_index);
990 
991  // Unlink the stack handler on top of the stack from the try handler chain.
992  void PopTryHandler();
993 
994  // Activate the top handler in the try handler chain and pass the
995  // thrown value.
996  void Throw(Register value);
997 
998  // Propagate an uncatchable exception out of the current JS stack.
999  void ThrowUncatchable(Register value);
1000 
1001  // ---------------------------------------------------------------------------
1002  // Inline caching support
1003 
1004  // Generate code for checking access rights - used for security checks
1005  // on access to global objects across environments. The holder register
1006  // is left untouched, but the scratch register and kScratchRegister,
1007  // which must be different, are clobbered.
1008  void CheckAccessGlobalProxy(Register holder_reg,
1009  Register scratch,
1010  Label* miss);
1011 
1012  void GetNumberHash(Register r0, Register scratch);
1013 
1014  void LoadFromNumberDictionary(Label* miss,
1015  Register elements,
1016  Register key,
1017  Register r0,
1018  Register r1,
1019  Register r2,
1020  Register result);
1021 
1022 
1023  // ---------------------------------------------------------------------------
1024  // Allocation support
1025 
1026  // Allocate an object in new space. If the new space is exhausted control
1027  // continues at the gc_required label. The allocated object is returned in
1028  // result and end of the new object is returned in result_end. The register
1029  // scratch can be passed as no_reg in which case an additional object
1030  // reference will be added to the reloc info. The returned pointers in result
1031  // and result_end have not yet been tagged as heap objects. If
1032  // result_contains_top_on_entry is true the content of result is known to be
1033  // the allocation top on entry (could be result_end from a previous call to
1034  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
1035  // should be no_reg as it is never used.
1036  void AllocateInNewSpace(int object_size,
1037  Register result,
1038  Register result_end,
1039  Register scratch,
1040  Label* gc_required,
1041  AllocationFlags flags);
1042 
1043  void AllocateInNewSpace(int header_size,
1044  ScaleFactor element_size,
1045  Register element_count,
1046  Register result,
1047  Register result_end,
1048  Register scratch,
1049  Label* gc_required,
1050  AllocationFlags flags);
1051 
1052  void AllocateInNewSpace(Register object_size,
1053  Register result,
1054  Register result_end,
1055  Register scratch,
1056  Label* gc_required,
1057  AllocationFlags flags);
1058 
1059  // Undo allocation in new space. The object passed and objects allocated after
1060  // it will no longer be allocated. Make sure that no pointers are left to the
1061  // object(s) no longer allocated as they would be invalid when allocation is
1062  // un-done.
1063  void UndoAllocationInNewSpace(Register object);
1064 
1065  // Allocate a heap number in new space with undefined value. Returns
1066  // tagged pointer in result register, or jumps to gc_required if new
1067  // space is full.
1068  void AllocateHeapNumber(Register result,
1069  Register scratch,
1070  Label* gc_required);
1071 
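A sketch of the fast-path allocation pattern (assumed; HeapNumber::kValueOffset comes from objects.h):

    Label gc_required;
    masm->AllocateHeapNumber(rax, rbx, &gc_required);  // rax := tagged HeapNumber
    masm->movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
    // ... at `gc_required`, fall back to a runtime call ...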
1072  // Allocate a sequential string. All the header fields of the string object
1073  // are initialized.
1074  void AllocateTwoByteString(Register result,
1075  Register length,
1076  Register scratch1,
1077  Register scratch2,
1078  Register scratch3,
1079  Label* gc_required);
1080  void AllocateAsciiString(Register result,
1081  Register length,
1082  Register scratch1,
1083  Register scratch2,
1084  Register scratch3,
1085  Label* gc_required);
1086 
1087  // Allocate a raw cons string object. Only the map field of the result is
1088  // initialized.
1089  void AllocateTwoByteConsString(Register result,
1090  Register scratch1,
1091  Register scratch2,
1092  Label* gc_required);
1093  void AllocateAsciiConsString(Register result,
1094  Register scratch1,
1095  Register scratch2,
1096  Label* gc_required);
1097 
1098  // Allocate a raw sliced string object. Only the map field of the result is
1099  // initialized.
1100  void AllocateTwoByteSlicedString(Register result,
1101  Register scratch1,
1102  Register scratch2,
1103  Label* gc_required);
1104  void AllocateAsciiSlicedString(Register result,
1105  Register scratch1,
1106  Register scratch2,
1107  Label* gc_required);
1108 
1109  // ---------------------------------------------------------------------------
1110  // Support functions.
1111 
1112  // Check if result is zero and op is negative.
1113  void NegativeZeroTest(Register result, Register op, Label* then_label);
1114 
1115  // Check if result is zero and op is negative in code using jump targets.
1116  void NegativeZeroTest(CodeGenerator* cgen,
1117  Register result,
1118  Register op,
1119  JumpTarget* then_target);
1120 
1121  // Check if result is zero and any of op1 and op2 are negative.
1122  // Register scratch is destroyed, and it must be different from op2.
1123  void NegativeZeroTest(Register result, Register op1, Register op2,
1124  Register scratch, Label* then_label);
1125 
1126  // Try to get the function prototype of a function and put the value in
1127  // the result register. Checks that the function really is a
1128  // function and jumps to the miss label if the fast checks fail. The
1129  // function register will be untouched; the other register may be
1130  // clobbered.
1131  void TryGetFunctionPrototype(Register function,
1132  Register result,
1133  Label* miss,
1134  bool miss_on_bound_function = false);
1135 
1136  // Generates code for reporting that an illegal operation has
1137  // occurred.
1138  void IllegalOperation(int num_arguments);
1139 
1140  // Picks out an array index from the hash field.
1141  // Register use:
1142  // hash - holds the index's hash. Clobbered.
1143  // index - holds the overwritten index on exit.
1144  void IndexFromHash(Register hash, Register index);
1145 
1146  // Find the function context up the context chain.
1147  void LoadContext(Register dst, int context_chain_length);
1148 
1149  // Conditionally load the cached Array transitioned map of type
1150  // transitioned_kind from the native context if the map in register
1151  // map_in_out is the cached Array map in the native context of
1152  // expected_kind.
1153  void LoadTransitionedArrayMapConditional(
1154  ElementsKind expected_kind,
1155  ElementsKind transitioned_kind,
1156  Register map_in_out,
1157  Register scratch,
1158  Label* no_map_match);
1159 
1160  // Load the initial map for new Arrays from a JSFunction.
1161  void LoadInitialArrayMap(Register function_in,
1162  Register scratch,
1163  Register map_out,
1164  bool can_have_holes);
1165 
1166  // Load the global function with the given index.
1167  void LoadGlobalFunction(int index, Register function);
1168 
1169  // Load the initial map from the global function. The registers
1170  // function and map can be the same.
1171  void LoadGlobalFunctionInitialMap(Register function, Register map);
1172 
1173  // ---------------------------------------------------------------------------
1174  // Runtime calls
1175 
1176  // Call a code stub.
1177  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
1178 
1179  // Tail call a code stub (jump).
1180  void TailCallStub(CodeStub* stub);
1181 
1182  // Return from a code stub after popping its arguments.
1183  void StubReturn(int argc);
1184 
1185  // Call a runtime routine.
1186  void CallRuntime(const Runtime::Function* f, int num_arguments);
1187 
1188  // Call a runtime function and save the value of XMM registers.
1189  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
1190 
1191  // Convenience function: Same as above, but takes the fid instead.
1192  void CallRuntime(Runtime::FunctionId id, int num_arguments);
1193 
1194  // Convenience function: call an external reference.
1195  void CallExternalReference(const ExternalReference& ext,
1196  int num_arguments);
1197 
1198  // Tail call of a runtime routine (jump).
1199  // Like JumpToExternalReference, but also takes care of passing the number
1200  // of parameters.
1201  void TailCallExternalReference(const ExternalReference& ext,
1202  int num_arguments,
1203  int result_size);
1204 
1205  // Convenience function: tail call a runtime routine (jump).
1206  void TailCallRuntime(Runtime::FunctionId fid,
1207  int num_arguments,
1208  int result_size);
1209 
1210  // Jump to a runtime routine.
1211  void JumpToExternalReference(const ExternalReference& ext, int result_size);
1212 
1213  // Prepares stack to put arguments (aligns and so on). The WIN64 calling
1214  // convention requires putting the pointer to the return value slot into
1215  // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
1216  // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
1217  // inside the exit frame (not GCed) accessible via StackSpaceOperand.
1218  void PrepareCallApiFunction(int arg_stack_space);
1219 
1220  // Calls an API function. Allocates HandleScope, extracts returned value
1221  // from handle and propagates exceptions. Clobbers r14, r15, rbx and
1222  // caller-save registers. Restores context. On return removes
1223  // stack_space * kPointerSize (GCed).
1224  void CallApiFunctionAndReturn(Address function_address, int stack_space);
1225 
1226  // Before calling a C-function from generated code, align arguments on stack.
1227  // After aligning the frame, arguments must be stored in esp[0], esp[4],
1228  // etc., not pushed. The argument count assumes all arguments are word sized.
1229  // The number of slots reserved for arguments depends on platform. On Windows
1230  // stack slots are reserved for the arguments passed in registers. On other
1231  // platforms stack slots are only reserved for the arguments actually passed
1232  // on the stack.
1233  void PrepareCallCFunction(int num_arguments);
1234 
1235  // Calls a C function and cleans up the space for arguments allocated
1236  // by PrepareCallCFunction. The called function is not allowed to trigger a
1237  // garbage collection, since that might move the code and invalidate the
1238  // return address (unless this is somehow accounted for by the called
1239  // function).
1240  void CallCFunction(ExternalReference function, int num_arguments);
1241  void CallCFunction(Register function, int num_arguments);
1242 
1243  // Calculate the number of stack slots to reserve for arguments when calling a
1244  // C function.
1245  int ArgumentStackSlotsForCFunctionCall(int num_arguments);
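A sketch of the call sequence (illustrative; `fun_ref` is a hypothetical ExternalReference to the C entry point, and arguments travel in the platform's ABI registers, rcx/rdx on Win64, rdi/rsi elsewhere):

    masm->PrepareCallCFunction(2);
    // ... place the two arguments in the ABI argument registers ...
    masm->CallCFunction(fun_ref, 2);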
1246 
1247  // ---------------------------------------------------------------------------
1248  // Utilities
1249 
1250  void Ret();
1251 
1252  // Return and drop arguments from stack, where the number of arguments
1253  // may be bigger than 2^16 - 1. Requires a scratch register.
1254  void Ret(int bytes_dropped, Register scratch);
1255 
1256  Handle<Object> CodeObject() {
1257  ASSERT(!code_object_.is_null());
1258  return code_object_;
1259  }
1260 
1261  // Copy length bytes from source to destination.
1262  // Uses scratch register internally (if you have a low-eight register
1263  // free, do use it, otherwise kScratchRegister will be used).
1264  // The min_length is a minimum limit on the value that length will have.
1265  // The algorithm has some special cases that might be omitted if the string
1266  // is known to always be long.
1267  void CopyBytes(Register destination,
1268  Register source,
1269  Register length,
1270  int min_length = 0,
1271  Register scratch = kScratchRegister);
1272 
1273  // Initialize fields with filler values. Fields starting at |start_offset|
1274  // not including end_offset are overwritten with the value in |filler|. At
1275  // the end of the loop, |start_offset| takes the value of |end_offset|.
1276  void InitializeFieldsWithFiller(Register start_offset,
1277  Register end_offset,
1278  Register filler);
1279 
1280 
1281  // ---------------------------------------------------------------------------
1282  // StatsCounter support
1283 
1284  void SetCounter(StatsCounter* counter, int value);
1285  void IncrementCounter(StatsCounter* counter, int value);
1286  void DecrementCounter(StatsCounter* counter, int value);
1287 
1288 
1289  // ---------------------------------------------------------------------------
1290  // Debugging
1291 
1292  // Calls Abort(msg) if the condition cc is not satisfied.
1293  // Use --debug_code to enable.
1294  void Assert(Condition cc, const char* msg);
1295 
1296  void AssertFastElements(Register elements);
1297 
1298  // Like Assert(), but always enabled.
1299  void Check(Condition cc, const char* msg);
1300 
1301  // Print a message to stdout and abort execution.
1302  void Abort(const char* msg);
1303 
1304  // Check that the stack is aligned.
1305  void CheckStackAlignment();
1306 
1307  // Verify restrictions about code generated in stubs.
1308  void set_generating_stub(bool value) { generating_stub_ = value; }
1309  bool generating_stub() { return generating_stub_; }
1310  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
1311  bool allow_stub_calls() { return allow_stub_calls_; }
1312  void set_has_frame(bool value) { has_frame_ = value; }
1313  bool has_frame() { return has_frame_; }
1314  inline bool AllowThisStubCall(CodeStub* stub);
1315 
1316  static int SafepointRegisterStackIndex(Register reg) {
1317  return SafepointRegisterStackIndex(reg.code());
1318  }
1319 
1320  // Activation support.
1321  void EnterFrame(StackFrame::Type type);
1322  void LeaveFrame(StackFrame::Type type);
1323 
1324  // Expects object in rax and returns map with validated enum cache
1325  // in rax. Assumes that any other register can be used as a scratch.
1326  void CheckEnumCache(Register null_value,
1327  Label* call_runtime);
1328 
1329  private:
1330  // Order general registers are pushed by Pushad.
1331  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1332  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
1333  static const int kNumSafepointSavedRegisters = 11;
1334  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1335 
1336  bool generating_stub_;
1337  bool allow_stub_calls_;
1338  bool has_frame_;
1339  bool root_array_available_;
1340 
1341  // Returns a register holding the smi value. The register MUST NOT be
1342  // modified. It may be the "smi 1 constant" register.
1343  Register GetSmiConstant(Smi* value);
1344 
1345  intptr_t RootRegisterDelta(ExternalReference other);
1346 
1347  // Moves the smi value to the destination register.
1348  void LoadSmiConstant(Register dst, Smi* value);
1349 
1350  // This handle will be patched with the code object on installation.
1351  Handle<Object> code_object_;
1352 
1353  // Helper functions for generating invokes.
1354  void InvokePrologue(const ParameterCount& expected,
1355  const ParameterCount& actual,
1356  Handle<Code> code_constant,
1357  Register code_register,
1358  Label* done,
1359  bool* definitely_mismatches,
1360  InvokeFlag flag,
1361  Label::Distance near_jump = Label::kFar,
1362  const CallWrapper& call_wrapper = NullCallWrapper(),
1363  CallKind call_kind = CALL_AS_METHOD);
1364 
1365  void EnterExitFramePrologue(bool save_rax);
1366 
1367  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
1368  // accessible via StackSpaceOperand.
1369  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
1370 
1371  void LeaveExitFrameEpilogue();
1372 
1373  // Allocation support helpers.
1374  // Loads the top of new-space into the result register.
1375  // Otherwise the address of the new-space top is loaded into scratch (if
1376  // scratch is valid), and the new-space top is loaded into result.
1377  void LoadAllocationTopHelper(Register result,
1378  Register scratch,
1379  AllocationFlags flags);
1380  // Update allocation top with value in result_end register.
1381  // If scratch is valid, it contains the address of the allocation top.
1382  void UpdateAllocationTopHelper(Register result_end, Register scratch);
1383 
1384  // Helper for PopHandleScope. Allowed to perform a GC and returns
1385  // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
1386  // possibly returns a failure object indicating an allocation failure.
1387  Object* PopHandleScopeHelper(Register saved,
1388  Register scratch,
1389  bool gc_allowed);
1390 
1391  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1392  void InNewSpace(Register object,
1393  Register scratch,
1394  Condition cc,
1395  Label* branch,
1396  Label::Distance distance = Label::kFar);
1397 
1398  // Helper for finding the mark bits for an address. Afterwards, the
1399  // bitmap register points at the word with the mark bits and the mask
1400  // the position of the first bit. Uses rcx as scratch and leaves addr_reg
1401  // unchanged.
1402  inline void GetMarkBits(Register addr_reg,
1403  Register bitmap_reg,
1404  Register mask_reg);
1405 
1406  // Helper for throwing exceptions. Compute a handler address and jump to
1407  // it. See the implementation for register usage.
1408  void JumpToHandlerEntry();
1409 
1410  // Compute memory operands for safepoint stack slots.
1411  Operand SafepointRegisterSlot(Register reg);
1412  static int SafepointRegisterStackIndex(int reg_code) {
1413  return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
1414  }
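A worked example of this index math, assuming kNumSafepointRegisters == 16 on x64 and the Pushad order documented above:

    // rax is pushed first by Pushad, so kSafepointPushRegisterIndices[rax] == 0
    // and its slot index is 16 - 0 - 1 == 15, the slot farthest from the final rsp.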
1415 
1416  // Needs access to SafepointRegisterStackIndex for optimized frame
1417  // traversal.
1418  friend class OptimizedFrame;
1419 };
1420 
1421 
1422 // The code patcher is used to patch (typically) small parts of code e.g. for
1423 // debugging and other types of instrumentation. When using the code patcher
1424  // the exact number of bytes specified must be emitted. It is not legal to
1425  // emit relocation information. If any of these constraints are violated,
1426  // it causes an assertion to fail.
1427 class CodePatcher {
1428  public:
1429  CodePatcher(byte* address, int size);
1430  virtual ~CodePatcher();
1431 
1432  // Macro assembler to emit code.
1433  MacroAssembler* masm() { return &masm_; }
1434 
1435  private:
1436  byte* address_; // The address of the code being patched.
1437  int size_; // Number of bytes of the expected patch size.
1438  MacroAssembler masm_; // Macro assembler used to generate the code.
1439 };
1440 
1441 
1442 // -----------------------------------------------------------------------------
1443 // Static helper functions.
1444 
1445 // Generate an Operand for loading a field from an object.
1446 inline Operand FieldOperand(Register object, int offset) {
1447  return Operand(object, offset - kHeapObjectTag);
1448 }
1449 
1450 
1451 // Generate an Operand for loading an indexed field from an object.
1452 inline Operand FieldOperand(Register object,
1453  Register index,
1454  ScaleFactor scale,
1455  int offset) {
1456  return Operand(object, index, scale, offset - kHeapObjectTag);
1457 }
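A sketch of FieldOperand in use (assumed; JSArray::kLengthOffset is defined in objects.h):

    // rax holds a tagged JSArray; load its length (a smi) into rbx.
    masm->movq(rbx, FieldOperand(rax, JSArray::kLengthOffset));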
1458 
1459 
1460 inline Operand ContextOperand(Register context, int index) {
1461  return Operand(context, Context::SlotOffset(index));
1462 }
1463 
1464 
1465 inline Operand GlobalObjectOperand() {
1466  return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
1467 }
1468 
1469 
1470 // Provides access to exit frame stack space (not GCed).
1471 inline Operand StackSpaceOperand(int index) {
1472 #ifdef _WIN64
1473  const int kShadowSpace = 4;
1474  return Operand(rsp, (index + kShadowSpace) * kPointerSize);
1475 #else
1476  return Operand(rsp, index * kPointerSize);
1477 #endif
1478 }
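The four-slot adjustment reflects the Win64 ABI's 32-byte "shadow space" that a caller must reserve for the callee; concretely:

    // Win64:    StackSpaceOperand(0) -> Operand(rsp, (0 + 4) * kPointerSize) == rsp + 32
    // System V: StackSpaceOperand(0) -> Operand(rsp, 0)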
1479 
1480 
1481 
1482 #ifdef GENERATED_CODE_COVERAGE
1483 extern void LogGeneratedCodeCoverage(const char* file_line);
1484 #define CODE_COVERAGE_STRINGIFY(x) #x
1485 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1486 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1487 #define ACCESS_MASM(masm) { \
1488  byte* x64_coverage_function = \
1489  reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
1490  masm->pushfd(); \
1491  masm->pushad(); \
1492  masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
1493  masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
1494  masm->pop(rax); \
1495  masm->popad(); \
1496  masm->popfd(); \
1497  } \
1498  masm->
1499 #else
1500 #define ACCESS_MASM(masm) masm->
1501 #endif
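V8's code generators conventionally bind this macro to a two-underscore shorthand, so every emitted instruction is routed through the coverage hook when it is compiled in:

    #define __ ACCESS_MASM(masm)

    void Generate(MacroAssembler* masm) {
      __ movq(rax, rbx);  // plain build: expands to masm->movq(rax, rbx);
      __ ret(0);          // coverage build: logs file:line before each call
    }

    #undef __
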
1502 
1503 } } // namespace v8::internal
1504 
1505 #endif // V8_X64_MACRO_ASSEMBLER_X64_H_