v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
macro-assembler-x64.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29 #define V8_X64_MACRO_ASSEMBLER_X64_H_
30 
31 #include "assembler.h"
32 #include "frames.h"
33 #include "v8globals.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // Default scratch register used by MacroAssembler (and other code that needs
39 // a spare register). The register isn't callee saved, and isn't used by the
40 // function calling convention.
41 const Register kScratchRegister = { 10 }; // r10.
42 const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
43 const Register kRootRegister = { 13 }; // r13 (callee save).
44 // Value of smi in kSmiConstantRegister.
45 const int kSmiConstantRegisterValue = 1;
46 // Actual value of root register is offset from the root array's start
47 // to take advantage of negative 8-bit displacement values.
48 const int kRootRegisterBias = 128;
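// Illustrative sketch: because kRootRegister is biased past the start of the
// root array, entries near the start are reachable with a signed 8-bit
// displacement, e.g.
//   Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)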
49 
50 // Convenience for platform-independent signatures.
51 typedef Operand MemOperand;
52 
53 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
54 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
55 
56 enum SmiOperationConstraint {
57  PRESERVE_SOURCE_REGISTER,
58  BAILOUT_ON_NO_OVERFLOW,
59  BAILOUT_ON_OVERFLOW,
60  NUMBER_OF_CONSTRAINTS
61 };
62 
63 STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
64 
65 class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
66  public:
67  SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
68  explicit SmiOperationExecutionMode(byte bits)
69  : EnumSet<SmiOperationConstraint, byte>(bits) { }
70 };
71 
72 bool AreAliased(Register r1, Register r2, Register r3, Register r4);
73 
74 // Forward declaration.
75 class JumpTarget;
76 
77 struct SmiIndex {
78  SmiIndex(Register index_register, ScaleFactor scale)
79  : reg(index_register),
80  scale(scale) {}
81  Register reg;
82  ScaleFactor scale;
83 };
84 
85 
86 // MacroAssembler implements a collection of frequently used macros.
87 class MacroAssembler: public Assembler {
88  public:
89  // The isolate parameter can be NULL if the macro assembler should
90  // not use isolate-dependent functionality. In this case, it's the
91  // responsibility of the caller to never invoke such functions on the
92  // macro assembler.
93  MacroAssembler(Isolate* isolate, void* buffer, int size);
94 
95  // Prevent the use of the RootArray during the lifetime of this
96  // scope object.
97  class NoRootArrayScope BASE_EMBEDDED {
98  public:
99  explicit NoRootArrayScope(MacroAssembler* assembler)
100  : variable_(&assembler->root_array_available_),
101  old_value_(assembler->root_array_available_) {
102  assembler->root_array_available_ = false;
103  }
104  ~NoRootArrayScope() {
105  *variable_ = old_value_;
106  }
107  private:
108  bool* variable_;
109  bool old_value_;
110  };
111 
112  // Operand pointing to an external reference.
113  // May emit code to set up the scratch register. The operand is
114  // only guaranteed to be correct as long as the scratch register
115  // isn't changed.
116  // If the operand is used more than once, use a scratch register
117  // that is guaranteed not to be clobbered.
118  Operand ExternalOperand(ExternalReference reference,
119  Register scratch = kScratchRegister);
120  // Loads and stores the value of an external reference.
121  // Special case code for load and store to take advantage of
122  // load_rax/store_rax if possible/necessary.
123  // For other operations, just use:
124  // Operand operand = ExternalOperand(extref);
125  // operation(operand, ..);
126  void Load(Register destination, ExternalReference source);
127  void Store(ExternalReference destination, Register source);
128  // Loads the address of the external reference into the destination
129  // register.
130  void LoadAddress(Register destination, ExternalReference source);
131  // Returns the size of the code generated by LoadAddress.
132  // Used by CallSize(ExternalReference) to find the size of a call.
133  int LoadAddressSize(ExternalReference source);
134  // Pushes the address of the external reference onto the stack.
135  void PushAddress(ExternalReference source);
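// Usage sketch (illustrative; 'counter_ref' is a stand-in reference):
//   masm->Load(rax, counter_ref);    // may take the load_rax fast path
//   masm->addl(rax, Immediate(1));
//   masm->Store(counter_ref, rax);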
136 
137  // Operations on roots in the root-array.
138  void LoadRoot(Register destination, Heap::RootListIndex index);
139  void StoreRoot(Register source, Heap::RootListIndex index);
140  // Load a root value where the index (or part of it) is variable.
141  // The variable_offset register is added to the fixed_offset value
142  // to get the index into the root-array.
143  void LoadRootIndexed(Register destination,
144  Register variable_offset,
145  int fixed_offset);
146  void CompareRoot(Register with, Heap::RootListIndex index);
147  void CompareRoot(const Operand& with, Heap::RootListIndex index);
148  void PushRoot(Heap::RootListIndex index);
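// Usage sketch (illustrative; 'is_undefined' is a stand-in label):
//   masm->CompareRoot(rax, Heap::kUndefinedValueRootIndex);
//   masm->j(equal, &is_undefined);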
149 
150  // These functions do not arrange the registers in any particular order so
151  // they are not useful for calls that can cause a GC. The caller can
152  // exclude up to 3 registers that do not need to be saved and restored.
153  void PushCallerSaved(SaveFPRegsMode fp_mode,
154  Register exclusion1 = no_reg,
155  Register exclusion2 = no_reg,
156  Register exclusion3 = no_reg);
157  void PopCallerSaved(SaveFPRegsMode fp_mode,
158  Register exclusion1 = no_reg,
159  Register exclusion2 = no_reg,
160  Register exclusion3 = no_reg);
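// Usage sketch (illustrative): preserve caller-saved registers around code
// that clobbers them, keeping the live result in rax untouched:
//   masm->PushCallerSaved(kSaveFPRegs, rax);
//   // ... clobbering code ...
//   masm->PopCallerSaved(kSaveFPRegs, rax);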
161 
162 // ---------------------------------------------------------------------------
163 // GC Support
164 
165 
166  enum RememberedSetFinalAction {
167  kReturnAtEnd,
168  kFallThroughAtEnd
169  };
170 
171  // Record in the remembered set the fact that we have a pointer to new space
172  // at the address pointed to by the addr register. Only works if addr is not
173  // in new space.
174  void RememberedSetHelper(Register object, // Used for debug code.
175  Register addr,
176  Register scratch,
177  SaveFPRegsMode save_fp,
178  RememberedSetFinalAction and_then);
179 
180  void CheckPageFlag(Register object,
181  Register scratch,
182  int mask,
183  Condition cc,
184  Label* condition_met,
185  Label::Distance condition_met_distance = Label::kFar);
186 
187  void CheckMapDeprecated(Handle<Map> map,
188  Register scratch,
189  Label* if_deprecated);
190 
191  // Check if object is in new space. Jumps if the object is not in new space.
192  // The register scratch can be object itself, but scratch will be clobbered.
193  void JumpIfNotInNewSpace(Register object,
194  Register scratch,
195  Label* branch,
196  Label::Distance distance = Label::kFar) {
197  InNewSpace(object, scratch, not_equal, branch, distance);
198  }
199 
200  // Check if object is in new space. Jumps if the object is in new space.
201  // The register scratch can be object itself, but it will be clobbered.
202  void JumpIfInNewSpace(Register object,
203  Register scratch,
204  Label* branch,
205  Label::Distance distance = Label::kFar) {
206  InNewSpace(object, scratch, equal, branch, distance);
207  }
208 
209  // Check if an object has the black incremental marking color. Also uses rcx!
210  void JumpIfBlack(Register object,
211  Register scratch0,
212  Register scratch1,
213  Label* on_black,
214  Label::Distance on_black_distance = Label::kFar);
215 
216  // Detects conservatively whether an object is data-only, i.e. it does not
217  // need to be scanned by the garbage collector.
218  void JumpIfDataObject(Register value,
219  Register scratch,
220  Label* not_data_object,
221  Label::Distance not_data_object_distance);
222 
223  // Checks the color of an object. If the object is already grey or black
224  // then we just fall through, since it is already live. If it is white and
225  // we can determine that it doesn't need to be scanned, then we just mark it
226  // black and fall through. For the rest we jump to the label so the
227  // incremental marker can fix its assumptions.
228  void EnsureNotWhite(Register object,
229  Register scratch1,
230  Register scratch2,
231  Label* object_is_white_and_not_data,
232  Label::Distance distance);
233 
234  // Notify the garbage collector that we wrote a pointer into an object.
235  // |object| is the object being stored into, |value| is the object being
236  // stored. value and scratch registers are clobbered by the operation.
237  // The offset is the offset from the start of the object, not the offset from
238  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
239  void RecordWriteField(
240  Register object,
241  int offset,
242  Register value,
243  Register scratch,
244  SaveFPRegsMode save_fp,
245  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
246  SmiCheck smi_check = INLINE_SMI_CHECK);
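// Typical pattern (illustrative): store a tagged value into a field, then
// emit the write barrier for that field:
//   masm->movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
//   masm->RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
//                          kDontSaveFPRegs);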
247 
248  // As above, but the offset has the tag presubtracted. For use with
249  // Operand(reg, off).
250  void RecordWriteContextSlot(
251  Register context,
252  int offset,
253  Register value,
254  Register scratch,
255  SaveFPRegsMode save_fp,
256  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
257  SmiCheck smi_check = INLINE_SMI_CHECK) {
258  RecordWriteField(context,
259  offset + kHeapObjectTag,
260  value,
261  scratch,
262  save_fp,
263  remembered_set_action,
264  smi_check);
265  }
266 
267  // Notify the garbage collector that we wrote a pointer into a fixed array.
268  // |array| is the array being stored into, |value| is the
269  // object being stored. |index| is the array index represented as a non-smi.
270  // All registers are clobbered by the operation. RecordWriteArray
271  // filters out smis, so it does not update the write barrier if the
272  // value is a smi.
273  void RecordWriteArray(
274  Register array,
275  Register value,
276  Register index,
277  SaveFPRegsMode save_fp,
278  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
279  SmiCheck smi_check = INLINE_SMI_CHECK);
280 
281  // For page containing |object| mark region covering |address|
282  // dirty. |object| is the object being stored into, |value| is the
283  // object being stored. The address and value registers are clobbered by the
284  // operation. RecordWrite filters out smis so it does not update
285  // the write barrier if the value is a smi.
286  void RecordWrite(
287  Register object,
288  Register address,
289  Register value,
290  SaveFPRegsMode save_fp,
291  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
292  SmiCheck smi_check = INLINE_SMI_CHECK);
293 
294 #ifdef ENABLE_DEBUGGER_SUPPORT
295  // ---------------------------------------------------------------------------
296  // Debugger Support
297 
298  void DebugBreak();
299 #endif
300 
301  // Generates function and stub prologue code.
302  void Prologue(PrologueFrameMode frame_mode);
303 
304  // Enter specific kind of exit frame; either in normal or
305  // debug mode. Expects the number of arguments in register rax and
306  // sets up the number of arguments in register rdi and the pointer
307  // to the first argument in register rsi.
308  //
309  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
310  // accessible via StackSpaceOperand.
311  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
312 
313  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
314  // memory (not GCed) on the stack accessible via StackSpaceOperand.
315  void EnterApiExitFrame(int arg_stack_space);
316 
317  // Leave the current exit frame. Expects/provides the return value in
318  // register rax:rdx (untouched) and the pointer to the first
319  // argument in register rsi.
320  void LeaveExitFrame(bool save_doubles = false);
321 
322  // Leave the current exit frame. Expects/provides the return value in
323  // register rax (untouched).
324  void LeaveApiExitFrame(bool restore_context);
325 
326  // Push and pop the registers that can hold pointers.
327  void PushSafepointRegisters() { Pushad(); }
328  void PopSafepointRegisters() { Popad(); }
329  // Store the value in register src in the safepoint register stack
330  // slot for register dst.
331  void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
332  void StoreToSafepointRegisterSlot(Register dst, Register src);
333  void LoadFromSafepointRegisterSlot(Register dst, Register src);
334 
335  void InitializeRootRegister() {
336  ExternalReference roots_array_start =
337  ExternalReference::roots_array_start(isolate());
338  Move(kRootRegister, roots_array_start);
339  addp(kRootRegister, Immediate(kRootRegisterBias));
340  }
341 
342  // ---------------------------------------------------------------------------
343  // JavaScript invokes
344 
345  // Invoke the JavaScript function code by either calling or jumping.
346  void InvokeCode(Register code,
347  const ParameterCount& expected,
348  const ParameterCount& actual,
349  InvokeFlag flag,
350  const CallWrapper& call_wrapper);
351 
352  // Invoke the JavaScript function in the given register. Changes the
353  // current context to the context in the function before invoking.
354  void InvokeFunction(Register function,
355  const ParameterCount& actual,
356  InvokeFlag flag,
357  const CallWrapper& call_wrapper);
358 
359  void InvokeFunction(Register function,
360  const ParameterCount& expected,
361  const ParameterCount& actual,
362  InvokeFlag flag,
363  const CallWrapper& call_wrapper);
364 
365  void InvokeFunction(Handle<JSFunction> function,
366  const ParameterCount& expected,
367  const ParameterCount& actual,
368  InvokeFlag flag,
369  const CallWrapper& call_wrapper);
370 
371  // Invoke specified builtin JavaScript function. Adds an entry to
372  // the unresolved list if the name does not resolve.
373  void InvokeBuiltin(Builtins::JavaScript id,
374  InvokeFlag flag,
375  const CallWrapper& call_wrapper = NullCallWrapper());
376 
377  // Store the function for the given builtin in the target register.
378  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
379 
380  // Store the code object for the given builtin in the target register.
381  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
382 
383 
384  // ---------------------------------------------------------------------------
385  // Smi tagging, untagging and operations on tagged smis.
386 
387  // Support for constant splitting.
388  bool IsUnsafeInt(const int32_t x);
389  void SafeMove(Register dst, Smi* src);
390  void SafePush(Smi* src);
391 
392  void InitializeSmiConstantRegister() {
393  Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
394  Assembler::RelocInfoNone());
395  }
396 
397  // Conversions between tagged smi values and non-tagged integer values.
398 
399  // Tag an integer value. The result must be known to be a valid smi value.
400  // Only uses the low 32 bits of the src register. Sets the N and Z flags
401  // based on the value of the resulting smi.
402  void Integer32ToSmi(Register dst, Register src);
403 
404  // Stores an integer32 value into a memory field that already holds a smi.
405  void Integer32ToSmiField(const Operand& dst, Register src);
406 
407  // Adds constant to src and tags the result as a smi.
408  // Result must be a valid smi.
409  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
410 
411  // Convert smi to 32-bit integer. I.e., not sign extended into
412  // high 32 bits of destination.
413  void SmiToInteger32(Register dst, Register src);
414  void SmiToInteger32(Register dst, const Operand& src);
415 
416  // Convert smi to 64-bit integer (sign extended if necessary).
417  void SmiToInteger64(Register dst, Register src);
418  void SmiToInteger64(Register dst, const Operand& src);
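// Illustrative round trip: on x64 the 32-bit payload lives in the upper
// half of the word (shifted left by kSmiShift), so:
//   masm->Integer32ToSmi(rax, rbx);   // tag: rax = rbx << kSmiShift
//   masm->SmiToInteger32(rbx, rax);   // untag: rbx = rax >> kSmiShift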
419 
420  // Multiply a positive smi's integer value by a power of two.
421  // Provides result as 64-bit integer value.
422  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
423  Register src,
424  int power);
425 
426  // Divide a positive smi's integer value by a power of two.
427  // Provides result as 32-bit integer value.
428  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
429  Register src,
430  int power);
431 
432  // Perform the logical or of two smi values and return a smi value.
433  // If either argument is not a smi, jump to on_not_smis and retain
434  // the original values of source registers. The destination register
435  // may be changed if it's not one of the source registers.
436  void SmiOrIfSmis(Register dst,
437  Register src1,
438  Register src2,
439  Label* on_not_smis,
440  Label::Distance near_jump = Label::kFar);
441 
442 
443  // Simple comparison of smis. Both sides must be known smis to use these,
444  // otherwise use Cmp.
445  void SmiCompare(Register smi1, Register smi2);
446  void SmiCompare(Register dst, Smi* src);
447  void SmiCompare(Register dst, const Operand& src);
448  void SmiCompare(const Operand& dst, Register src);
449  void SmiCompare(const Operand& dst, Smi* src);
450  // Compare the int32 in src register to the value of the smi stored at dst.
451  void SmiCompareInteger32(const Operand& dst, Register src);
452  // Sets sign and zero flags depending on value of smi in register.
453  void SmiTest(Register src);
454 
455  // Functions performing a check on a known or potential smi. Returns
456  // a condition that is satisfied if the check is successful.
457 
458  // Is the value a tagged smi.
459  Condition CheckSmi(Register src);
460  Condition CheckSmi(const Operand& src);
461 
462  // Is the value a non-negative tagged smi.
463  Condition CheckNonNegativeSmi(Register src);
464 
465  // Are both values tagged smis.
466  Condition CheckBothSmi(Register first, Register second);
467 
468  // Are both values non-negative tagged smis.
469  Condition CheckBothNonNegativeSmi(Register first, Register second);
470 
471  // Is either value a tagged smi.
472  Condition CheckEitherSmi(Register first,
473  Register second,
474  Register scratch = kScratchRegister);
475 
476  // Is the value the minimum smi value (since we are using
477  // two's complement numbers, negating the value is known to yield
478  // a non-smi value).
479  Condition CheckIsMinSmi(Register src);
480 
481  // Checks whether a 32-bit integer value is valid for conversion
482  // to a smi.
483  Condition CheckInteger32ValidSmiValue(Register src);
484 
485  // Checks whether a 32-bit unsigned integer value is valid for
486  // conversion to a smi.
487  Condition CheckUInteger32ValidSmiValue(Register src);
488 
489  // Check whether src is a Smi, and set dst to zero if it is a smi,
490  // and to one if it isn't.
491  void CheckSmiToIndicator(Register dst, Register src);
492  void CheckSmiToIndicator(Register dst, const Operand& src);
493 
494  // Test-and-jump functions. Typically combines a check function
495  // above with a conditional jump.
496 
497  // Jump if the value cannot be represented by a smi.
498  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
499  Label::Distance near_jump = Label::kFar);
500 
501  // Jump if the unsigned integer value cannot be represented by a smi.
502  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
503  Label::Distance near_jump = Label::kFar);
504 
505  // Jump to label if the value is a tagged smi.
506  void JumpIfSmi(Register src,
507  Label* on_smi,
508  Label::Distance near_jump = Label::kFar);
509 
510  // Jump to label if the value is not a tagged smi.
511  void JumpIfNotSmi(Register src,
512  Label* on_not_smi,
513  Label::Distance near_jump = Label::kFar);
514 
515  // Jump to label if the value is not a non-negative tagged smi.
516  void JumpUnlessNonNegativeSmi(Register src,
517  Label* on_not_smi,
518  Label::Distance near_jump = Label::kFar);
519 
520  // Jump to label if the value, which must be a tagged smi, has value equal
521  // to the constant.
522  void JumpIfSmiEqualsConstant(Register src,
523  Smi* constant,
524  Label* on_equals,
525  Label::Distance near_jump = Label::kFar);
526 
527  // Jump if either or both registers are not smi values.
528  void JumpIfNotBothSmi(Register src1,
529  Register src2,
530  Label* on_not_both_smi,
531  Label::Distance near_jump = Label::kFar);
532 
533  // Jump if either or both registers are not non-negative smi values.
534  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
535  Label* on_not_both_smi,
536  Label::Distance near_jump = Label::kFar);
537 
538  // Operations on tagged smi values.
539 
540  // Smis represent a subset of integers. The subset is always equivalent to
541  // a two's complement interpretation of a fixed number of bits.
542 
543  // Add an integer constant to a tagged smi, giving a tagged smi as result.
544  // No overflow testing on the result is done.
545  void SmiAddConstant(Register dst, Register src, Smi* constant);
546 
547  // Add an integer constant to a tagged smi, giving a tagged smi as result.
548  // No overflow testing on the result is done.
549  void SmiAddConstant(const Operand& dst, Smi* constant);
550 
551  // Add an integer constant to a tagged smi, giving a tagged smi as result,
552  // or jumping to a label if the result cannot be represented by a smi.
553  void SmiAddConstant(Register dst,
554  Register src,
555  Smi* constant,
556  SmiOperationExecutionMode mode,
557  Label* bailout_label,
558  Label::Distance near_jump = Label::kFar);
559 
560  // Subtract an integer constant from a tagged smi, giving a tagged smi as
561  // result. No testing on the result is done. Sets the N and Z flags
562  // based on the value of the resulting integer.
563  void SmiSubConstant(Register dst, Register src, Smi* constant);
564 
565  // Subtract an integer constant from a tagged smi, giving a tagged smi as
566  // result, or jumping to a label if the result cannot be represented by a smi.
567  void SmiSubConstant(Register dst,
568  Register src,
569  Smi* constant,
570  SmiOperationExecutionMode mode,
571  Label* bailout_label,
572  Label::Distance near_jump = Label::kFar);
573 
574  // Negating a smi can give a negative zero or too large positive value.
575  // NOTICE: This operation jumps on success, not failure!
576  void SmiNeg(Register dst,
577  Register src,
578  Label* on_smi_result,
579  Label::Distance near_jump = Label::kFar);
580 
581  // Adds smi values and returns the result as a smi.
582  // If dst is src1, then src1 will be destroyed if the operation is
583  // successful, otherwise kept intact.
584  void SmiAdd(Register dst,
585  Register src1,
586  Register src2,
587  Label* on_not_smi_result,
588  Label::Distance near_jump = Label::kFar);
589  void SmiAdd(Register dst,
590  Register src1,
591  const Operand& src2,
592  Label* on_not_smi_result,
593  Label::Distance near_jump = Label::kFar);
594 
595  void SmiAdd(Register dst,
596  Register src1,
597  Register src2);
598 
599  // Subtracts smi values and returns the result as a smi.
600  // If dst is src1, then src1 will be destroyed if the operation is
601  // successful, otherwise kept intact.
602  void SmiSub(Register dst,
603  Register src1,
604  Register src2,
605  Label* on_not_smi_result,
606  Label::Distance near_jump = Label::kFar);
607  void SmiSub(Register dst,
608  Register src1,
609  const Operand& src2,
610  Label* on_not_smi_result,
611  Label::Distance near_jump = Label::kFar);
612 
613  void SmiSub(Register dst,
614  Register src1,
615  Register src2);
616 
617  void SmiSub(Register dst,
618  Register src1,
619  const Operand& src2);
620 
621  // Multiplies smi values and returns the result as a smi,
622  // if possible.
623  // If dst is src1, then src1 will be destroyed, even if
624  // the operation is unsuccessful.
625  void SmiMul(Register dst,
626  Register src1,
627  Register src2,
628  Label* on_not_smi_result,
629  Label::Distance near_jump = Label::kFar);
630 
631  // Divides one smi by another and returns the quotient.
632  // Clobbers rax and rdx registers.
633  void SmiDiv(Register dst,
634  Register src1,
635  Register src2,
636  Label* on_not_smi_result,
637  Label::Distance near_jump = Label::kFar);
638 
639  // Divides one smi by another and returns the remainder.
640  // Clobbers rax and rdx registers.
641  void SmiMod(Register dst,
642  Register src1,
643  Register src2,
644  Label* on_not_smi_result,
645  Label::Distance near_jump = Label::kFar);
646 
647  // Bitwise operations.
648  void SmiNot(Register dst, Register src);
649  void SmiAnd(Register dst, Register src1, Register src2);
650  void SmiOr(Register dst, Register src1, Register src2);
651  void SmiXor(Register dst, Register src1, Register src2);
652  void SmiAndConstant(Register dst, Register src1, Smi* constant);
653  void SmiOrConstant(Register dst, Register src1, Smi* constant);
654  void SmiXorConstant(Register dst, Register src1, Smi* constant);
655 
656  void SmiShiftLeftConstant(Register dst,
657  Register src,
658  int shift_value);
659  void SmiShiftLogicalRightConstant(Register dst,
660  Register src,
661  int shift_value,
662  Label* on_not_smi_result,
663  Label::Distance near_jump = Label::kFar);
664  void SmiShiftArithmeticRightConstant(Register dst,
665  Register src,
666  int shift_value);
667 
668  // Shifts a smi value to the left, and returns the result if that is a smi.
669  // Uses and clobbers rcx, so dst may not be rcx.
670  void SmiShiftLeft(Register dst,
671  Register src1,
672  Register src2);
673  // Shifts a smi value to the right, shifting in zero bits at the top, and
674  // returns the unsigned interpretation of the result if that is a smi.
675  // Uses and clobbers rcx, so dst may not be rcx.
676  void SmiShiftLogicalRight(Register dst,
677  Register src1,
678  Register src2,
679  Label* on_not_smi_result,
680  Label::Distance near_jump = Label::kFar);
681  // Shifts a smi value to the right, sign extending the top, and
682  // returns the signed interpretation of the result. That will always
683  // be a valid smi value, since it's numerically smaller than the
684  // original.
685  // Uses and clobbers rcx, so dst may not be rcx.
686  void SmiShiftArithmeticRight(Register dst,
687  Register src1,
688  Register src2);
689 
690  // Specialized operations
691 
692  // Select the non-smi register of two registers where exactly one is a
693  // smi. If neither are smis, jump to the failure label.
694  void SelectNonSmi(Register dst,
695  Register src1,
696  Register src2,
697  Label* on_not_smis,
698  Label::Distance near_jump = Label::kFar);
699 
700  // Converts, if necessary, a smi to a combination of number and
701  // multiplier to be used as a scaled index.
702  // The src register contains a *positive* smi value. The shift is the
703  // power of two to multiply the index value by (e.g.
704  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
705  // The returned index register may be either src or dst, depending
706  // on what is most efficient. If src and dst are different registers,
707  // src is always unchanged.
708  SmiIndex SmiToIndex(Register dst, Register src, int shift);
709 
710  // Converts a positive smi to a negative index.
711  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
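// Usage sketch (illustrative): scale a positive smi for pointer-sized
// element access:
//   SmiIndex index = masm->SmiToIndex(rdx, rax, kPointerSizeLog2);
//   masm->movp(rcx, FieldOperand(rbx, index.reg, index.scale,
//                                FixedArray::kHeaderSize));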
712 
713  // Add the value of a smi in memory to an int32 register.
714  // Sets flags as a normal add.
715  void AddSmiField(Register dst, const Operand& src);
716 
717  // Basic Smi operations.
718  void Move(Register dst, Smi* source) {
719  LoadSmiConstant(dst, source);
720  }
721 
722  void Move(const Operand& dst, Smi* source) {
723  Register constant = GetSmiConstant(source);
724  movp(dst, constant);
725  }
726 
727  void Push(Smi* smi);
728 
729  // Save away a 64-bit integer on the stack as two 32-bit integers
730  // masquerading as smis so that the garbage collector skips visiting them.
731  void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
732  // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
733  // smis on the top of stack.
734  void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
735 
736  void Test(const Operand& dst, Smi* source);
737 
738 
739  // ---------------------------------------------------------------------------
740  // String macros.
741 
742  // Generate code to do a lookup in the number string cache. If the number in
743  // the register object is found in the cache the generated code falls through
744  // with the result in the result register. The object and the result register
745  // can be the same. If the number is not found in the cache the code jumps to
746  // the label not_found with only the content of register object unchanged.
747  void LookupNumberStringCache(Register object,
748  Register result,
749  Register scratch1,
750  Register scratch2,
751  Label* not_found);
752 
753  // If object is a string, its map is loaded into object_map.
754  void JumpIfNotString(Register object,
755  Register object_map,
756  Label* not_string,
757  Label::Distance near_jump = Label::kFar);
758 
759 
760  void JumpIfNotBothSequentialAsciiStrings(
761  Register first_object,
762  Register second_object,
763  Register scratch1,
764  Register scratch2,
765  Label* on_not_both_flat_ascii,
766  Label::Distance near_jump = Label::kFar);
767 
768  // Check whether the instance type represents a flat ASCII string. Jump to the
769  // label if not. If the instance type can be scratched, specify the same
770  // register for both instance type and scratch.
771  void JumpIfInstanceTypeIsNotSequentialAscii(
772  Register instance_type,
773  Register scratch,
774  Label* on_not_flat_ascii_string,
775  Label::Distance near_jump = Label::kFar);
776 
777  void JumpIfBothInstanceTypesAreNotSequentialAscii(
778  Register first_object_instance_type,
779  Register second_object_instance_type,
780  Register scratch1,
781  Register scratch2,
782  Label* on_fail,
783  Label::Distance near_jump = Label::kFar);
784 
785  void EmitSeqStringSetCharCheck(Register string,
786  Register index,
787  Register value,
788  uint32_t encoding_mask);
789 
790  // Checks if the given register or operand is a unique name.
791  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
792  Label::Distance distance = Label::kFar);
793  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
794  Label::Distance distance = Label::kFar);
795 
796  // ---------------------------------------------------------------------------
797  // Macro instructions.
798 
799  // Load/store with specific representation.
800  void Load(Register dst, const Operand& src, Representation r);
801  void Store(const Operand& dst, Register src, Representation r);
802 
803  // Load a register with a long value as efficiently as possible.
804  void Set(Register dst, int64_t x);
805  void Set(const Operand& dst, intptr_t x);
806 
807  // The cvtsi2sd instruction only writes to the low 64 bits of the dst register, which
808  // hinders register renaming and makes dependence chains longer. So we use
809  // xorps to clear the dst register before cvtsi2sd to solve this issue.
810  void Cvtlsi2sd(XMMRegister dst, Register src);
811  void Cvtlsi2sd(XMMRegister dst, const Operand& src);
812 
813  // Move if the registers are not identical.
814  void Move(Register target, Register source);
815 
816  // Bit-field support.
817  void TestBit(const Operand& dst, int bit_index);
818 
819  // Handle support
820  void Move(Register dst, Handle<Object> source);
821  void Move(const Operand& dst, Handle<Object> source);
822  void Cmp(Register dst, Handle<Object> source);
823  void Cmp(const Operand& dst, Handle<Object> source);
824  void Cmp(Register dst, Smi* src);
825  void Cmp(const Operand& dst, Smi* src);
826  void Push(Handle<Object> source);
827 
828  // Load a heap object and handle the case of new-space objects by
829  // indirecting via a global cell.
830  void MoveHeapObject(Register result, Handle<Object> object);
831 
832  // Load a global cell into a register.
833  void LoadGlobalCell(Register dst, Handle<Cell> cell);
834 
835  // Emit code to discard a non-negative number of pointer-sized elements
836  // from the stack, clobbering only the rsp register.
837  void Drop(int stack_elements);
838 
839  void Call(Label* target) { call(target); }
840  void Push(Register src);
841  void Push(const Operand& src);
842  void Push(Immediate value);
843  void PushImm32(int32_t imm32);
844  void Pop(Register dst);
845  void Pop(const Operand& dst);
846  void PushReturnAddressFrom(Register src) { pushq(src); }
847  void PopReturnAddressTo(Register dst) { popq(dst); }
848  void Move(Register dst, ExternalReference ext) {
849  movp(dst, reinterpret_cast<void*>(ext.address()),
850  RelocInfo::EXTERNAL_REFERENCE);
851  }
852 
853  // Loads a pointer into a register with a relocation mode.
854  void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
855  // This method must not be used with heap object references. The stored
856  // address is not GC safe. Use the handle version instead.
857  ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
858  movp(dst, ptr, rmode);
859  }
860 
861  void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
862  AllowDeferredHandleDereference using_raw_address;
863  ASSERT(!RelocInfo::IsNone(rmode));
864  ASSERT(value->IsHeapObject());
865  ASSERT(!isolate()->heap()->InNewSpace(*value));
866  movp(dst, reinterpret_cast<void*>(value.location()), rmode);
867  }
868 
869  // Control Flow
870  void Jump(Address destination, RelocInfo::Mode rmode);
871  void Jump(ExternalReference ext);
872  void Jump(const Operand& op);
873  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
874 
875  void Call(Address destination, RelocInfo::Mode rmode);
876  void Call(ExternalReference ext);
877  void Call(const Operand& op);
878  void Call(Handle<Code> code_object,
879  RelocInfo::Mode rmode,
880  TypeFeedbackId ast_id = TypeFeedbackId::None());
881 
882  // The size of the code generated for different call instructions.
883  int CallSize(Address destination) {
884  return kCallSequenceLength;
885  }
886  int CallSize(ExternalReference ext);
887  int CallSize(Handle<Code> code_object) {
888  // Code calls use 32-bit relative addressing.
889  return kShortCallInstructionLength;
890  }
891  int CallSize(Register target) {
892  // Opcode: REX_opt FF /2 m64
893  return (target.high_bit() != 0) ? 3 : 2;
894  }
895  int CallSize(const Operand& target) {
896  // Opcode: REX_opt FF /2 m64
897  return (target.requires_rex() ? 2 : 1) + target.operand_size();
898  }
899 
900  // Emit call to the code we are currently generating.
901  void CallSelf() {
902  Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
903  Call(self, RelocInfo::CODE_TARGET);
904  }
905 
906  // Non-x64 instructions.
907  // Push/pop all general purpose registers.
908  // Does not push rsp/rbp nor any of the assembler's special purpose registers
909  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
910  void Pushad();
911  void Popad();
912  // Sets the stack as after performing Popad, without actually loading the
913  // registers.
914  void Dropad();
915 
916  // Compare object type for heap object.
917  // Always use unsigned comparisons: above and below, not less and greater.
918  // Incoming register is heap_object and outgoing register is map.
919  // They may be the same register, and may be kScratchRegister.
920  void CmpObjectType(Register heap_object, InstanceType type, Register map);
921 
922  // Compare instance type for map.
923  // Always use unsigned comparisons: above and below, not less and greater.
924  void CmpInstanceType(Register map, InstanceType type);
925 
926  // Check if a map for a JSObject indicates that the object has fast elements.
927  // Jump to the specified label if it does not.
928  void CheckFastElements(Register map,
929  Label* fail,
930  Label::Distance distance = Label::kFar);
931 
932  // Check if a map for a JSObject indicates that the object can have both smi
933  // and HeapObject elements. Jump to the specified label if it does not.
934  void CheckFastObjectElements(Register map,
935  Label* fail,
936  Label::Distance distance = Label::kFar);
937 
938  // Check if a map for a JSObject indicates that the object has fast smi only
939  // elements. Jump to the specified label if it does not.
940  void CheckFastSmiElements(Register map,
941  Label* fail,
942  Label::Distance distance = Label::kFar);
943 
944  // Check to see if maybe_number can be stored as a double in
945  // FastDoubleElements. If it can, store it at the index specified by index in
946  // the FastDoubleElements array elements, otherwise jump to fail. Note that
947  // index must not be smi-tagged.
948  void StoreNumberToDoubleElements(Register maybe_number,
949  Register elements,
950  Register index,
951  XMMRegister xmm_scratch,
952  Label* fail,
953  int elements_offset = 0);
954 
955  // Compare an object's map with the specified map.
956  void CompareMap(Register obj, Handle<Map> map);
957 
958  // Check if the map of an object is equal to a specified map and branch to
959  // label if not. Skip the smi check if not required (object is known to be a
960  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
961  // against maps that are ElementsKind transition maps of the specified map.
962  void CheckMap(Register obj,
963  Handle<Map> map,
964  Label* fail,
965  SmiCheckType smi_check_type);
966 
967  // Check if the map of an object is equal to a specified map and branch to a
968  // specified target if equal. Skip the smi check if not required (object is
969  // known to be a heap object)
970  void DispatchMap(Register obj,
971  Register unused,
972  Handle<Map> map,
973  Handle<Code> success,
974  SmiCheckType smi_check_type);
975 
976  // Check if the object in register heap_object is a string. Afterwards the
977  // register map contains the object map and the register instance_type
978  // contains the instance_type. The registers map and instance_type can be the
979  // same in which case it contains the instance type afterwards. Either of the
980  // registers map and instance_type can be the same as heap_object.
981  Condition IsObjectStringType(Register heap_object,
982  Register map,
983  Register instance_type);
984 
985  // Check if the object in register heap_object is a name. Afterwards the
986  // register map contains the object map and the register instance_type
987  // contains the instance_type. The registers map and instance_type can be the
988  // same in which case it contains the instance type afterwards. Either of the
989  // registers map and instance_type can be the same as heap_object.
990  Condition IsObjectNameType(Register heap_object,
991  Register map,
992  Register instance_type);
993 
994  // FCmp compares and pops the two values on top of the FPU stack.
995  // The flag results are similar to integer cmp, but require unsigned
996  // jcc instructions (je, ja, jae, jb, jbe, and jz).
997  void FCmp();
998 
999  void ClampUint8(Register reg);
1000 
1001  void ClampDoubleToUint8(XMMRegister input_reg,
1002  XMMRegister temp_xmm_reg,
1003  Register result_reg);
1004 
1005  void SlowTruncateToI(Register result_reg, Register input_reg,
1006  int offset = HeapNumber::kValueOffset - kHeapObjectTag);
1007 
1008  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
1009  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
1010 
1011  void DoubleToI(Register result_reg, XMMRegister input_reg,
1012  XMMRegister scratch, MinusZeroMode minus_zero_mode,
1013  Label* conversion_failed, Label::Distance dst = Label::kFar);
1014 
1015  void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
1016  MinusZeroMode minus_zero_mode, Label* lost_precision,
1017  Label::Distance dst = Label::kFar);
1018 
1019  void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
1020 
1021  void LoadInstanceDescriptors(Register map, Register descriptors);
1022  void EnumLength(Register dst, Register map);
1023  void NumberOfOwnDescriptors(Register dst, Register map);
1024 
1025  template<typename Field>
1026  void DecodeField(Register reg) {
1027  static const int shift = Field::kShift + kSmiShift;
1028  static const int mask = Field::kMask >> Field::kShift;
1029  shr(reg, Immediate(shift));
1030  andp(reg, Immediate(mask));
1031  shl(reg, Immediate(kSmiShift));
1032  }
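// Usage sketch (illustrative; 'MyBitField' stands in for any BitField type
// stored in a smi-tagged word). The decoded field is left smi-tagged:
//   masm->DecodeField<MyBitField>(rcx);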
1033 
1034  // Abort execution if argument is not a number, enabled via --debug-code.
1035  void AssertNumber(Register object);
1036 
1037  // Abort execution if argument is a smi, enabled via --debug-code.
1038  void AssertNotSmi(Register object);
1039 
1040  // Abort execution if argument is not a smi, enabled via --debug-code.
1041  void AssertSmi(Register object);
1042  void AssertSmi(const Operand& object);
1043 
1044  // Abort execution if a 64 bit register containing a 32 bit payload does not
1045  // have zeros in the top 32 bits, enabled via --debug-code.
1046  void AssertZeroExtended(Register reg);
1047 
1048  // Abort execution if argument is not a string, enabled via --debug-code.
1049  void AssertString(Register object);
1050 
1051  // Abort execution if argument is not a name, enabled via --debug-code.
1052  void AssertName(Register object);
1053 
1054  // Abort execution if argument is not undefined or an AllocationSite, enabled
1055  // via --debug-code.
1056  void AssertUndefinedOrAllocationSite(Register object);
1057 
1058  // Abort execution if argument is not the root value with the given index,
1059  // enabled via --debug-code.
1060  void AssertRootValue(Register src,
1061  Heap::RootListIndex root_value_index,
1062  BailoutReason reason);
1063 
1064  // ---------------------------------------------------------------------------
1065  // Exception handling
1066 
1067  // Push a new try handler and link it into try handler chain.
1068  void PushTryHandler(StackHandler::Kind kind, int handler_index);
1069 
1070  // Unlink the stack handler on top of the stack from the try handler chain.
1071  void PopTryHandler();
1072 
1073  // Activate the top handler in the try handler chain and pass the
1074  // thrown value.
1075  void Throw(Register value);
1076 
1077  // Propagate an uncatchable exception out of the current JS stack.
1078  void ThrowUncatchable(Register value);
1079 
1080  // Throw a message string as an exception.
1081  void Throw(BailoutReason reason);
1082 
1083  // Throw a message string as an exception if a condition is not true.
1084  void ThrowIf(Condition cc, BailoutReason reason);
1085 
1086  // ---------------------------------------------------------------------------
1087  // Inline caching support
1088 
1089  // Generate code for checking access rights - used for security checks
1090  // on access to global objects across environments. The holder register
1091  // is left untouched, but the scratch register and kScratchRegister,
1092  // which must be different, are clobbered.
1093  void CheckAccessGlobalProxy(Register holder_reg,
1094  Register scratch,
1095  Label* miss);
1096 
1097  void GetNumberHash(Register r0, Register scratch);
1098 
1099  void LoadFromNumberDictionary(Label* miss,
1100  Register elements,
1101  Register key,
1102  Register r0,
1103  Register r1,
1104  Register r2,
1105  Register result);
1106 
1107 
1108  // ---------------------------------------------------------------------------
1109  // Allocation support
1110 
1111  // Allocate an object in new space or old pointer space. If the given space
1112  // is exhausted control continues at the gc_required label. The allocated
1113  // object is returned in result and end of the new object is returned in
1114  // result_end. The register scratch can be passed as no_reg in which case
1115  // an additional object reference will be added to the reloc info. The
1116  // returned pointers in result and result_end have not yet been tagged as
1117  // heap objects. If result_contains_top_on_entry is true the content of
1118  // result is known to be the allocation top on entry (could be result_end
1119  // from a previous call). If result_contains_top_on_entry is true scratch
1120  // should be no_reg as it is never used.
1121  void Allocate(int object_size,
1122  Register result,
1123  Register result_end,
1124  Register scratch,
1125  Label* gc_required,
1126  AllocationFlags flags);
1127 
1128  void Allocate(int header_size,
1129  ScaleFactor element_size,
1130  Register element_count,
1131  Register result,
1132  Register result_end,
1133  Register scratch,
1134  Label* gc_required,
1135  AllocationFlags flags);
1136 
1137  void Allocate(Register object_size,
1138  Register result,
1139  Register result_end,
1140  Register scratch,
1141  Label* gc_required,
1142  AllocationFlags flags);
1143 
1144  // Undo allocation in new space. The object passed and objects allocated after
1145  // it will no longer be allocated. Make sure that no pointers are left to the
1146  // object(s) no longer allocated as they would be invalid when allocation is
1147  // un-done.
1148  void UndoAllocationInNewSpace(Register object);
1149 
1150  // Allocate a heap number in new space with undefined value. Returns
1151  // tagged pointer in result register, or jumps to gc_required if new
1152  // space is full.
1153  void AllocateHeapNumber(Register result,
1154  Register scratch,
1155  Label* gc_required);
1156 
1157  // Allocate a sequential string. All the header fields of the string object
1158  // are initialized.
1159  void AllocateTwoByteString(Register result,
1160  Register length,
1161  Register scratch1,
1162  Register scratch2,
1163  Register scratch3,
1164  Label* gc_required);
1165  void AllocateAsciiString(Register result,
1166  Register length,
1167  Register scratch1,
1168  Register scratch2,
1169  Register scratch3,
1170  Label* gc_required);
1171 
1172  // Allocate a raw cons string object. Only the map field of the result is
1173  // initialized.
1174  void AllocateTwoByteConsString(Register result,
1175  Register scratch1,
1176  Register scratch2,
1177  Label* gc_required);
1178  void AllocateAsciiConsString(Register result,
1179  Register scratch1,
1180  Register scratch2,
1181  Label* gc_required);
1182 
1183  // Allocate a raw sliced string object. Only the map field of the result is
1184  // initialized.
1185  void AllocateTwoByteSlicedString(Register result,
1186  Register scratch1,
1187  Register scratch2,
1188  Label* gc_required);
1189  void AllocateAsciiSlicedString(Register result,
1190  Register scratch1,
1191  Register scratch2,
1192  Label* gc_required);
1193 
1194  // ---------------------------------------------------------------------------
1195  // Support functions.
1196 
1197  // Check if result is zero and op is negative.
1198  void NegativeZeroTest(Register result, Register op, Label* then_label);
1199 
1200  // Check if result is zero and op is negative in code using jump targets.
1201  void NegativeZeroTest(CodeGenerator* cgen,
1202  Register result,
1203  Register op,
1204  JumpTarget* then_target);
1205 
1206  // Check if result is zero and any of op1 and op2 are negative.
1207  // Register scratch is destroyed, and it must be different from op2.
1208  void NegativeZeroTest(Register result, Register op1, Register op2,
1209  Register scratch, Label* then_label);
1210 
1211  // Try to get the function prototype of a function and put the value in
1212  // the result register. Checks that the function really is a
1213  // function and jumps to the miss label if the fast checks fail. The
1214  // function register will be untouched; the other register may be
1215  // clobbered.
1216  void TryGetFunctionPrototype(Register function,
1217  Register result,
1218  Label* miss,
1219  bool miss_on_bound_function = false);
1220 
1221  // Generates code for reporting that an illegal operation has
1222  // occurred.
1223  void IllegalOperation(int num_arguments);
1224 
1225  // Picks out an array index from the hash field.
1226  // Register use:
1227  // hash - holds the index's hash. Clobbered.
1228  // index - holds the overwritten index on exit.
1229  void IndexFromHash(Register hash, Register index);
1230 
1231  // Find the function context up the context chain.
1232  void LoadContext(Register dst, int context_chain_length);
1233 
1234  // Conditionally load the cached Array transitioned map of type
1235  // transitioned_kind from the native context if the map in register
1236  // map_in_out is the cached Array map in the native context of
1237  // expected_kind.
1238  void LoadTransitionedArrayMapConditional(
1239  ElementsKind expected_kind,
1240  ElementsKind transitioned_kind,
1241  Register map_in_out,
1242  Register scratch,
1243  Label* no_map_match);
1244 
1245  // Load the global function with the given index.
1246  void LoadGlobalFunction(int index, Register function);
1247 
1248  // Load the initial map from the global function. The registers
1249  // function and map can be the same.
1250  void LoadGlobalFunctionInitialMap(Register function, Register map);
1251 
1252  // ---------------------------------------------------------------------------
1253  // Runtime calls
1254 
1255  // Call a code stub.
1256  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
1257 
1258  // Tail call a code stub (jump).
1259  void TailCallStub(CodeStub* stub);
1260 
1261  // Return from a code stub after popping its arguments.
1262  void StubReturn(int argc);
1263 
1264  // Call a runtime routine.
1265  void CallRuntime(const Runtime::Function* f,
1266  int num_arguments,
1267  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1268 
1269  // Call a runtime function and save the value of XMM registers.
1270  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
1271  const Runtime::Function* function = Runtime::FunctionForId(id);
1272  CallRuntime(function, function->nargs, kSaveFPRegs);
1273  }
1274 
1275  // Convenience function: Same as above, but takes the fid instead.
1276  void CallRuntime(Runtime::FunctionId id,
1277  int num_arguments,
1278  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1279  CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1280  }
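// Usage sketch (illustrative; 'Runtime::kSomeFunction' is a stand-in id):
//   masm->Push(rax);                   // runtime arguments go on the stack
//   masm->CallRuntime(Runtime::kSomeFunction, 1);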
1281 
1282  // Convenience function: call an external reference.
1283  void CallExternalReference(const ExternalReference& ext,
1284  int num_arguments);
1285 
1286  // Tail call of a runtime routine (jump).
1287  // Like JumpToExternalReference, but also takes care of passing the number
1288  // of parameters.
1289  void TailCallExternalReference(const ExternalReference& ext,
1290  int num_arguments,
1291  int result_size);
1292 
1293  // Convenience function: tail call a runtime routine (jump).
1294  void TailCallRuntime(Runtime::FunctionId fid,
1295  int num_arguments,
1296  int result_size);
1297 
1298  // Jump to a runtime routine.
1299  void JumpToExternalReference(const ExternalReference& ext, int result_size);
1300 
1301  // Prepares stack to put arguments (aligns and so on). The WIN64 calling
1302  // convention requires putting the pointer to the return value slot into
1303  // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
1304  // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
1305  // inside the exit frame (not GCed) accessible via StackSpaceOperand.
1306  void PrepareCallApiFunction(int arg_stack_space);
1307 
1308  // Calls an API function. Allocates HandleScope, extracts returned value
1309  // from handle and propagates exceptions. Clobbers r14, r15, rbx and
1310  // caller-save registers. Restores context. On return removes
1311  // stack_space * kPointerSize (GCed).
1312  void CallApiFunctionAndReturn(Register function_address,
1313  Address thunk_address,
1314  Register thunk_last_arg,
1315  int stack_space,
1316  Operand return_value_operand,
1317  Operand* context_restore_operand);
1318 
1319  // Before calling a C-function from generated code, align arguments on stack.
1320  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
1321  // etc., not pushed. The argument count assumes all arguments are word sized.
1322  // The number of slots reserved for arguments depends on platform. On Windows
1323  // stack slots are reserved for the arguments passed in registers. On other
1324  // platforms stack slots are only reserved for the arguments actually passed
1325  // on the stack.
1326  void PrepareCallCFunction(int num_arguments);
1327 
1328  // Calls a C function and cleans up the space for arguments allocated
1329  // by PrepareCallCFunction. The called function is not allowed to trigger a
1330  // garbage collection, since that might move the code and invalidate the
1331  // return address (unless this is somehow accounted for by the called
1332  // function).
1333  void CallCFunction(ExternalReference function, int num_arguments);
1334  void CallCFunction(Register function, int num_arguments);
1335 
1336  // Calculate the number of stack slots to reserve for arguments when calling a
1337  // C function.
1338  int ArgumentStackSlotsForCFunctionCall(int num_arguments);
1339 
1340  // ---------------------------------------------------------------------------
1341  // Utilities
1342 
1343  void Ret();
1344 
1345  // Return and drop arguments from stack, where the number of arguments
1346  // may be bigger than 2^16 - 1. Requires a scratch register.
1347  void Ret(int bytes_dropped, Register scratch);
1348 
1349  Handle<Object> CodeObject() {
1350  ASSERT(!code_object_.is_null());
1351  return code_object_;
1352  }
1353 
1354  // Copy length bytes from source to destination.
1355  // Uses scratch register internally (if you have a low-eight register
1356  // free, do use it, otherwise kScratchRegister will be used).
1357  // The min_length is a minimum limit on the value that length will have.
1358  // The algorithm has some special cases that might be omitted if the string
1359  // is known to always be long.
1360  void CopyBytes(Register destination,
1361  Register source,
1362  Register length,
1363  int min_length = 0,
1364  Register scratch = kScratchRegister);
1365 
1366  // Initialize fields with filler values. Fields starting at |start_offset|
1367  // up to (but not including) |end_offset| are overwritten with |filler|. At
1368  // the end of the loop, |start_offset| takes the value of |end_offset|.
1369  void InitializeFieldsWithFiller(Register start_offset,
1370  Register end_offset,
1371  Register filler);
1372 
1373 
1374  // Emit code for a truncating division by a constant. The dividend register is
1375  // unchanged, the result is in rdx, and rax gets clobbered.
1376  void TruncatingDiv(Register dividend, int32_t divisor);
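// Usage sketch (illustrative): divide rcx by 3 without idiv; per the
// comment above, the result lands in rdx and rax is clobbered:
//   masm->TruncatingDiv(rcx, 3);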
1377 
1378  // ---------------------------------------------------------------------------
1379  // StatsCounter support
1380 
1381  void SetCounter(StatsCounter* counter, int value);
1382  void IncrementCounter(StatsCounter* counter, int value);
1383  void DecrementCounter(StatsCounter* counter, int value);
1384 
1385 
1386  // ---------------------------------------------------------------------------
1387  // Debugging
1388 
1389  // Calls Abort(msg) if the condition cc is not satisfied.
1390  // Use --debug_code to enable.
1391  void Assert(Condition cc, BailoutReason reason);
1392 
1393  void AssertFastElements(Register elements);
1394 
1395  // Like Assert(), but always enabled.
1396  void Check(Condition cc, BailoutReason reason);
1397 
1398  // Print a message to stdout and abort execution.
1399  void Abort(BailoutReason msg);
1400 
1401  // Check that the stack is aligned.
1402  void CheckStackAlignment();
1403 
1404  // Verify restrictions about code generated in stubs.
1405  void set_generating_stub(bool value) { generating_stub_ = value; }
1406  bool generating_stub() { return generating_stub_; }
1407  void set_has_frame(bool value) { has_frame_ = value; }
1408  bool has_frame() { return has_frame_; }
1409  inline bool AllowThisStubCall(CodeStub* stub);
1410 
1411  static int SafepointRegisterStackIndex(Register reg) {
1412  return SafepointRegisterStackIndex(reg.code());
1413  }
1414 
1415  // Activation support.
1416  void EnterFrame(StackFrame::Type type);
1417  void LeaveFrame(StackFrame::Type type);
1418 
1419  // Expects object in rax and returns map with validated enum cache
1420  // in rax. Assumes that any other register can be used as a scratch.
1421  void CheckEnumCache(Register null_value,
1422  Label* call_runtime);
1423 
1424  // AllocationMemento support. Arrays may have an associated
1425  // AllocationMemento object that can be checked for in order to pretransition
1426  // to another type.
1427  // On entry, receiver_reg should point to the array object.
1428  // scratch_reg gets clobbered.
1429  // If allocation info is present, condition flags are set to equal.
1430  void TestJSArrayForAllocationMemento(Register receiver_reg,
1431  Register scratch_reg,
1432  Label* no_memento_found);
1433 
1434  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1435  Register scratch_reg,
1436  Label* memento_found) {
1437  Label no_memento_found;
1438  TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1439  &no_memento_found);
1440  j(equal, memento_found);
1441  bind(&no_memento_found);
1442  }
1443 
1444  // Jumps to found label if a prototype map has dictionary elements.
1445  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1446  Register scratch1, Label* found);
1447 
1448  private:
1449  // Order general registers are pushed by Pushad.
1450  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1451  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
1452  static const int kNumSafepointSavedRegisters = 11;
1453  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1454 
1455  bool generating_stub_;
1456  bool has_frame_;
1457  bool root_array_available_;
1458 
1459  // Returns a register holding the smi value. The register MUST NOT be
1460  // modified. It may be the "smi 1 constant" register.
1461  Register GetSmiConstant(Smi* value);
1462 
1463  intptr_t RootRegisterDelta(ExternalReference other);
1464 
1465  // Moves the smi value to the destination register.
1466  void LoadSmiConstant(Register dst, Smi* value);
1467 
1468  // This handle will be patched with the code object on installation.
1469  Handle<Object> code_object_;
1470 
1471  // Helper functions for generating invokes.
1472  void InvokePrologue(const ParameterCount& expected,
1473  const ParameterCount& actual,
1474  Handle<Code> code_constant,
1475  Register code_register,
1476  Label* done,
1477  bool* definitely_mismatches,
1478  InvokeFlag flag,
1479  Label::Distance near_jump = Label::kFar,
1480  const CallWrapper& call_wrapper = NullCallWrapper());
1481 
1482  void EnterExitFramePrologue(bool save_rax);
1483 
1484  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
1485  // accessible via StackSpaceOperand.
1486  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
1487 
1488  void LeaveExitFrameEpilogue(bool restore_context);
1489 
1490  // Allocation support helpers.
1491  // Loads the top of new-space into the result register, unless the
1492  // RESULT_CONTAINS_TOP flag says result already holds it. The top's address
1493  // is kept in scratch (if valid) for reuse by UpdateAllocationTopHelper.
1494  void LoadAllocationTopHelper(Register result,
1495  Register scratch,
1496  AllocationFlags flags);
1497 
1498  // Update allocation top with value in result_end register.
1499  // If scratch is valid, it contains the address of the allocation top.
1500  void UpdateAllocationTopHelper(Register result_end,
1501  Register scratch,
1502  AllocationFlags flags);
1503 
1504  // Helper for PopHandleScope. If gc_allowed, it is allowed to perform a
1505  // GC and returns NULL. If !gc_allowed, it does not perform a GC and may
1506  // instead return a failure object indicating an allocation failure.
1507  Object* PopHandleScopeHelper(Register saved,
1508  Register scratch,
1509  bool gc_allowed);
1510 
1511  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1512  void InNewSpace(Register object,
1513  Register scratch,
1514  Condition cc,
1515  Label* branch,
1516  Label::Distance distance = Label::kFar);
1517 
1518  // Helper for finding the mark bits for an address. Afterwards, the
1519  // bitmap register points at the word with the mark bits and the mask
1520  // register holds the position of the first bit. Uses rcx as scratch and
1521  // leaves addr_reg unchanged.
1522  inline void GetMarkBits(Register addr_reg,
1523  Register bitmap_reg,
1524  Register mask_reg);
1525 
1526  // Helper for throwing exceptions. Compute a handler address and jump to
1527  // it. See the implementation for register usage.
1528  void JumpToHandlerEntry();
1529 
1530  // Compute memory operands for safepoint stack slots.
1531  Operand SafepointRegisterSlot(Register reg);
1532  static int SafepointRegisterStackIndex(int reg_code) {
1533  return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
1534  }
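 // (Editor's note: worked example. Pushad pushes rax first, so
 // kSafepointPushRegisterIndices[rax.code()] == 0 and rax maps to stack
 // index kNumSafepointRegisters - 1, the slot farthest from rsp.)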
1535 
1536  // Needs access to SafepointRegisterStackIndex for compiled frame
1537  // traversal.
1538  friend class StandardFrame;
1539 };
1540 
1541 
1542 // The code patcher is used to patch (typically) small parts of code e.g. for
1543 // debugging and other types of instrumentation. When using the code patcher
1544 // the exact number of bytes specified must be emitted. It is not legal to
1545 // emit relocation information. If any of these constraints are violated, it
1546 // causes an assertion failure.
1547 class CodePatcher {
1548  public:
1549  CodePatcher(byte* address, int size);
1550  virtual ~CodePatcher();
1551 
1552  // Macro assembler to emit code.
1553  MacroAssembler* masm() { return &masm_; }
1554 
1555  private:
1556  byte* address_; // The address of the code being patched.
1557  int size_; // Number of bytes of the expected patch size.
1558  MacroAssembler masm_; // Macro assembler used to generate the code.
1559 };
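// Editor's example (hedged): a minimal, hypothetical helper that patches a
// single byte with an int3 breakpoint. The destructor asserts that exactly
// `size` bytes were emitted and flushes the instruction cache.
inline void PatchWithInt3(byte* address) {
  CodePatcher patcher(address, 1);  // expect exactly one byte
  patcher.masm()->int3();           // emits 0xCC
}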
1560 
1561 
1562 // -----------------------------------------------------------------------------
1563 // Static helper functions.
1564 
1565 // Generate an Operand for loading a field from an object.
1566 inline Operand FieldOperand(Register object, int offset) {
1567  return Operand(object, offset - kHeapObjectTag);
1568 }
1569 
1570 
1571 // Generate an Operand for loading an indexed field from an object.
1572 inline Operand FieldOperand(Register object,
1573  Register index,
1574  ScaleFactor scale,
1575  int offset) {
1576  return Operand(object, index, scale, offset - kHeapObjectTag);
1577 }
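// Editor's example (hedged): FieldOperand compensates for kHeapObjectTag, so
// a tagged heap pointer can be dereferenced directly. The helper name is
// illustrative.
inline void LoadMapExample(MacroAssembler* masm, Register object, Register dst) {
  // Loads object->map() from a tagged JS heap object.
  masm->movp(dst, FieldOperand(object, HeapObject::kMapOffset));
}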
1578 
1579 
1580 inline Operand ContextOperand(Register context, int index) {
1581  return Operand(context, Context::SlotOffset(index));
1582 }
1583 
1584 
1585 inline Operand GlobalObjectOperand() {
1586  return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
1587 }
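// Editor's example (hedged): the current context lives in rsi on x64, so the
// global object is one load away. The helper name is illustrative.
inline void LoadGlobalObjectExample(MacroAssembler* masm, Register dst) {
  masm->movp(dst, GlobalObjectOperand());
}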
1588 
1589 
1590 // Provides access to exit frame stack space (not GCed).
1591 inline Operand StackSpaceOperand(int index) {
1592 #ifdef _WIN64
1593  const int kShadowSpace = 4;
1594  return Operand(rsp, (index + kShadowSpace) * kPointerSize);
1595 #else
1596  return Operand(rsp, index * kPointerSize);
1597 #endif
1598 }
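// Editor's example (hedged): on Win64 the first four slots are the callee's
// register-home ("shadow") space, so index 0 resolves to rsp + 4 *
// kPointerSize there and to rsp + 0 elsewhere. The helper name is
// illustrative.
inline void StoreToStackSpaceExample(MacroAssembler* masm, Register src) {
  masm->movp(StackSpaceOperand(0), src);  // first usable slot on both ABIs
}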
1599 
1600 
1601 inline Operand StackOperandForReturnAddress(int32_t disp) {
1602  return Operand(rsp, disp);
1603 }
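// (Editor's note: immediately after a call, the return address is at the top
// of the stack, so StackOperandForReturnAddress(0) addresses it; this pairs
// with PopReturnAddressTo/PushReturnAddressFrom when shuffling arguments.)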
1604 
1605 
1606 #ifdef GENERATED_CODE_COVERAGE
1607 extern void LogGeneratedCodeCoverage(const char* file_line);
1608 #define CODE_COVERAGE_STRINGIFY(x) #x
1609 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1610 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1611 #define ACCESS_MASM(masm) { \
1612  Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
1613  masm->pushfq(); \
1614  masm->Pushad(); \
1615  masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
1616  masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
1617  masm->Pop(rax); \
1618  masm->Popad(); \
1619  masm->popfq(); \
1620  } \
1621  masm->
1622 #else
1623 #define ACCESS_MASM(masm) masm->
1624 #endif
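// (Editor's note: by convention, V8 code generators bind a local shorthand
//   #define __ ACCESS_MASM(masm)
// so that "__ movp(rax, rbx);" compiles to "masm->movp(rax, rbx);" normally
// and additionally logs a coverage hit when GENERATED_CODE_COVERAGE is
// defined.)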
1625 
1626 } } // namespace v8::internal
1627 
1628 #endif // V8_X64_MACRO_ASSEMBLER_X64_H_