v8  3.25.30(node0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
code-stubs-ia32.h
Go to the documentation of this file.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_IA32_CODE_STUBS_IA32_H_
29 #define V8_IA32_CODE_STUBS_IA32_H_
30 
31 #include "macro-assembler.h"
32 #include "ic-inl.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 
38 void ArrayNativeCode(MacroAssembler* masm,
39  bool construct_call,
40  Label* call_generic_code);
41 
42 
43 class StoreBufferOverflowStub: public PlatformCodeStub {
44  public:
46  : save_doubles_(save_fp) {
48  }
49 
50  void Generate(MacroAssembler* masm);
51 
52  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
53  virtual bool SometimesSetsUpAFrame() { return false; }
54 
55  private:
56  SaveFPRegsMode save_doubles_;
57 
58  Major MajorKey() { return StoreBufferOverflow; }
59  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
60 };
61 
62 
63 class StringHelper : public AllStatic {
64  public:
65  // Generate code for copying characters using the rep movs instruction.
66  // Copies ecx characters from esi to edi. Copying of overlapping regions is
67  // not supported.
68  static void GenerateCopyCharactersREP(MacroAssembler* masm,
69  Register dest, // Must be edi.
70  Register src, // Must be esi.
71  Register count, // Must be ecx.
72  Register scratch, // Neither of above.
73  bool ascii);
74 
75  // Generate string hash.
76  static void GenerateHashInit(MacroAssembler* masm,
77  Register hash,
78  Register character,
79  Register scratch);
80  static void GenerateHashAddCharacter(MacroAssembler* masm,
81  Register hash,
82  Register character,
83  Register scratch);
84  static void GenerateHashGetHash(MacroAssembler* masm,
85  Register hash,
86  Register scratch);
87 
88  private:
89  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
90 };
91 
92 
93 class SubStringStub: public PlatformCodeStub {
94  public:
96 
97  private:
98  Major MajorKey() { return SubString; }
99  int MinorKey() { return 0; }
100 
101  void Generate(MacroAssembler* masm);
102 };
103 
104 
105 class StringCompareStub: public PlatformCodeStub {
106  public:
108 
109  // Compares two flat ASCII strings and returns result in eax.
111  Register left,
112  Register right,
113  Register scratch1,
114  Register scratch2,
115  Register scratch3);
116 
117  // Compares two flat ASCII strings for equality and returns result
118  // in eax.
120  Register left,
121  Register right,
122  Register scratch1,
123  Register scratch2);
124 
125  private:
126  virtual Major MajorKey() { return StringCompare; }
127  virtual int MinorKey() { return 0; }
128  virtual void Generate(MacroAssembler* masm);
129 
130  static void GenerateAsciiCharsCompareLoop(
131  MacroAssembler* masm,
132  Register left,
133  Register right,
134  Register length,
135  Register scratch,
136  Label* chars_not_equal,
137  Label::Distance chars_not_equal_near = Label::kFar);
138 };
139 
140 
141 class NameDictionaryLookupStub: public PlatformCodeStub {
142  public:
144 
146  Register result,
147  Register index,
149  : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
150 
151  void Generate(MacroAssembler* masm);
152 
153  static void GenerateNegativeLookup(MacroAssembler* masm,
154  Label* miss,
155  Label* done,
156  Register properties,
158  Register r0);
159 
160  static void GeneratePositiveLookup(MacroAssembler* masm,
161  Label* miss,
162  Label* done,
163  Register elements,
164  Register name,
165  Register r0,
166  Register r1);
167 
168  virtual bool SometimesSetsUpAFrame() { return false; }
169 
170  private:
171  static const int kInlinedProbes = 4;
172  static const int kTotalProbes = 20;
173 
174  static const int kCapacityOffset =
177 
178  static const int kElementsStartOffset =
181 
182  Major MajorKey() { return NameDictionaryLookup; }
183 
184  int MinorKey() {
185  return DictionaryBits::encode(dictionary_.code()) |
186  ResultBits::encode(result_.code()) |
187  IndexBits::encode(index_.code()) |
188  LookupModeBits::encode(mode_);
189  }
190 
191  class DictionaryBits: public BitField<int, 0, 3> {};
192  class ResultBits: public BitField<int, 3, 3> {};
193  class IndexBits: public BitField<int, 6, 3> {};
194  class LookupModeBits: public BitField<LookupMode, 9, 1> {};
195 
196  Register dictionary_;
197  Register result_;
198  Register index_;
199  LookupMode mode_;
200 };
201 
202 
203 class RecordWriteStub: public PlatformCodeStub {
204  public:
206  Register value,
207  Register address,
208  RememberedSetAction remembered_set_action,
209  SaveFPRegsMode fp_mode)
210  : object_(object),
211  value_(value),
212  address_(address),
213  remembered_set_action_(remembered_set_action),
214  save_fp_regs_mode_(fp_mode),
215  regs_(object, // An input reg.
216  address, // An input reg.
217  value) { // One scratch reg.
219  }
220 
221  enum Mode {
223  INCREMENTAL,
225  };
226 
227  virtual bool SometimesSetsUpAFrame() { return false; }
228 
229  static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
230  static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
231 
232  static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
233  static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
234 
235  static Mode GetMode(Code* stub) {
236  byte first_instruction = stub->instruction_start()[0];
237  byte second_instruction = stub->instruction_start()[2];
238 
239  if (first_instruction == kTwoByteJumpInstruction) {
240  return INCREMENTAL;
241  }
242 
243  ASSERT(first_instruction == kTwoByteNopInstruction);
244 
245  if (second_instruction == kFiveByteJumpInstruction) {
246  return INCREMENTAL_COMPACTION;
247  }
248 
249  ASSERT(second_instruction == kFiveByteNopInstruction);
250 
251  return STORE_BUFFER_ONLY;
252  }
253 
254  static void Patch(Code* stub, Mode mode) {
255  switch (mode) {
256  case STORE_BUFFER_ONLY:
257  ASSERT(GetMode(stub) == INCREMENTAL ||
261  break;
262  case INCREMENTAL:
263  ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
265  break;
267  ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
270  break;
271  }
272  ASSERT(GetMode(stub) == mode);
273  CPU::FlushICache(stub->instruction_start(), 7);
274  }
275 
276  private:
277  // This is a helper class for freeing up 3 scratch registers, where the third
278  // is always ecx (needed for shift operations). The input is two registers
279  // that must be preserved and one scratch register provided by the caller.
280  class RegisterAllocation {
281  public:
282  RegisterAllocation(Register object,
283  Register address,
284  Register scratch0)
285  : object_orig_(object),
286  address_orig_(address),
287  scratch0_orig_(scratch0),
288  object_(object),
289  address_(address),
290  scratch0_(scratch0) {
291  ASSERT(!AreAliased(scratch0, object, address, no_reg));
292  scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
293  if (scratch0.is(ecx)) {
294  scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
295  }
296  if (object.is(ecx)) {
297  object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
298  }
299  if (address.is(ecx)) {
300  address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
301  }
302  ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
303  }
304 
305  void Save(MacroAssembler* masm) {
306  ASSERT(!address_orig_.is(object_));
307  ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
308  ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
309  ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
310  ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
311  // We don't have to save scratch0_orig_ because it was given to us as
312  // a scratch register. But if we had to switch to a different reg then
313  // we should save the new scratch0_.
314  if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
315  if (!ecx.is(scratch0_orig_) &&
316  !ecx.is(object_orig_) &&
317  !ecx.is(address_orig_)) {
318  masm->push(ecx);
319  }
320  masm->push(scratch1_);
321  if (!address_.is(address_orig_)) {
322  masm->push(address_);
323  masm->mov(address_, address_orig_);
324  }
325  if (!object_.is(object_orig_)) {
326  masm->push(object_);
327  masm->mov(object_, object_orig_);
328  }
329  }
330 
331  void Restore(MacroAssembler* masm) {
332  // These will have been preserved the entire time, so we just need to move
333  // them back. Only in one case is the orig_ reg different from the plain
334  // one, since only one of them can alias with ecx.
335  if (!object_.is(object_orig_)) {
336  masm->mov(object_orig_, object_);
337  masm->pop(object_);
338  }
339  if (!address_.is(address_orig_)) {
340  masm->mov(address_orig_, address_);
341  masm->pop(address_);
342  }
343  masm->pop(scratch1_);
344  if (!ecx.is(scratch0_orig_) &&
345  !ecx.is(object_orig_) &&
346  !ecx.is(address_orig_)) {
347  masm->pop(ecx);
348  }
349  if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
350  }
351 
352  // If we have to call into C then we need to save and restore all caller-
353  // saved registers that were not already preserved. The caller saved
354  // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
355  // will be restored by other means so we don't bother pushing them here.
356  void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
357  if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
358  if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
359  if (mode == kSaveFPRegs) {
360  CpuFeatureScope scope(masm, SSE2);
361  masm->sub(esp,
362  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
363  // Save all XMM registers except XMM0.
364  for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
365  XMMRegister reg = XMMRegister::from_code(i);
366  masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
367  }
368  }
369  }
370 
371  inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
372  SaveFPRegsMode mode) {
373  if (mode == kSaveFPRegs) {
374  CpuFeatureScope scope(masm, SSE2);
375  // Restore all XMM registers except XMM0.
376  for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
377  XMMRegister reg = XMMRegister::from_code(i);
378  masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
379  }
380  masm->add(esp,
381  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
382  }
383  if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
384  if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
385  }
386 
387  inline Register object() { return object_; }
388  inline Register address() { return address_; }
389  inline Register scratch0() { return scratch0_; }
390  inline Register scratch1() { return scratch1_; }
391 
392  private:
393  Register object_orig_;
394  Register address_orig_;
395  Register scratch0_orig_;
396  Register object_;
397  Register address_;
398  Register scratch0_;
399  Register scratch1_;
400  // Third scratch register is always ecx.
401 
402  Register GetRegThatIsNotEcxOr(Register r1,
403  Register r2,
404  Register r3) {
405  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
406  Register candidate = Register::FromAllocationIndex(i);
407  if (candidate.is(ecx)) continue;
408  if (candidate.is(r1)) continue;
409  if (candidate.is(r2)) continue;
410  if (candidate.is(r3)) continue;
411  return candidate;
412  }
413  UNREACHABLE();
414  return no_reg;
415  }
416  friend class RecordWriteStub;
417  };
418 
419  enum OnNoNeedToInformIncrementalMarker {
420  kReturnOnNoNeedToInformIncrementalMarker,
421  kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
422  }
423 ;
424  void Generate(MacroAssembler* masm);
425  void GenerateIncremental(MacroAssembler* masm, Mode mode);
426  void CheckNeedsToInformIncrementalMarker(
427  MacroAssembler* masm,
428  OnNoNeedToInformIncrementalMarker on_no_need,
429  Mode mode);
430  void InformIncrementalMarker(MacroAssembler* masm);
431 
432  Major MajorKey() { return RecordWrite; }
433 
434  int MinorKey() {
435  return ObjectBits::encode(object_.code()) |
436  ValueBits::encode(value_.code()) |
437  AddressBits::encode(address_.code()) |
438  RememberedSetActionBits::encode(remembered_set_action_) |
439  SaveFPRegsModeBits::encode(save_fp_regs_mode_);
440  }
441 
442  void Activate(Code* code) {
443  code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
444  }
445 
446  class ObjectBits: public BitField<int, 0, 3> {};
447  class ValueBits: public BitField<int, 3, 3> {};
448  class AddressBits: public BitField<int, 6, 3> {};
449  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
450  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
451 
452  Register object_;
453  Register value_;
454  Register address_;
455  RememberedSetAction remembered_set_action_;
456  SaveFPRegsMode save_fp_regs_mode_;
457  RegisterAllocation regs_;
458 };
459 
460 
461 } } // namespace v8::internal
462 
463 #endif // V8_IA32_CODE_STUBS_IA32_H_
static Mode GetMode(Code *stub)
static void Patch(Code *stub, Mode mode)
RecordWriteStub(Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode)
const Register r3
static const byte kTwoByteNopInstruction
const Register esp
static int NumAllocatableRegisters()
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
#define ASSERT(condition)
Definition: checks.h:329
void Generate(MacroAssembler *masm)
static const int kNumRegisters
static bool IsSafeForSnapshot(CpuFeature f)
Definition: assembler-arm.h:78
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
const Register r2
uint8_t byte
Definition: globals.h:185
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static void GenerateCopyCharactersREP(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
const Register eax
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:266
byte * instruction_start()
Definition: objects-inl.h:5857
const int kPointerSize
Definition: globals.h:268
const Register ecx
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static Register FromAllocationIndex(int index)
static const byte kTwoByteJumpInstruction
static const byte kFiveByteNopInstruction
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
const Register r0
NameDictionaryLookupStub(Register dictionary, Register result, Register index, LookupMode mode)
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< Name > name, Register scratch0)
static const int kHeaderSize
Definition: objects.h:3016
bool is(Register reg) const
void ArrayNativeCode(MacroAssembler *masm, Label *call_generic_code)
const Register r1
static XMMRegister from_code(int code)
const Register no_reg
static const byte kFiveByteJumpInstruction
static void GenerateFixedRegStubsAheadOfTime(Isolate *isolate)
const Register edx
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Definition: flags.cc:505
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
void Generate(MacroAssembler *masm)