v8 3.25.30 (node 0.11.13)
code-stubs-arm64.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_

#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);

class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) { }

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};


class StringHelper : public AllStatic {
 public:
  // TODO(all): These don't seem to be used any more. Delete them.

  // Generate string hash.
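  //
  // For reference, a hedged sketch of the hash these are believed to
  // implement (V8's Jenkins-style one-at-a-time string hash); this is an
  // illustration, not code taken from this file:
  //   add character:  hash += c; hash += hash << 10; hash ^= hash >> 6;
  //   finalize:       hash += hash << 3; hash ^= hash >> 11;
  //                   hash += hash << 15;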
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);

  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);

  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash,
                                  Register scratch);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class StoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
      : save_doubles_(with_fp) {}

  static Register to_be_pushed_lr() { return ip0; }
  static void GenerateAheadOfTime(Isolate* isolate);
 private:
  Major MajorKey() { return StoreRegistersState; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
  SaveFPRegsMode save_doubles_;

  void Generate(MacroAssembler* masm);
};


class RestoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
      : save_doubles_(with_fp) {}

  static void GenerateAheadOfTime(Isolate* isolate);
 private:
  Major MajorKey() { return RestoreRegistersState; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
  SaveFPRegsMode save_doubles_;

  void Generate(MacroAssembler* masm);
};


class RecordWriteStub: public PlatformCodeStub {
 public:
  // Stub to record the write of 'value' at 'address' in 'object'.
  // Typically 'address' = 'object' + <some offset>.
  // See MacroAssembler::RecordWriteField() for example.
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }
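
  // A hedged usage sketch; the registers, offset, and flag choices below
  // are illustrative assumptions, not taken from this file:
  //   // Record that 'value' (x0) was stored into 'object' (x1) at
  //   // 'address' (x2) == object + offset.
  //   RecordWriteStub stub(x1, x0, x2,
  //                        EMIT_REMEMBERED_SET, kDontSaveFPRegs);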

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static Mode GetMode(Code* stub) {
    // Find the mode depending on the first two instructions.
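    // The encoding, as established by Patch() below, is:
    //   (b,   adr)  -> INCREMENTAL
    //   (adr, b)    -> INCREMENTAL_COMPACTION
    //   (adr, adr)  -> STORE_BUFFER_ONLY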
    Instruction* instr1 =
        reinterpret_cast<Instruction*>(stub->instruction_start());
    Instruction* instr2 = instr1->following();

    if (instr1->IsUncondBranchImm()) {
      ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
      return INCREMENTAL;
    }

    ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));

    if (instr2->IsUncondBranchImm()) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(instr2->IsPCRelAddressing());

    return STORE_BUFFER_ONLY;
  }

  // We patch the first two instructions of the stub back and forth between
  // an adr and a branch when we start and stop incremental heap marking.
  // The branch is
  //   b label
  // The adr is
  //   adr xzr label
  // so effectively a nop: the result is discarded into the zero register,
  // but the instruction still encodes the offset to the label, which
  // Patch() reads back via ImmPCOffset().
  static void Patch(Code* stub, Mode mode) {
    // We are going to patch the first two instructions of the stub.
    PatchingAssembler patcher(
        reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
    Instruction* instr1 = patcher.InstructionAt(0);
    Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
    // Instructions must be either 'adr' or 'b'.
    ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
    ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
    // Retrieve the offsets to the labels.
    int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
    int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();

    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
        break;
    }
    ASSERT(GetMode(stub) == mode);
  }

 private:
  // This is a helper class to manage the registers associated with the stub.
  // The 'object' and 'address' registers must be preserved.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch)
        : object_(object),
          address_(address),
          scratch0_(scratch),
          saved_regs_(kCallerSaved) {
      ASSERT(!AreAliased(scratch, object, address));

      // We would like to require more scratch registers for this stub,
      // but the number of registers comes down to the ones used in
      // FullCodeGen::SetVar(), which is architecture independent.
      // We allocate 2 extra scratch registers that we'll save on the stack.
      CPURegList pool_available = GetValidRegistersForAllocation();
      CPURegList used_regs(object, address, scratch);
      pool_available.Remove(used_regs);
      scratch1_ = Register(pool_available.PopLowestIndex());
      scratch2_ = Register(pool_available.PopLowestIndex());

      // The SaveCallerSaveRegisters method needs to save caller-saved
      // registers, but we don't bother saving ip0 and ip1 because they are
      // used as scratch registers by the MacroAssembler.
      saved_regs_.Remove(ip0);
      saved_regs_.Remove(ip1);

      // The scratch registers will be restored by other means so we don't
      // need to save them with the other caller-saved registers.
      saved_regs_.Remove(scratch0_);
      saved_regs_.Remove(scratch1_);
      saved_regs_.Remove(scratch2_);
    }

    void Save(MacroAssembler* masm) {
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->Push(scratch1_, scratch2_);
    }

    void Restore(MacroAssembler* masm) {
      masm->Pop(scratch2_, scratch1_);
    }

    // If we have to call into C then we need to save and restore all
    // caller-saved registers that were not already preserved.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not
      // every register will need to be preserved. Can we improve this?
      masm->PushCPURegList(saved_regs_);
      if (mode == kSaveFPRegs) {
        masm->PushCPURegList(kCallerSavedFP);
      }
    }

    void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                    SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not
      // every register will need to be preserved. Can we improve this?
      if (mode == kSaveFPRegs) {
        masm->PopCPURegList(kCallerSavedFP);
      }
      masm->PopCPURegList(saved_regs_);
    }

    Register object() { return object_; }
    Register address() { return address_; }
    Register scratch0() { return scratch0_; }
    Register scratch1() { return scratch1_; }
    Register scratch2() { return scratch2_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
    Register scratch2_;
    CPURegList saved_regs_;

    // TODO(all): We should consider moving this somewhere else.
    static CPURegList GetValidRegistersForAllocation() {
      // The list of valid registers for allocation is defined as all the
      // registers without those with a special meaning.
      //
      // The default list excludes registers x26 to x31 because they are
      // reserved for the following purposes:
      //   - x26: root register
      //   - x27: context pointer register
      //   - x28: jssp
      //   - x29: frame pointer
      //   - x30: link register (lr)
      //   - x31: xzr / stack pointer
      CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);

      // We also remove MacroAssembler's scratch registers.
      list.Remove(ip0);
      list.Remove(ip1);
      list.Remove(x8);
      list.Remove(x9);

      return list;
    }

    friend class RecordWriteStub;
  };

  // A list of stub variants which are pregenerated.
  // The variants are stored in the same format as the minor key, so
  // MinorKeyFor() can be used to populate and check this list.
  static const int kAheadOfTime[];

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return MinorKeyFor(object_, value_, address_, remembered_set_action_,
                       save_fp_regs_mode_);
  }

  static int MinorKeyFor(Register object,
                         Register value,
                         Register address,
                         RememberedSetAction action,
                         SaveFPRegsMode fp_mode) {
    ASSERT(object.Is64Bits());
    ASSERT(value.Is64Bits());
    ASSERT(address.Is64Bits());
    return ObjectBits::encode(object.code()) |
           ValueBits::encode(value.code()) |
           AddressBits::encode(address.code()) |
           RememberedSetActionBits::encode(action) |
           SaveFPRegsModeBits::encode(fp_mode);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
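  // The resulting minor-key layout, read directly off the BitFields above:
  //   bit  16     SaveFPRegsMode
  //   bit  15     RememberedSetAction
  //   bits 14:10  address register code
  //   bits 9:5    value register code
  //   bits 4:0    object register code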

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  Label slow_;
  RegisterAllocation regs_;
};


// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
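//
// A hedged usage sketch; the target register and setup below are
// illustrative assumptions, not taken from this file:
//   DirectCEntryStub stub;
//   // ... set up the exit frame, load the C++ entry point into x10 ...
//   stub.GenerateCall(masm, x10);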
class DirectCEntryStub: public PlatformCodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register scratch1,
                                     Register scratch2);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);
  }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};


class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() {}

  // Compares two flat ASCII strings and returns the result in x0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns the result
  // in x0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* chars_not_equal);
};


struct PlatformCallInterfaceDescriptor {
  explicit PlatformCallInterfaceDescriptor(
      TargetAddressStorageMode storage_mode)
      : storage_mode_(storage_mode) { }

  TargetAddressStorageMode storage_mode() { return storage_mode_; }

 private:
  TargetAddressStorageMode storage_mode_;
};


} }  // namespace v8::internal

#endif  // V8_ARM64_CODE_STUBS_ARM64_H_