v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
code-stubs-arm.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_

#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);


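// Generates code that is called when the write barrier's store buffer
// overflows; it saves the registers the runtime call would clobber
// (optionally including FP registers, per save_fp) before handing the
// buffered slots to the runtime for processing.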
class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) {}

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};


class StringHelper : public AllStatic {
 public:
  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharactersLong(MacroAssembler* masm,
                                         Register dest,
                                         Register src,
                                         Register count,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         int flags);


  // Generate string hash.
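  // The hash is built incrementally: GenerateHashInit seeds the running
  // hash from the hash seed and the first character, GenerateHashAddCharacter
  // folds in each subsequent character, and GenerateHashGetHash finalizes
  // the value.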
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);

  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);

  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};



class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns result in r0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns result
  // in r0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* chars_not_equal);
};


// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 public:
  WriteInt32ToHeapNumberStub(Register the_int,
                             Register the_heap_number,
                             Register scratch)
      : the_int_(the_int),
        the_heap_number_(the_heap_number),
        scratch_(scratch) { }

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);

 private:
  Register the_int_;
  Register the_heap_number_;
  Register scratch_;

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};
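  // Each ARM register code fits in 4 bits, so the three registers pack into
  // the low 12 bits of the key. For example, the_int_ = r10,
  // the_heap_number_ = r1, scratch_ = r2 encodes as 0x21A: 10 in bits 0-3,
  // 1 in bits 4-7, and 2 in bits 8-11.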

  Major MajorKey() { return WriteInt32ToHeapNumber; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return IntRegisterBits::encode(the_int_.code())
           | HeapNumberRegisterBits::encode(the_heap_number_.code())
           | ScratchRegisterBits::encode(scratch_.code());
  }

  void Generate(MacroAssembler* masm);
};


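// Implements the out-of-line part of the write barrier: depending on the
// current Mode, the stub records the written address in the remembered set
// and/or informs the incremental marker about the stored value.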
class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };
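  // The stub's first two instructions encode its current mode: a branch in
  // the first slot means INCREMENTAL, a branch in the second means
  // INCREMENTAL_COMPACTION, and two TST-immediate (nop-like) instructions
  // mean STORE_BUFFER_ONLY. GetMode() reads this encoding and Patch()
  // rewrites it in place.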

  virtual bool SometimesSetsUpAFrame() { return false; }

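  // Patching toggles between the branch and TST-immediate encodings by
  // flipping bit 27 against bits 24 and 20 of the instruction word, so a
  // mode switch is a single in-place word edit per slot.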
  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
  }

  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
  }

  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   Assembler::kInstrSize);

    if (Assembler::IsBranch(first_instruction)) {
      return INCREMENTAL;
    }

    ASSERT(Assembler::IsTstImmediate(first_instruction));

    if (Assembler::IsBranch(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(Assembler::IsTstImmediate(second_instruction));

    return STORE_BUFFER_ONLY;
  }

  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(NULL,
                        stub->instruction_start(),
                        stub->instruction_size());
    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
        break;
    }
    ASSERT(GetMode(stub) == mode);
    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
  }

 private:
  // This is a helper class for freeing up 3 scratch registers. The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      ASSERT(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        masm->SaveFPRegs(sp, scratch0_);
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        masm->RestoreFPRegs(sp, scratch0_);
      }
      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return ObjectBits::encode(object_.code()) |
           ValueBits::encode(value_.code()) |
           AddressBits::encode(address_.code()) |
           RememberedSetActionBits::encode(remembered_set_action_) |
           SaveFPRegsModeBits::encode(save_fp_regs_mode_);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 4> {};
  class ValueBits: public BitField<int, 4, 4> {};
  class AddressBits: public BitField<int, 8, 4> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  Label slow_;
  RegisterAllocation regs_;
};


// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;
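  // The GenerateNegativeLookup/GeneratePositiveLookup helpers emit the first
  // kInlinedProbes probes of the open-addressed dictionary lookup inline and
  // fall back to this stub, which probes up to kTotalProbes times before
  // giving up.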

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);
  }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};


class PlatformCallInterfaceDescriptor {
 public:
  explicit PlatformCallInterfaceDescriptor(
      TargetAddressStorageMode storage_mode)
      : storage_mode_(storage_mode) { }

  TargetAddressStorageMode storage_mode() { return storage_mode_; }

 private:
  TargetAddressStorageMode storage_mode_;
};


} }  // namespace v8::internal

#endif  // V8_ARM_CODE_STUBS_ARM_H_