code-stubs-mips.h
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
#define V8_MIPS_CODE_STUBS_MIPS_H_

#include "ic-inl.h"


namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);

class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) {}

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};


class StringHelper : public AllStatic {
 public:
  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharactersLong(MacroAssembler* masm,
                                         Register dest,
                                         Register src,
                                         Register count,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         Register scratch5,
                                         int flags);
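  // The flags argument is a bit mask; in V8's implementation it is built
  // from a CopyCharactersFlags enum (e.g. COPY_ASCII, DEST_ALWAYS_ALIGNED)
  // defined next to the stub's code in the .cc file (an assumption here;
  // the enum is not declared in this header).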


  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);

  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);

  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
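
// The three hash generators above emit, in machine code, the equivalent of
// the following C++ (a sketch, assuming they implement V8's StringHasher,
// a Jenkins one-at-a-time variant; `seed` stands for the isolate's hash
// seed, which is an assumption of this sketch):
//
//   uint32_t hash = seed;
//   hash += c;           // GenerateHashInit folds in the first character;
//   hash += hash << 10;  // GenerateHashAddCharacter repeats these three
//   hash ^= hash >> 6;   // steps for each subsequent character.
//
//   hash += hash << 3;   // GenerateHashGetHash finalizes the hash.
//   hash ^= hash >> 11;
//   hash += hash << 15;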


class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};

class StoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
      : save_doubles_(with_fp) {}

  static void GenerateAheadOfTime(Isolate* isolate);
 private:
  Major MajorKey() { return StoreRegistersState; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
  SaveFPRegsMode save_doubles_;

  void Generate(MacroAssembler* masm);
};

class RestoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
      : save_doubles_(with_fp) {}

  static void GenerateAheadOfTime(Isolate* isolate);
 private:
  Major MajorKey() { return RestoreRegistersState; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
  SaveFPRegsMode save_doubles_;

  void Generate(MacroAssembler* masm);
};

class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns the result in v0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns the result
  // in v0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Label* chars_not_equal);
};
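
// The comparison result left in v0 is a Smi; assuming the LESS/EQUAL/GREATER
// encoding used elsewhere in V8, it is negative when left < right, zero when
// the strings are equal and positive when left > right.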


// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 public:
  WriteInt32ToHeapNumberStub(Register the_int,
                             Register the_heap_number,
                             Register scratch,
                             Register scratch2)
      : the_int_(the_int),
        the_heap_number_(the_heap_number),
        scratch_(scratch),
        sign_(scratch2) {
    ASSERT(IntRegisterBits::is_valid(the_int_.code()));
    ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
    ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
    ASSERT(SignRegisterBits::is_valid(sign_.code()));
  }

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);

 private:
  Register the_int_;
  Register the_heap_number_;
  Register scratch_;
  Register sign_;

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};
  class SignRegisterBits: public BitField<int, 12, 4> {};

  Major MajorKey() { return WriteInt32ToHeapNumber; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return IntRegisterBits::encode(the_int_.code())
           | HeapNumberRegisterBits::encode(the_heap_number_.code())
           | ScratchRegisterBits::encode(scratch_.code())
           | SignRegisterBits::encode(sign_.code());
  }

  void Generate(MacroAssembler* masm);
};
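
// Worked example of the encoding above (with assumed register codes: on
// MIPS, t0..t3 are registers 8..11): for the_int_ = t0, the_heap_number_ =
// t1, scratch_ = t2 and sign_ = t3,
//   MinorKey() == 8 | (9 << 4) | (10 << 8) | (11 << 12) == 0xBA98.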


class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    ASSERT(Assembler::IsBne(masm->instr_at(pos)));
  }

  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
  }

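  // How the patching above encodes the stub's mode: on MIPS,
  // `bne zero_reg, zero_reg` is never taken and so behaves as a nop, while
  // `beq zero_reg, zero_reg` is always taken and so behaves as an
  // unconditional branch. GetMode() below decodes the stub's first two
  // instructions accordingly:
  //   first is beq                 -> INCREMENTAL
  //   first is bne, second is beq  -> INCREMENTAL_COMPACTION
  //   first and second are bne     -> STORE_BUFFER_ONLY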
  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   2 * Assembler::kInstrSize);

    if (Assembler::IsBeq(first_instruction)) {
      return INCREMENTAL;
    }

    ASSERT(Assembler::IsBne(first_instruction));

    if (Assembler::IsBeq(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(Assembler::IsBne(second_instruction));

    return STORE_BUFFER_ONLY;
  }

  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(NULL,
                        stub->instruction_start(),
                        stub->instruction_size());
    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
        break;
    }
    ASSERT(GetMode(stub) == mode);
    CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
  }

 private:
  // This is a helper class for freeing up 3 scratch registers. The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      ASSERT(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        masm->MultiPushFPU(kCallerSavedFPU);
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        masm->MultiPopFPU(kCallerSavedFPU);
      }
      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return ObjectBits::encode(object_.code()) |
           ValueBits::encode(value_.code()) |
           AddressBits::encode(address_.code()) |
           RememberedSetActionBits::encode(remembered_set_action_) |
           SaveFPRegsModeBits::encode(save_fp_regs_mode_);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
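
  // Example of the resulting layout (with assumed register codes: on MIPS,
  // a0..a2 are registers 4..6, and assuming EMIT_REMEMBERED_SET and
  // kDontSaveFPRegs are the zero-valued enumerators): object_ = a0,
  // value_ = a1, address_ = a2 packs as
  //   4 | (5 << 5) | (6 << 10) | (0 << 15) | (0 << 16) == 0x18A4.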

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  Label slow_;
  RegisterAllocation regs_;
};


// Trampoline stub to call into native code. To call safely into native code
// in the presence of a compacting GC (which can move code objects), we need
// to keep the code that called into native code pinned in memory. Currently
// the simplest approach is to generate such a stub early enough that it can
// never be moved by the GC.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};
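
// Because NeedsImmovableCode() returns true, the stub itself is placed where
// the GC will not move it, so the return address it leaves behind while
// native code runs stays valid even across a compacting collection.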


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);
  }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};
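
// A sketch of how the probe constants are typically used (inferred from the
// lookup generators above): the Generate*Lookup helpers emit kInlinedProbes
// probe iterations inline at the call site and fall back to this stub, which
// probes up to kTotalProbes times before bailing out to the runtime on a
// miss.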


} }  // namespace v8::internal

#endif  // V8_MIPS_CODE_STUBS_MIPS_H_