V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-codegen-arm64.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "arm64/lithium-arm64.h"

#include "arm64/lithium-gap-resolver-arm64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32I(LOperand* op);
  Operand ToOperand32U(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

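  // Illustrative usage (a sketch, not part of the original header): inside an
  // instruction handler these helpers map Lithium operands onto ARM64
  // registers and operands. The LSomeOp name is hypothetical, and __
  // abbreviates masm()-> as in the .cc files.
  //
  //   void LCodeGen::DoSomeOp(LSomeOp* instr) {
  //     Register left = ToRegister32(instr->left());
  //     Operand right = ToOperand32I(instr->right());
  //     __ Add(ToRegister32(instr->result()), left, right);
  //   }
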
  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
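
  // How the X-macro above expands (for illustration; the instruction list
  // itself lives in lithium-arm64.h): each entry in
  // LITHIUM_CONCRETE_INSTRUCTION_LIST becomes one DoXxx declaration.
  // Assuming the list contains, say, Branch and Goto, the expansion is:
  //
  //   void DoBranch(LBranch* node);
  //   void DoGoto(LGoto* node);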

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);

  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could just fall through.
  // You shouldn't use it directly; consider one of the helpers such as
  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch instead.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);
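
  // Sketch of how these helpers are typically used (illustrative only; the
  // concrete handlers live in lithium-codegen-arm64.cc). A branch handler
  // computes its inputs and delegates the control flow, including the
  // fall-through optimization, to one of the Emit* helpers. The LSmiBranch
  // name is hypothetical; __ abbreviates masm()->.
  //
  //   void LCodeGen::DoSmiBranch(LSmiBranch* instr) {
  //     Register value = ToRegister(instr->value());
  //     // Branch if the low (smi tag) bit is clear.
  //     EmitTestAndBranch(instr, eq, value, kSmiTagMask);
  //   }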

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split into true and false labels
  // should be made, to optimize fall-through.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(
      LEnvironment* environment,
      BranchType branch_type, Register reg = NoReg, int bit = -1,
      Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LEnvironment* environment,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
  void ApplyCheckIf(Condition cc, LBoundsCheck* check);
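
  // For illustration (a sketch; see lithium-codegen-arm64.cc for the real
  // definitions): the DeoptimizeIf* helpers are thin wrappers that pick a
  // BranchType for DeoptimizeBranch, which emits the compare-and-branch to
  // the deoptimization entry. Conceptually:
  //
  //   void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* env) {
  //     DeoptimizeBranch(env, reg_zero, rt);
  //   }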

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int additional_index);
  void CalcKeyedArrayBaseRegister(Register base,
                                  Register elements,
                                  Register key,
                                  bool key_is_tagged,
                                  ElementsKind elements_kind);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void Abort(BailoutReason reason);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }
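
  // Illustrative call site (a sketch; Runtime::kSomeFunction is a placeholder
  // id, not necessarily one this file uses): the convenience overload above
  // resolves a FunctionId and forwards to the main CallRuntime.
  //
  //   CallRuntime(Runtime::kSomeFunction, 2, instr);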

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);
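
  // Usage sketch (illustrative; the argument names are placeholders): when
  // the caller has already materialized the function in x1, it passes x1
  // explicitly; otherwise CallKnownFunction loads the function itself.
  //
  //   CallKnownFunction(function, formal_count, arity, instr, x1);  // in x1
  //   CallKnownFunction(function, formal_count, arity, instr);      // loads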

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);
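
  // How call emission and safepoints pair up (a conceptual sketch of what
  // CallCodeGeneric does; see lithium-codegen-arm64.cc for the real code):
  // every call emitted in optimized code is immediately followed by a
  // safepoint so the GC can walk the frame at that return address. Here __
  // abbreviates masm()->.
  //
  //   __ Call(code, RelocInfo::CODE_TARGET);
  //   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);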

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  int old_position_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr which must be saved on the stack (the call
      // to the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters: {
          StoreRegistersStateStub stub(kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          StoreRegistersStateStub stub(kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters: {
          RestoreRegistersStateStub stub(kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          RestoreRegistersStateStub stub(kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
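
  // Typical use inside deferred code (a sketch; real call sites are in
  // lithium-codegen-arm64.cc): the scope is an RAII guard, so registers are
  // saved on entry by the Store stub and restored by the Restore stub when
  // the scope ends.
  //
  //   {
  //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //     // ... emit a call that clobbers registers ...
  //     RecordSafepointWithRegisters(
  //         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  //   }  // Registers restored here.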

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
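
// Concrete deferred code is defined locally in lithium-codegen-arm64.cc as
// small subclasses like the following sketch (written here to show the
// pattern, not copied from the source): Generate() simply forwards to the
// matching DoDeferred* method on the code generator.
//
//   class DeferredNumberTagD: public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };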


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds, and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For concrete examples, see the implementations in lithium-codegen-arm64.cc
// (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
      : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
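
// A sketch of a concrete generator in the spirit of the BranchOnCondition
// example mentioned above (illustrative; the real class lives in
// lithium-codegen-arm64.cc, where __ abbreviates masm()-> and the inverted
// condition is assumed to be computed by a helper like InvertCondition):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const { __ B(cond_, label); }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) __ B(InvertCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };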

} }  // namespace v8::internal

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_