V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-codegen-ia32.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_IA32_LITHIUM_CODEGEN_IA32_H_

#include "ia32/lithium-ia32.h"

#include "checks.h"
#include "deoptimizer.h"
#include "ia32/lithium-gap-resolver-ia32.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        dynamic_frame_alignment_(false),
        support_aligned_spilled_doubles_(false),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        x87_stack_(assembler),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  // Support for converting LOperands to assembler types.
  Operand ToOperand(LOperand* op) const;
  Register ToRegister(LOperand* op) const;
  XMMRegister ToDoubleRegister(LOperand* op) const;
  X87Register ToX87Register(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
  Immediate ToImmediate(LOperand* op, const Representation& r) const {
    return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
  }
  double ToDouble(LConstantOperand* op) const;

  // Support for non-sse2 (x87) floating point stack handling.
  // These functions maintain the mapping of physical stack registers to our
  // virtual registers between instructions.
  enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };

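  // Illustrative sketch (not part of the original header) of how the helpers
  // below cooperate: X87Mov pushes a value for virtual register |reg| onto
  // the physical FPU stack and records which slot it occupies, so a later
  // X87Fxch(reg) can bring it back to st(0) regardless of what was pushed
  // in between:
  //
  //   X87Mov(left, Operand(esp, 0));   // left -> st(0), mapping recorded
  //   X87Mov(right, Operand(esp, 8));  // right -> st(0), left now st(1)
  //   X87Fxch(left);                   // exchange so left is st(0) again
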
  void X87Mov(X87Register reg, Operand src,
              X87OperandType operand = kX87DoubleOperand);
  void X87Mov(Operand src, X87Register reg,
              X87OperandType operand = kX87DoubleOperand);

  void X87PrepareBinaryOp(
      X87Register left, X87Register right, X87Register result);

  void X87LoadForUsage(X87Register reg);
  void X87LoadForUsage(X87Register reg1, X87Register reg2);
  void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
  void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }

  void X87Fxch(X87Register reg, int other_slot = 0) {
    x87_stack_.Fxch(reg, other_slot);
  }
  void X87Free(X87Register reg) {
    x87_stack_.Free(reg);
  }


  bool X87StackEmpty() {
    return x87_stack_.depth() == 0;
  }

  Handle<Object> ToHandle(LConstantOperand* op) const;

  // The operand denoting the second word (the one with a higher address) of
  // a double stack slot.
  Operand HighOperand(LOperand* op);

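  // Illustrative example (not in the original header): for a double spill
  // slot whose low word is addressed by ToOperand(op), say [ebp - 16],
  // HighOperand(op) addresses the word kPointerSize (4 bytes) above it,
  // i.e. [ebp - 12].
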
  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagIU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp1,
                             LOperand* temp2,
                             IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void EnsureRelocSpaceForDeoptimization();

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  StrictMode strict_mode() const { return info()->strict_mode(); }

  Scope* scope() const { return scope_; }

  XMMRegister double_scratch0() const { return xmm0; }

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void Abort(BailoutReason reason);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation passes. Returns true if code generation should
  // continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* fun,
                   int argc,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int argc,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, argc, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  void LoadContextFromDeferred(LOperand* context);

  enum EDIState {
    EDI_UNINITIALIZED,
    EDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         EDIState edi_state);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc,
                    LEnvironment* environment,
                    Deoptimizer::BailoutType bailout_type);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);
  void ApplyCheckIf(Condition cc, LBoundsCheck* check);

  bool DeoptEveryNTimes() {
    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
  }

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  XMMRegister ToDoubleRegister(int index) const;
  X87Register ToX87Register(int index) const;
  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  ExternalReference ToExternalReference(LConstantOperand* op) const;

  Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                LOperand* key,
                                Representation key_representation,
                                ElementsKind elements_kind,
                                uint32_t offset,
                                uint32_t additional_index = 0);

  Operand BuildSeqStringOperand(Register string,
                                LOperand* index,
                                String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);

  void RecordAndWritePosition(int position) V8_OVERRIDE;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition cc);
  template<class InstrType>
  void EmitFalseBranch(InstrType instr, Condition cc);
  void EmitNumberUntagD(
      Register input,
      Register temp,
      XMMRegister result,
      bool allow_undefined_as_nan,
      bool deoptimize_on_minus_zero,
      LEnvironment* env,
      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);

  void EmitNumberUntagDNoSSE2(
      Register input,
      Register temp,
      X87Register res_reg,
      bool allow_undefined_as_nan,
      bool deoptimize_on_minus_zero,
      LEnvironment* env,
      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);

  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  void X87Fld(Operand src, X87OperandType opts);

  void EmitFlushX87ForDeopt();
  void FlushX87StackIfNecessary(LInstruction* instr) {
    x87_stack_.FlushIfNecessary(instr, this);
  }
  friend class LGapResolver;

#ifdef _MSC_VER
  // On Windows, you may not access the stack more than one page below
  // the most recently mapped page. To make the allocated area randomly
  // accessible, we write an arbitrary value to each page in the range
  // esp + offset - page_size .. esp, in turn.
  void MakeSureStackPagesMapped(int offset);
#endif
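  // A minimal sketch of the probing loop described above (the actual
  // definition lives in lithium-codegen-ia32.cc; the 4 KB page size is an
  // assumption about the OS page granularity):
  //
  //   const int kPageSize = 4 * KB;
  //   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
  //     __ mov(Operand(esp, offset), eax);  // touch the page to map it
  //   }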

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool dynamic_frame_alignment_;
  bool support_aligned_spilled_doubles_;
  int osr_pc_offset_;
  bool frame_is_built_;

  class X87Stack {
   public:
    explicit X87Stack(MacroAssembler* masm)
        : stack_depth_(0), is_mutable_(true), masm_(masm) { }
    explicit X87Stack(const X87Stack& other)
        : stack_depth_(other.stack_depth_), is_mutable_(false),
          masm_(other.masm_) {
      for (int i = 0; i < stack_depth_; i++) {
        stack_[i] = other.stack_[i];
      }
    }
    bool operator==(const X87Stack& other) const {
      if (stack_depth_ != other.stack_depth_) return false;
      for (int i = 0; i < stack_depth_; i++) {
        if (!stack_[i].is(other.stack_[i])) return false;
      }
      return true;
    }
    bool Contains(X87Register reg);
    void Fxch(X87Register reg, int other_slot = 0);
    void Free(X87Register reg);
    void PrepareToWrite(X87Register reg);
    void CommitWrite(X87Register reg);
    void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
    void LeavingBlock(int current_block_id, LGoto* goto_instr);
    int depth() const { return stack_depth_; }
    void pop() {
      ASSERT(is_mutable_);
      stack_depth_--;
    }
    void push(X87Register reg) {
      ASSERT(is_mutable_);
      ASSERT(stack_depth_ < X87Register::kNumAllocatableRegisters);
      stack_[stack_depth_] = reg;
      stack_depth_++;
    }

    MacroAssembler* masm() const { return masm_; }

   private:
    int ArrayIndex(X87Register reg);
    int st2idx(int pos);

    X87Register stack_[X87Register::kNumAllocatableRegisters];
    int stack_depth_;
    bool is_mutable_;
    MacroAssembler* masm_;
  };
  X87Stack x87_stack_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
      ASSERT(codegen_->info()->is_calling());
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
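  // Illustrative usage (not from the original header): the scope brackets a
  // call that must record a safepoint with registers, and its destructor
  // restores the simple safepoint kind:
  //
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     __ CallRuntimeSaveDoubles(Runtime::kSomeFunctionId);  // hypothetical id
  //     RecordSafepointWithRegisters(
  //         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  //   }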

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode : public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_),
        x87_stack_(x87_stack) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
  int instruction_index() const { return instruction_index_; }
  const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  Label done_;
  int instruction_index_;
  LCodeGen::X87Stack x87_stack_;
};
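
// Illustrative subclass (not part of this header, hypothetical in its
// details): the DoDeferred* helpers above are driven by local classes
// defined along these lines in lithium-codegen-ia32.cc; constructing one
// queues it via AddDeferredCode(), and GenerateDeferredCode() later calls
// Generate():
//
//   class DeferredNumberTagD V8_FINAL : public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr,
//                        const LCodeGen::X87Stack& x87_stack)
//         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
//     virtual void Generate() V8_OVERRIDE {
//       codegen()->DoDeferredNumberTagD(instr_);
//     }
//     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };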

} }  // namespace v8::internal

#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_