v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
lithium-codegen-x64.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
#define V8_X64_LITHIUM_CODEGEN_X64_H_

#include "x64/lithium-x64.h"

#include "checks.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  // Support for converting LOperands to assembler types.
  Register ToRegister(LOperand* op) const;
  XMMRegister ToDoubleRegister(LOperand* op) const;
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsDehoistedKeyConstant(LConstantOperand* op) const;
  bool IsSmiConstant(LConstantOperand* op) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  ExternalReference ToExternalReference(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

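  // Illustrative sketch (assumed caller, not part of this header): the
  // Lithium pipeline tries GenerateCode() and, only on success, seals the
  // result with FinishCode(), e.g.
  //   LCodeGen generator(chunk, &assembler, info);
  //   if (generator.GenerateCode()) {
  //     Handle<Code> code = ...;  // assembled from the MacroAssembler
  //     generator.FinishCode(code);
  //   }
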
  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredNumberTagU(LNumberTagU* instr);
  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
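  // Illustrative sketch (not part of the original header): for a concrete
  // instruction such as LAdd, the list macro above expands to
  //   void DoAdd(LAdd* node);
  // so the class gains one Do* visitor per Lithium instruction type.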

 private:
  StrictMode strict_mode() const { return info()->strict_mode(); }

  LPlatformChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk()->graph(); }

  XMMRegister double_scratch0() const { return xmm0; }

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register scratch);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void Abort(BailoutReason reason);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation passes. Returns true if code generation should
  // continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS
  };

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode,
                       int argc);

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  void LoadContextFromDeferred(LOperand* context);

  enum RDIState {
    RDI_UNINITIALIZED,
    RDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in rdi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         RDIState rdi_state);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode,
                                    int argc);
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc,
                    LEnvironment* environment,
                    Deoptimizer::BailoutType bailout_type);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);
  void ApplyCheckIf(Condition cc, LBoundsCheck* check);

  bool DeoptEveryNTimes() {
    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
  }

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  XMMRegister ToDoubleRegister(int index) const;
  Operand BuildFastArrayOperand(
      LOperand* elements_pointer,
      LOperand* key,
      ElementsKind elements_kind,
      uint32_t offset,
      uint32_t additional_index = 0);

  Operand BuildSeqStringOperand(Register string,
                                LOperand* index,
                                String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);
  void EmitSmiMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordAndWritePosition(int position) V8_OVERRIDE;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition cc);
  template<class InstrType>
  void EmitFalseBranch(InstrType instr, Condition cc);
  void EmitNumberUntagD(
      Register input,
      XMMRegister result,
      bool allow_undefined_as_nan,
      bool deoptimize_on_minus_zero,
      LEnvironment* env,
      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
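  // Illustrative sketch (assumed usage, not part of the original header):
  // the branch handler feeds the returned condition straight into
  // EmitBranch so the likely path falls through without an extra jump, e.g.
  //   Condition final_condition = EmitTypeofIs(instr, input);
  //   EmitBranch(instr, final_condition);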

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp);

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
#ifdef _MSC_VER
  // On Windows, you may not access the stack more than one page below
  // the most recently mapped page. To make the allocated area randomly
  // accessible, we write an arbitrary value to each page in the range
  // rsp + offset - page_size .. rsp in turn.
  void MakeSureStackPagesMapped(int offset);
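  // Illustrative sketch (assumption, not necessarily the original body):
  // the probe amounts to touching one word per page, walking from the far
  // end of the new area back toward rsp so each guard page is committed in
  // order, e.g.
  //   const int kPageSize = 4 * KB;
  //   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
  //     masm()->movp(Operand(rsp, offset), rax);
  //   }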
#endif

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
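
  // Illustrative sketch (assumed usage, not part of the original header):
  // deferred code that calls out brackets the call in this RAII scope so
  // all registers are spilled to the safepoint area for its duration, e.g.
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     // ... emit the runtime or stub call here ...
  //   }  // PopSafepointRegisters() runs when the scope ends.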

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label done_;
  Label* external_exit_;
  int instruction_index_;
};
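
// Illustrative sketch (assumed usage, not part of this header): the .cc
// file typically defines one small subclass per deferred operation that
// forwards Generate() to the matching DoDeferred* method, e.g.
//   class DeferredNumberTagD V8_FINAL : public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() V8_OVERRIDE {
//       codegen()->DoDeferredNumberTagD(instr_);
//     }
//     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };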

} }  // namespace v8::internal

#endif  // V8_X64_LITHIUM_CODEGEN_X64_H_