v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
lithium-codegen-x64.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
#define V8_X64_LITHIUM_CODEGEN_X64_H_

#include "x64/lithium-x64.h"

#include "checks.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "x64/lithium-gap-resolver-x64.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

class LCodeGen BASE_EMBEDDED {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : zone_(info->zone()),
        chunk_(static_cast<LPlatformChunk*>(chunk)),
        masm_(assembler),
        info_(info),
        current_block_(-1),
        current_instruction_(-1),
        instructions_(chunk->instructions()),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        status_(UNUSED),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        last_lazy_deopt_pc_(0),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  MacroAssembler* masm() const { return masm_; }
  CompilationInfo* info() const { return info_; }
  Isolate* isolate() const { return info_->isolate(); }
  Factory* factory() const { return isolate()->factory(); }
  Heap* heap() const { return isolate()->heap(); }
  Zone* zone() const { return zone_; }

  // Support for converting LOperands to assembler types.
  Register ToRegister(LOperand* op) const;
  XMMRegister ToDoubleRegister(LOperand* op) const;
  bool IsInteger32Constant(LConstantOperand* op) const;
  int ToInteger32(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  bool IsTaggedConstant(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

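  // Illustrative driver sketch (not part of this header): a caller such
  // as the chunk's code generation entry point typically constructs an
  // LCodeGen, runs GenerateCode(), and on success builds a Code object
  // from the assembler output and passes it to FinishCode(). The names
  // below follow that pattern loosely and are assumptions, not
  // declarations made here.
  //
  //   MacroAssembler assembler(info->isolate(), NULL, 0);
  //   LCodeGen generator(chunk, &assembler, info);
  //   if (generator.GenerateCode()) {
  //     Handle<Code> code = ...;  // created from the assembler's CodeDesc
  //     generator.FinishCode(code);
  //   }
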
  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredNumberTagU(LNumberTagU* instr);
  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredRandom(LRandom* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocateObject(LAllocateObject* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);

  void DoCheckMapCommon(Register reg, Handle<Map> map,
                        CompareMapMode mode, LEnvironment* env);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment,
                        Translation* translation,
                        int* arguments_index,
                        int* arguments_count);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
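  // Expansion sketch for illustration (these lines are generated by the
  // macro, not written out in this header): for each concrete Lithium
  // instruction the list macro passes its name to DECLARE_DO, so an
  // entry such as StackCheck yields
  //
  //   void DoStackCheck(LStackCheck* node);
  //
  // giving one Do<Type> method per instruction type.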

 private:
  enum Status {
    UNUSED,
    GENERATING,
    DONE,
    ABORTED
  };

  bool is_unused() const { return status_ == UNUSED; }
  bool is_generating() const { return status_ == GENERATING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  StrictModeFlag strict_mode_flag() const {
    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
  }

  LPlatformChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk_->graph(); }

  int GetNextEmittedBlock(int block);

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register scratch);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  int GetParameterCount() const { return scope()->num_parameters(); }

  void Abort(const char* reason);
  void Comment(const char* format, ...);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Code generation passes. Returns true if code generation should
  // continue.
  bool GeneratePrologue();
  bool GenerateBody();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS
  };

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode,
                       int argc);


  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr);

  enum RDIState {
    RDI_UNINITIALIZED,
    RDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in rdi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr,
                         CallKind call_kind,
                         RDIState rdi_state);


  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode,
                                    int argc);
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);

  void AddToTranslation(Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int arguments_index,
                        int arguments_count);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  XMMRegister ToDoubleRegister(int index) const;
  Operand BuildFastArrayOperand(
      LOperand* elements_pointer,
      LOperand* key,
      ElementsKind elements_kind,
      uint32_t offset,
      uint32_t additional_index = 0);

  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);
  void DoMathPowHalf(LUnaryMathOperation* instr);
  void DoMathLog(LUnaryMathOperation* instr);
  void DoMathTan(LUnaryMathOperation* instr);
  void DoMathCos(LUnaryMathOperation* instr);
  void DoMathSin(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordPosition(int position);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void EmitBranch(int left_block, int right_block, Condition cc);
  void EmitNumberUntagD(Register input,
                        XMMRegister result,
                        bool deoptimize_on_undefined,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env);


  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
                              HValue* value,
                              LOperand* operand);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp);

  void EmitLoadFieldOrConstantFunction(Register result,
                                       Register object,
                                       Handle<Map> type,
                                       Handle<String> name,
                                       LEnvironment* env);

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset);

  struct JumpTableEntry {
    explicit inline JumpTableEntry(Address entry)
        : label(),
          address(entry) { }
    Label label;
    Address address;
  };

  void EnsureSpaceForLazyDeopt(int space_needed);

  Zone* zone_;
  LPlatformChunk* const chunk_;
  MacroAssembler* const masm_;
  CompilationInfo* const info_;

  int current_block_;
  int current_instruction_;
  const ZoneList<LInstruction*>* instructions_;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<JumpTableEntry> jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  Status status_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  int last_lazy_deopt_pc_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiler from a set of parallel moves to a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

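  // Illustrative usage sketch (an assumed pattern, not required by this
  // header): code that must record a safepoint while the registers are
  // pushed wraps the call in the RAII scope above, for example:
  //
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     // ... emit the call that needs registers saved ...
  //     RecordSafepointWithRegisters(
  //         instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
  //   }
  //
  // where argc stands in for the actual argument count at that call site.
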
  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
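
// Illustrative sketch of the subclassing pattern the implementation file
// is expected to follow (an assumed example, not a declaration made by
// this header): each deferred operation derives from LDeferredCode and
// forwards Generate() to the matching DoDeferred* method on LCodeGen.
//
//   class DeferredStackCheck: public LDeferredCode {
//    public:
//     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LStackCheck* instr_;
//   };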

} }  // namespace v8::internal

#endif  // V8_X64_LITHIUM_CODEGEN_X64_H_