lithium-codegen-ia32.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_IA32_LITHIUM_CODEGEN_IA32_H_

#include "ia32/lithium-ia32.h"

#include "checks.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;

class LCodeGen BASE_EMBEDDED {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : zone_(info->zone()),
        chunk_(static_cast<LPlatformChunk*>(chunk)),
        masm_(assembler),
        info_(info),
        current_block_(-1),
        current_instruction_(-1),
        instructions_(chunk->instructions()),
        deoptimizations_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        status_(UNUSED),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        dynamic_frame_alignment_(false),
        osr_pc_offset_(-1),
        last_lazy_deopt_pc_(0),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  MacroAssembler* masm() const { return masm_; }
  CompilationInfo* info() const { return info_; }
  Isolate* isolate() const { return info_->isolate(); }
  Factory* factory() const { return isolate()->factory(); }
  Heap* heap() const { return isolate()->heap(); }
  Zone* zone() const { return zone_; }

  // Support for converting LOperands to assembler types.
  Operand ToOperand(LOperand* op) const;
  Register ToRegister(LOperand* op) const;
  XMMRegister ToDoubleRegister(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  Immediate ToInteger32Immediate(LOperand* op) const {
    return Immediate(ToInteger32(LConstantOperand::cast(op)));
  }

  Handle<Object> ToHandle(LConstantOperand* op) const;

  // The operand denoting the second word (the one with a higher address) of
  // a double stack slot.
  Operand HighOperand(LOperand* op);
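  // (Illustrative example, assuming 4-byte stack slots on ia32: if
  // ToOperand(op) for a double spill slot were [ebp - 16], HighOperand(op)
  // would denote [ebp - 12], i.e. the second of the two words that hold
  // the 8-byte double.)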

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagI(LInstruction* instr,
                            LOperand* value,
                            IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredRandom(LRandom* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocateObject(LAllocateObject* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);

  void DoCheckMapCommon(Register reg, Handle<Map> map,
                        CompareMapMode mode, LEnvironment* env);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment,
                        Translation* translation,
                        int* arguments_index,
                        int* arguments_count);

  void EnsureRelocSpaceForDeoptimization();

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
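  // (For illustration: LITHIUM_CONCRETE_INSTRUCTION_LIST invokes DECLARE_DO
  // once per concrete lithium instruction, so e.g. the Goto entry expands to
  //     void DoGoto(LGoto* node);
  // giving LCodeGen one Do<Type> method per instruction type.)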

 private:
  enum Status {
    UNUSED,
    GENERATING,
    DONE,
    ABORTED
  };

  bool is_unused() const { return status_ == UNUSED; }
  bool is_generating() const { return status_ == GENERATING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  StrictModeFlag strict_mode_flag() const {
    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
  }

  LPlatformChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk_->graph(); }

  int GetNextEmittedBlock(int block);

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  int GetParameterCount() const { return scope()->num_parameters(); }

  void Abort(const char* reason);
  void Comment(const char* format, ...);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Code generation passes. Returns true if code generation should
  // continue.
  bool GeneratePrologue();
  bool GenerateBody();
  bool GenerateDeferredCode();
  // Pad the reloc info to ensure that we have enough space to patch during
  // deoptimization.
  bool GenerateRelocPadding();
  bool GenerateSafepointTable();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* fun,
                   int argc,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int argc,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, argc, instr);
  }
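  // (Illustrative call site; the runtime function id is an assumed example:
  //     CallRuntime(Runtime::kStackGuard, 0, instr);
  // resolves the Runtime::Function for the id and delegates to the
  // three-argument overload above.)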

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  enum EDIState {
    EDI_UNINITIALIZED,
    EDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr,
                         CallKind call_kind,
                         EDIState edi_state);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);

  void AddToTranslation(Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int arguments_index,
                        int arguments_count);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  XMMRegister ToDoubleRegister(int index) const;
  int ToInteger32(LConstantOperand* op) const;

  double ToDouble(LConstantOperand* op) const;
  Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                LOperand* key,
                                Representation key_representation,
                                ElementsKind elements_kind,
                                uint32_t offset,
                                uint32_t additional_index = 0);

  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);
  void DoMathLog(LUnaryMathOperation* instr);
  void DoMathTan(LUnaryMathOperation* instr);
  void DoMathCos(LUnaryMathOperation* instr);
  void DoMathSin(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordPosition(int position);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void EmitBranch(int left_block, int right_block, Condition cc);
  void EmitNumberUntagD(Register input,
                        Register temp,
                        XMMRegister result,
                        bool deoptimize_on_undefined,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env);

  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
                              HValue* value,
                              LOperand* operand);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp);

  void EmitLoadFieldOrConstantFunction(Register result,
                                       Register object,
                                       Handle<Map> type,
                                       Handle<String> name,
                                       LEnvironment* env);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset);

  void EnsureSpaceForLazyDeopt();

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  Zone* zone_;
  LPlatformChunk* const chunk_;
  MacroAssembler* const masm_;
  CompilationInfo* const info_;

  int current_block_;
  int current_instruction_;
  const ZoneList<LInstruction*>* instructions_;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  Status status_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool dynamic_frame_alignment_;
  int osr_pc_offset_;
  int last_lazy_deopt_pc_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
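  // (Usage sketch: deferred code that must call out with registers saved
  // typically opens a scope such as
  //     PushSafepointRegistersScope scope(this);
  // which pushes the safepoint registers and flips expected_safepoint_kind_
  // to kWithRegisters; the destructor restores both on scope exit.)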

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
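// (Illustrative subclass, of the kind defined in the matching .cc file; the
// exact names here are a sketch, not declared in this header:
//
//   class DeferredStackCheck: public LDeferredCode {
//    public:
//     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LStackCheck* instr_;
//   };
//
// The main code jumps to entry(), Generate() emits the out-of-line slow
// path, and control returns through exit().)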

} }  // namespace v8::internal

#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_