v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
lithium-codegen-arm.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

class LCodeGen BASE_EMBEDDED {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : zone_(info->zone()),
        chunk_(static_cast<LPlatformChunk*>(chunk)),
        masm_(assembler),
        info_(info),
        current_block_(-1),
        current_instruction_(-1),
        instructions_(chunk->instructions()),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        status_(UNUSED),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        last_lazy_deopt_pc_(0),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }


  // Simple accessors.
  MacroAssembler* masm() const { return masm_; }
  CompilationInfo* info() const { return info_; }
  Isolate* isolate() const { return info_->isolate(); }
  Factory* factory() const { return isolate()->factory(); }
  Heap* heap() const { return isolate()->heap(); }
  Zone* zone() const { return zone_; }

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

  // LOperand is loaded into scratch, unless already a register.
  Register EmitLoadRegister(LOperand* op, Register scratch);

  // LOperand must be a double register.
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // LOperand is loaded into dbl_scratch, unless already a double register.
  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                        SwVfpRegister flt_scratch,
                                        DoubleRegister dbl_scratch);
  int ToInteger32(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
  MemOperand ToHighMemOperand(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Deferred code support.
  void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
                              LOperand* left_argument,
                              LOperand* right_argument,
                              Token::Value op);
  void DoDeferredNumberTagD(LNumberTagD* instr);

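  // Deferred number tagging needs to know whether the untagged 32-bit input
  // is signed or unsigned, since that interpretation decides whether the
  // value fits in a Smi or must be boxed as a heap number.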
  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagI(LInstruction* instr,
                            LOperand* value,
                            IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredRandom(LRandom* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocateObject(LAllocateObject* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);

  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
                        CompareMapMode mode, LEnvironment* env);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

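  // Computes the memory operand for a keyed load or store, folding a
  // constant key into the offset where possible.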
  MemOperand PrepareKeyedOperand(Register key,
                                 Register base,
                                 bool key_is_constant,
                                 int constant_key,
                                 int element_size,
                                 int shift_size,
                                 int additional_index,
                                 int additional_offset);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment,
                        Translation* translation,
                        int* arguments_index,
                        int* arguments_count);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
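  // Code generation state: a fresh LCodeGen is UNUSED, runs as GENERATING,
  // and ends in DONE on success or ABORTED after a bailout.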
  enum Status {
    UNUSED,
    GENERATING,
    DONE,
    ABORTED
  };

  bool is_unused() const { return status_ == UNUSED; }
  bool is_generating() const { return status_ == GENERATING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  StrictModeFlag strict_mode_flag() const {
    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
  }

  LPlatformChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk_->graph(); }

  Register scratch0() { return r9; }
  DwVfpRegister double_scratch0() { return kScratchDoubleReg; }

  int GetNextEmittedBlock(int block);
  LInstruction* GetNextInstruction();

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  int GetParameterCount() const { return scope()->num_parameters(); }

  void Abort(const char* reason);
  void Comment(const char* format, ...);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Code generation passes. Returns true if code generation should
  // continue.
  bool GeneratePrologue();
  bool GenerateBody();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

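  // Selects how a call site records its safepoint: a plain safepoint, or
  // one that also captures the pushed register state (with no arguments on
  // top of the frame).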
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(
      Handle<Code> code,
      RelocInfo::Mode mode,
      LInstruction* instr,
      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);

  void CallCodeGeneric(
      Handle<Code> code,
      RelocInfo::Mode mode,
      LInstruction* instr,
      SafepointMode safepoint_mode,
      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr);

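  // Tells CallKnownFunction whether r1 already contains the call target, so
  // a function that is already in place is not reloaded.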
  enum R1State {
    R1_UNINITIALIZED,
    R1_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in r1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr,
                         CallKind call_kind,
                         R1State r1_state);

  void LoadHeapObject(Register result, Handle<HeapObject> object);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);

  void AddToTranslation(Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int arguments_index,
                        int arguments_count);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  DoubleRegister ToDoubleRegister(int index) const;

  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);
  void DoMathPowHalf(LUnaryMathOperation* instr);
  void DoMathLog(LUnaryMathOperation* instr);
  void DoMathTan(LUnaryMathOperation* instr);
  void DoMathCos(LUnaryMathOperation* instr);
  void DoMathSin(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordPosition(int position);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void EmitBranch(int left_block, int right_block, Condition cc);
  void EmitNumberUntagD(Register input,
                        DoubleRegister result,
                        bool deoptimize_on_undefined,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env);

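  // Deoptimizes if the value is a tagged pointer rather than a Smi.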
  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
                              HValue* value,
                              LOperand* operand);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp1, Register temp2);

  void EmitLoadFieldOrConstantFunction(Register result,
                                       Register object,
                                       Handle<Map> type,
                                       Handle<String> name,
                                       LEnvironment* env);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset);

  // Emit optimized code for integer division.
  // Inputs are signed.
  // All registers are clobbered.
  // If 'remainder' is no_reg, it is not computed.
  void EmitSignedIntegerDivisionByConstant(Register result,
                                           Register dividend,
                                           int32_t divisor,
                                           Register remainder,
                                           Register scratch,
                                           LEnvironment* environment);

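  // One entry per deoptimization target: the label is bound when the jump
  // table is emitted after the function body, and the address is the
  // deoptimizer entry to jump to.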
  struct JumpTableEntry {
    explicit inline JumpTableEntry(Address entry)
        : label(),
          address(entry) { }
    Label label;
    Address address;
  };

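  // Pads the generated code, if needed, so that a lazy deoptimization call
  // can later be patched in after the last recorded lazy-deopt position.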
  void EnsureSpaceForLazyDeopt();

  Zone* zone_;
  LPlatformChunk* const chunk_;
  MacroAssembler* const masm_;
  CompilationInfo* const info_;

  int current_block_;
  int current_instruction_;
  const ZoneList<LInstruction*>* instructions_;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<JumpTableEntry> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  Status status_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  int last_lazy_deopt_pc_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

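  // RAII scope: pushes the safepoint registers (and optionally the double
  // registers) on entry and pops them on exit, keeping
  // expected_safepoint_kind_ in sync for the enclosed call sequence.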
  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters:
          codegen_->masm_->PushSafepointRegisters();
          break;
        case Safepoint::kWithRegistersAndDoubles:
          codegen_->masm_->PushSafepointRegistersAndDoubles();
          break;
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters:
          codegen_->masm_->PopSafepointRegisters();
          break;
        case Safepoint::kWithRegistersAndDoubles:
          codegen_->masm_->PopSafepointRegistersAndDoubles();
          break;
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


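// Base class for code that is emitted out of line, after the main body.
// The instruction that needs a slow path jumps to entry(), the subclass's
// Generate() emits the deferred code, and control returns through exit().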
class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};

} }  // namespace v8::internal

#endif  // V8_ARM_LITHIUM_CODEGEN_ARM_H_
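
How this class is typically driven: the Lithium pipeline lowers the Hydrogen graph to an LChunk and then hands it to LCodeGen. The sketch below follows the shape of LChunk::Codegen in this V8 version, but it is a minimal illustration, not the verbatim source: the flag setup and error handling are abbreviated, and CompileChunk is a hypothetical wrapper name.

// Illustrative sketch of the GenerateCode()/FinishCode() flow.
Handle<Code> CompileChunk(LChunk* chunk, CompilationInfo* info) {
  MacroAssembler assembler(info->isolate(), NULL, 0);
  LCodeGen generator(chunk, &assembler, info);

  // GenerateCode() runs the prologue/body/deferred-code/jump-table/
  // safepoint-table passes and returns false if the chunk contains a
  // construct the backend cannot handle (a bailout).
  if (!generator.GenerateCode()) return Handle<Code>::null();

  // Copy the assembler buffer into a Code object, then attach the
  // deoptimization and safepoint data that FinishCode() records.
  Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
  Handle<Code> code =
      CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
  generator.FinishCode(code);
  return code;
}

On a bailout the caller falls back to the unoptimized full-codegen code for the function, which is why GenerateCode() reports success as a boolean rather than failing hard.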