V8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
lithium-codegen-mips.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_

#include "mips/lithium-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

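// LCodeGen generates native MIPS code for a Lithium chunk: it walks the
// chunk's instructions, emits machine code through the macro assembler, and
// records the safepoint and deoptimization information the runtime needs.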
class LCodeGen BASE_EMBEDDED {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : zone_(info->zone()),
        chunk_(static_cast<LPlatformChunk*>(chunk)),
        masm_(assembler),
        info_(info),
        current_block_(-1),
        current_instruction_(-1),
        instructions_(chunk->instructions()),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        status_(UNUSED),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        last_lazy_deopt_pc_(0),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  // Simple accessors.
  MacroAssembler* masm() const { return masm_; }
  CompilationInfo* info() const { return info_; }
  Isolate* isolate() const { return info_->isolate(); }
  Factory* factory() const { return isolate()->factory(); }
  Heap* heap() const { return isolate()->heap(); }
  Zone* zone() const { return zone_; }

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

  // LOperand is loaded into scratch, unless already a register.
  Register EmitLoadRegister(LOperand* op, Register scratch);

  // LOperand must be a double register.
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // LOperand is loaded into dbl_scratch, unless already a double register.
  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                        FloatRegister flt_scratch,
                                        DoubleRegister dbl_scratch);
  int ToInteger32(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
  MemOperand ToHighMemOperand(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

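  // Deferred code helpers, called from the out-of-line (deferred) code paths
  // generated for the corresponding instructions.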
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagI(LInstruction* instr,
                            LOperand* value,
                            IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredRandom(LRandom* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocateObject(LAllocateObject* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);

  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
                        CompareMapMode mode, LEnvironment* env);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  MemOperand PrepareKeyedOperand(Register key,
                                 Register base,
                                 bool key_is_constant,
                                 int constant_key,
                                 int element_size,
                                 int shift_size,
                                 int additional_index,
                                 int additional_offset);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment,
                        Translation* translation,
                        int* arguments_index,
                        int* arguments_count);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  enum Status {
    UNUSED,
    GENERATING,
    DONE,
    ABORTED
  };

  bool is_unused() const { return status_ == UNUSED; }
  bool is_generating() const { return status_ == GENERATING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  StrictModeFlag strict_mode_flag() const {
    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
  }

  LPlatformChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk_->graph(); }

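  // Dedicated scratch registers for Lithium code generation (the
  // kLithiumScratch* names are MIPS-specific register aliases).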
  Register scratch0() { return kLithiumScratchReg; }
  Register scratch1() { return kLithiumScratchReg2; }
  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }

  int GetNextEmittedBlock(int block);
  LInstruction* GetNextInstruction();

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  int GetParameterCount() const { return scope()->num_parameters(); }

  void Abort(const char* reason);
  void Comment(const char* format, ...);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Code generation passes. Returns true if code generation should
  // continue.
  bool GeneratePrologue();
  bool GenerateBody();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

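  // How a call site records its safepoint: either a plain safepoint, or one
  // taken with all registers saved and no arguments pushed.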
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr);

  enum A1State {
    A1_UNINITIALIZED,
    A1_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in a1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr,
                         CallKind call_kind,
                         A1State a1_state);

  void LoadHeapObject(Register result, Handle<HeapObject> object);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

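  // Deoptimization support. Environments are registered so that frame
  // translations can be emitted for them; DeoptimizeIf branches to a
  // deoptimization entry when the condition holds for src1 and src2.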
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc,
                    LEnvironment* environment,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));

  void AddToTranslation(Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int arguments_index,
                        int arguments_count);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  DoubleRegister ToDoubleRegister(int index) const;

  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);
  void DoMathPowHalf(LUnaryMathOperation* instr);
  void DoMathLog(LUnaryMathOperation* instr);
  void DoMathTan(LUnaryMathOperation* instr);
  void DoMathCos(LUnaryMathOperation* instr);
  void DoMathSin(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordPosition(int position);

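  // Control-flow and comparison helpers for branching between basic blocks.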
  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void EmitBranch(int left_block,
                  int right_block,
                  Condition cc,
                  Register src1,
                  const Operand& src2);
  void EmitBranchF(int left_block,
                   int right_block,
                   Condition cc,
                   FPURegister src1,
                   FPURegister src2);
  void EmitCmpI(LOperand* left, LOperand* right);
  void EmitNumberUntagD(Register input,
                        DoubleRegister result,
                        bool deoptimize_on_undefined,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env);

  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
                              HValue* value,
                              LOperand* operand);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split into true and false
  // labels should be made, to optimize fallthrough.
  // Returns two registers in cmp1 and cmp2 that can be used in the
  // Branch instruction after EmitTypeofIs.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name,
                         Register& cmp1,
                         Operand& cmp2);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split into true and false
  // labels should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Register temp2,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split into true and false
  // labels should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp1, Register temp2);

  void EmitLoadFieldOrConstantFunction(Register result,
                                       Register object,
                                       Handle<Map> type,
                                       Handle<String> name,
                                       LEnvironment* env);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset);

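  // An entry in the deoptimization jump table: a label bound in the generated
  // code plus the address of the deoptimization entry to jump to.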
  struct JumpTableEntry {
    explicit inline JumpTableEntry(Address entry)
        : label(),
          address(entry) { }
    Label label;
    Address address;
  };

  void EnsureSpaceForLazyDeopt();

  Zone* zone_;
  LPlatformChunk* const chunk_;
  MacroAssembler* const masm_;
  CompilationInfo* const info_;

  int current_block_;
  int current_instruction_;
  const ZoneList<LInstruction*>* instructions_;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<JumpTableEntry> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  Status status_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  int last_lazy_deopt_pc_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

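  // RAII scope that pushes the safepoint registers (and, if requested, the
  // double registers) on entry and pops them on exit, keeping
  // expected_safepoint_kind_ in sync.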
  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters:
          codegen_->masm_->PushSafepointRegisters();
          break;
        case Safepoint::kWithRegistersAndDoubles:
          codegen_->masm_->PushSafepointRegistersAndDoubles();
          break;
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters:
          codegen_->masm_->PopSafepointRegisters();
          break;
        case Safepoint::kWithRegistersAndDoubles:
          codegen_->masm_->PopSafepointRegistersAndDoubles();
          break;
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


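// A piece of code to be emitted out of line, after the main instruction
// stream (see GenerateDeferredCode). Subclasses implement Generate(); entry()
// and exit() are the labels used to enter and leave the deferred section.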
class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};

} }  // namespace v8::internal

#endif  // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
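How the interface above is typically driven, as a rough sketch: the optimizing compiler creates an LCodeGen over a chunk and a macro assembler, asks it to generate code, and then attaches safepoint and deoptimization data to the resulting Code object via FinishCode(). The driver below is illustrative only; GenerateOptimizedCode is a made-up name, and the MacroAssembler and CodeGenerator calls are assumed to match this V8 version's APIs. The real call site lives in the compiler pipeline, not in this header.

// Illustrative sketch, not actual V8 code.
static Handle<Code> GenerateOptimizedCode(LChunk* chunk, CompilationInfo* info) {
  MacroAssembler assembler(info->isolate(), NULL, 0);
  LCodeGen generator(chunk, &assembler, info);

  // Emit code for every instruction in the chunk; GenerateCode() returns
  // false if the chunk contains constructs the code generator cannot handle.
  if (!generator.GenerateCode()) return Handle<Code>::null();

  // Wrap the assembler's buffer in a Code object and attach safepoint and
  // deoptimization information to it.
  Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
  Handle<Code> code =
      CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
  generator.FinishCode(code);
  return code;
}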