v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
full-codegen.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_FULL_CODEGEN_H_
29 #define V8_FULL_CODEGEN_H_
30 
31 #include "v8.h"
32 
33 #include "allocation.h"
34 #include "assert-scope.h"
35 #include "ast.h"
36 #include "code-stubs.h"
37 #include "codegen.h"
38 #include "compiler.h"
39 #include "data-flow.h"
40 #include "globals.h"
41 #include "objects.h"
42 
43 namespace v8 {
44 namespace internal {
45 
46 // Forward declarations.
47 class JumpPatchSite;
48 
49 // AST node visitor which can tell whether a given statement will be breakable
50 // when the code is compiled by the full compiler in the debugger. This means
51 // that there will be an IC (load/store/call) in the code generated for the
52 // debugger to piggyback on.
53 class BreakableStatementChecker: public AstVisitor {
54  public:
55  explicit BreakableStatementChecker(Zone* zone) : is_breakable_(false) {
56  InitializeAstVisitor(zone);
57  }
58 
59  void Check(Statement* stmt);
60  void Check(Expression* stmt);
61 
62  bool is_breakable() { return is_breakable_; }
63 
64  private:
65  // AST node visit functions.
66 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
67  AST_NODE_LIST(DECLARE_VISIT)
68 #undef DECLARE_VISIT
69 
70  bool is_breakable_;
71 
72  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
73  DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
74 };
75 
76 
77 // -----------------------------------------------------------------------------
78 // Full code generator.
79 
80 class FullCodeGenerator: public AstVisitor {
81  public:
82  enum State {
83  NO_REGISTERS,
84  TOS_REG
85  };
86 
87  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
88  : masm_(masm),
89  info_(info),
90  scope_(info->scope()),
91  nesting_stack_(NULL),
92  loop_depth_(0),
93  globals_(NULL),
94  context_(NULL),
95  bailout_entries_(info->HasDeoptimizationSupport()
96  ? info->function()->ast_node_count() : 0,
97  info->zone()),
98  back_edges_(2, info->zone()),
99  ic_total_count_(0) {
100  Initialize();
101  }
102 
103  void Initialize();
104 
105  static bool MakeCode(CompilationInfo* info);
106 
107  // Encode state and pc-offset as a BitField<type, start, size>.
108  // Only use 30 bits because we encode the result as a smi.
109  class StateField : public BitField<State, 0, 1> { };
110  class PcField : public BitField<unsigned, 1, 30-1> { };
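// As a hedged sketch (using the generic BitField<> encode/decode helpers, not a
// verbatim excerpt), a state/pc pair would be packed and unpacked like this:
//
//   unsigned pc_and_state =
//       StateField::encode(TOS_REG) | PcField::encode(masm()->pc_offset());
//   State state = StateField::decode(pc_and_state);        // TOS_REG
//   unsigned pc_offset = PcField::decode(pc_and_state);    // original offset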
111 
112  static const char* State2String(State state) {
113  switch (state) {
114  case NO_REGISTERS: return "NO_REGISTERS";
115  case TOS_REG: return "TOS_REG";
116  }
117  UNREACHABLE();
118  return NULL;
119  }
120 
121  static const int kMaxBackEdgeWeight = 127;
122 
123  // Platform-specific code size multiplier.
124 #if V8_TARGET_ARCH_IA32
125  static const int kCodeSizeMultiplier = 100;
126 #elif V8_TARGET_ARCH_X64
127  static const int kCodeSizeMultiplier = 162;
128 #elif V8_TARGET_ARCH_ARM
129  static const int kCodeSizeMultiplier = 142;
130 #elif V8_TARGET_ARCH_ARM64
131 // TODO(all): Copied ARM value. Check this is sensible for ARM64.
132  static const int kCodeSizeMultiplier = 142;
133 #elif V8_TARGET_ARCH_MIPS
134  static const int kCodeSizeMultiplier = 142;
135 #else
136 #error Unsupported target architecture.
137 #endif
138 
139  private:
140  class Breakable;
141  class Iteration;
142 
143  class TestContext;
144 
145  class NestedStatement BASE_EMBEDDED {
146  public:
147  explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
148  // Link into codegen's nesting stack.
149  previous_ = codegen->nesting_stack_;
150  codegen->nesting_stack_ = this;
151  }
152  virtual ~NestedStatement() {
153  // Unlink from codegen's nesting stack.
154  ASSERT_EQ(this, codegen_->nesting_stack_);
155  codegen_->nesting_stack_ = previous_;
156  }
157 
158  virtual Breakable* AsBreakable() { return NULL; }
159  virtual Iteration* AsIteration() { return NULL; }
160 
161  virtual bool IsContinueTarget(Statement* target) { return false; }
162  virtual bool IsBreakTarget(Statement* target) { return false; }
163 
164  // Notify the statement that we are exiting it via break, continue, or
165  // return and give it a chance to generate cleanup code. Return the
166  // next outer statement in the nesting stack. We accumulate in
167  // *stack_depth the amount to drop the stack and in *context_length the
168  // number of context chain links to unwind as we traverse the nesting
169  // stack from an exit to its target.
170  virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
171  return previous_;
172  }
173 
174  protected:
175  MacroAssembler* masm() { return codegen_->masm(); }
176 
177  FullCodeGenerator* codegen_;
178  NestedStatement* previous_;
179 
180  private:
181  DISALLOW_COPY_AND_ASSIGN(NestedStatement);
182  };
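// A rough sketch (not a verbatim excerpt; Drop() and the jump below are
// platform macro-assembler calls) of how a break statement would unwind this
// nesting stack via Exit():
//
//   int stack_depth = 0, context_length = 0;
//   NestedStatement* current = nesting_stack_;
//   while (!current->IsBreakTarget(stmt->target())) {
//     current = current->Exit(&stack_depth, &context_length);
//   }
//   __ Drop(stack_depth);                      // discard values left by inner statements
//   // ...unwind context_length links of the context chain, then...
//   __ jmp(current->AsBreakable()->break_label());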
183 
184  // A breakable statement such as a block.
185  class Breakable : public NestedStatement {
186  public:
187  Breakable(FullCodeGenerator* codegen, BreakableStatement* statement)
188  : NestedStatement(codegen), statement_(statement) {
189  }
190  virtual ~Breakable() {}
191 
192  virtual Breakable* AsBreakable() { return this; }
193  virtual bool IsBreakTarget(Statement* target) {
194  return statement() == target;
195  }
196 
197  BreakableStatement* statement() { return statement_; }
198  Label* break_label() { return &break_label_; }
199 
200  private:
201  BreakableStatement* statement_;
202  Label break_label_;
203  };
204 
205  // An iteration statement such as a while, for, or do loop.
206  class Iteration : public Breakable {
207  public:
208  Iteration(FullCodeGenerator* codegen, IterationStatement* statement)
209  : Breakable(codegen, statement) {
210  }
211  virtual ~Iteration() {}
212 
213  virtual Iteration* AsIteration() { return this; }
214  virtual bool IsContinueTarget(Statement* target) {
215  return statement() == target;
216  }
217 
218  Label* continue_label() { return &continue_label_; }
219 
220  private:
221  Label continue_label_;
222  };
223 
224  // A nested block statement.
225  class NestedBlock : public Breakable {
226  public:
227  NestedBlock(FullCodeGenerator* codegen, Block* block)
228  : Breakable(codegen, block) {
229  }
230  virtual ~NestedBlock() {}
231 
232  virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
233  if (statement()->AsBlock()->scope() != NULL) {
234  ++(*context_length);
235  }
236  return previous_;
237  };
238  };
239 
240  // The try block of a try/catch statement.
241  class TryCatch : public NestedStatement {
242  public:
243  explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {
244  }
245  virtual ~TryCatch() {}
246 
247  virtual NestedStatement* Exit(int* stack_depth, int* context_length);
248  };
249 
250  // The try block of a try/finally statement.
251  class TryFinally : public NestedStatement {
252  public:
253  TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
254  : NestedStatement(codegen), finally_entry_(finally_entry) {
255  }
256  virtual ~TryFinally() {}
257 
258  virtual NestedStatement* Exit(int* stack_depth, int* context_length);
259 
260  private:
261  Label* finally_entry_;
262  };
263 
264  // The finally block of a try/finally statement.
265  class Finally : public NestedStatement {
266  public:
267  static const int kElementCount = 5;
268 
269  explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
270  virtual ~Finally() {}
271 
272  virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
273  *stack_depth += kElementCount;
274  return previous_;
275  }
276  };
277 
278  // The body of a for/in loop.
279  class ForIn : public Iteration {
280  public:
281  static const int kElementCount = 5;
282 
283  ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
284  : Iteration(codegen, statement) {
285  }
286  virtual ~ForIn() {}
287 
288  virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
289  *stack_depth += kElementCount;
290  return previous_;
291  }
292  };
293 
294 
295  // The body of a with or catch.
296  class WithOrCatch : public NestedStatement {
297  public:
298  explicit WithOrCatch(FullCodeGenerator* codegen)
299  : NestedStatement(codegen) {
300  }
301  virtual ~WithOrCatch() {}
302 
303  virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
304  ++(*context_length);
305  return previous_;
306  }
307  };
308 
309  // Type of a member function that generates inline code for a native function.
310  typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
311 
312  static const InlineFunctionGenerator kInlineFunctionGenerators[];
313 
314  // A platform-specific utility to overwrite the accumulator register
315  // with a GC-safe value.
316  void ClearAccumulator();
317 
318  // Determine whether or not to inline the smi case for the given
319  // operation.
320  bool ShouldInlineSmiCase(Token::Value op);
321 
322  // Helper function to convert a pure value into a test context. The value
323  // is expected on the stack or the accumulator, depending on the platform.
324  // See the platform-specific implementation for details.
325  void DoTest(Expression* condition,
326  Label* if_true,
327  Label* if_false,
328  Label* fall_through);
329  void DoTest(const TestContext* context);
330 
331  // Helper function to split control flow and avoid a branch to the
332  // fall-through label if it is set up.
333 #if V8_TARGET_ARCH_MIPS
334  void Split(Condition cc,
335  Register lhs,
336  const Operand& rhs,
337  Label* if_true,
338  Label* if_false,
339  Label* fall_through);
340 #else // All non-mips arch.
341  void Split(Condition cc,
342  Label* if_true,
343  Label* if_false,
344  Label* fall_through);
345 #endif // V8_TARGET_ARCH_MIPS
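// Conceptually (sketched for the non-MIPS overload; illustrative, not a
// verbatim excerpt), Split emits only the branches that are actually needed:
//
//   if (if_false == fall_through)       branch on cc to if_true;
//   else if (if_true == fall_through)   branch on NegateCondition(cc) to if_false;
//   else                                branch on cc to if_true, then jump to if_false;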
346 
347  // Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into
348  // a register. Emits a context chain walk if necessary (so does
349  // SetVar) so avoid calling both on the same variable.
350  void GetVar(Register destination, Variable* var);
351 
352  // Assign to a known (PARAMETER, LOCAL, or CONTEXT) variable. If it's in
353  // the context, the write barrier will be emitted and source, scratch0,
354  // scratch1 will be clobbered. Emits a context chain walk if necessary
355  // (so does GetVar) so avoid calling both on the same variable.
356  void SetVar(Variable* var,
357  Register source,
358  Register scratch0,
359  Register scratch1);
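// Typical use, as a sketch (the register choices are illustrative only):
//
//   GetVar(result_register(), var);                        // read into a register
//   SetVar(var, result_register(), scratch0, scratch1);    // write; the scratches may be
//                                                          // clobbered for the write barrier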
360 
361  // An operand used to read/write a stack-allocated (PARAMETER or LOCAL)
362  // variable. Writing does not need the write barrier.
363  MemOperand StackOperand(Variable* var);
364 
365  // An operand used to read/write a known (PARAMETER, LOCAL, or CONTEXT)
366  // variable. May emit code to traverse the context chain, loading the
367  // found context into the scratch register. Writing to this operand will
368  // need the write barrier if location is CONTEXT.
369  MemOperand VarOperand(Variable* var, Register scratch);
370 
371  void VisitForEffect(Expression* expr) {
372  EffectContext context(this);
373  Visit(expr);
374  PrepareForBailout(expr, NO_REGISTERS);
375  }
376 
377  void VisitForAccumulatorValue(Expression* expr) {
378  AccumulatorValueContext context(this);
379  Visit(expr);
380  PrepareForBailout(expr, TOS_REG);
381  }
382 
383  void VisitForStackValue(Expression* expr) {
384  StackValueContext context(this);
385  Visit(expr);
386  PrepareForBailout(expr, NO_REGISTERS);
387  }
388 
389  void VisitForControl(Expression* expr,
390  Label* if_true,
391  Label* if_false,
392  Label* fall_through) {
393  TestContext context(this, expr, if_true, if_false, fall_through);
394  Visit(expr);
395  // For test contexts, we prepare for bailout before branching, not at
396  // the end of the entire expression. This happens as part of visiting
397  // the expression.
398  }
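// As a sketch of how these helpers compose (labels and bailout bookkeeping are
// simplified; not a verbatim excerpt), an if-statement would be lowered roughly as:
//
//   Label then_part, else_part, done;
//   VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
//   __ bind(&then_part);
//   Visit(stmt->then_statement());
//   __ jmp(&done);
//   __ bind(&else_part);
//   Visit(stmt->else_statement());
//   __ bind(&done);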
399 
400  void VisitInDuplicateContext(Expression* expr);
401 
402  void VisitDeclarations(ZoneList<Declaration*>* declarations);
403  void DeclareModules(Handle<FixedArray> descriptions);
404  void DeclareGlobals(Handle<FixedArray> pairs);
405  int DeclareGlobalsFlags();
406 
407  // Generate code to allocate all (including nested) modules and contexts.
408  // Because of recursive linking and the presence of module alias declarations,
409  // this has to be a separate pass _before_ populating or executing any module.
410  void AllocateModules(ZoneList<Declaration*>* declarations);
411 
412  // Generate code to create an iterator result object. The "value" property is
413  // set to a value popped from the stack, and "done" is set according to the
414  // argument. The result object is left in the result register.
415  void EmitCreateIteratorResult(bool done);
416 
417  // Try to perform a comparison as a fast inlined literal compare if
418  // the operands allow it. Returns true if the compare operation
419  // has been matched and all code generated; false otherwise.
420  bool TryLiteralCompare(CompareOperation* compare);
421 
422  // Platform-specific code for comparing the type of a value with
423  // a given literal string.
424  void EmitLiteralCompareTypeof(Expression* expr,
425  Expression* sub_expr,
426  Handle<String> check);
427 
428  // Platform-specific code for equality comparison with a nil-like value.
429  void EmitLiteralCompareNil(CompareOperation* expr,
430  Expression* sub_expr,
431  NilValue nil);
432 
433  // Bailout support.
434  void PrepareForBailout(Expression* node, State state);
435  void PrepareForBailoutForId(BailoutId id, State state);
436 
437  // Feedback slot support. The feedback vector will be cleared during gc and
438  // collected by the type-feedback oracle.
439  Handle<FixedArray> FeedbackVector() {
440  return feedback_vector_;
441  }
442  void StoreFeedbackVectorSlot(int slot, Handle<Object> object) {
443  feedback_vector_->set(slot, *object);
444  }
445  void InitializeFeedbackVector();
446 
447  // Record a call's return site offset, used to rebuild the frame if the
448  // called function was inlined at the site.
449  void RecordJSReturnSite(Call* call);
450 
451  // Prepare for bailout before a test (or compare) and branch. If
452  // should_normalize, then the following comparison will not handle the
453  // canonical JS true value so we will insert a (dead) test against true at
454  // the actual bailout target from the optimized code. If not
455  // should_normalize, the true and false labels are ignored.
456  void PrepareForBailoutBeforeSplit(Expression* expr,
457  bool should_normalize,
458  Label* if_true,
459  Label* if_false);
460 
461  // If enabled, emit debug code for checking that the current context is
462  // neither a with nor a catch context.
463  void EmitDebugCheckDeclarationContext(Variable* variable);
464 
465  // This is meant to be called at loop back edges, |back_edge_target| is
466  // the jump target of the back edge and is used to approximate the amount
467  // of code inside the loop.
468  void EmitBackEdgeBookkeeping(IterationStatement* stmt,
469  Label* back_edge_target);
470  // Record the OSR AST id corresponding to a back edge in the code.
471  void RecordBackEdge(BailoutId osr_ast_id);
472  // Emit a table of back edge ids, pcs and loop depths into the code stream.
473  // Return the offset of the start of the table.
474  unsigned EmitBackEdgeTable();
475 
476  void EmitProfilingCounterDecrement(int delta);
477  void EmitProfilingCounterReset();
478 
479  // Emit code to pop values from the stack associated with nested statements
480  // like try/catch, try/finally, etc, running the finallies and unwinding the
481  // handlers as needed.
482  void EmitUnwindBeforeReturn();
483 
484  // Platform-specific return sequence
485  void EmitReturnSequence();
486 
487  // Platform-specific code sequences for calls
488  void EmitCallWithStub(Call* expr);
489  void EmitCallWithIC(Call* expr);
490  void EmitKeyedCallWithIC(Call* expr, Expression* key);
491 
492  // Platform-specific code for inline runtime calls.
493  InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
494 
495  void EmitInlineRuntimeCall(CallRuntime* expr);
496 
497 #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
498  void Emit##name(CallRuntime* expr);
499  INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
500 #undef EMIT_INLINE_RUNTIME_CALL
501 
502  // Platform-specific code for resuming generators.
503  void EmitGeneratorResume(Expression *generator,
504  Expression *value,
505  JSGeneratorObject::ResumeMode resume_mode);
506 
507  // Platform-specific code for loading variables.
508  void EmitLoadGlobalCheckExtensions(Variable* var,
509  TypeofState typeof_state,
510  Label* slow);
511  MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
512  void EmitDynamicLookupFastCase(Variable* var,
513  TypeofState typeof_state,
514  Label* slow,
515  Label* done);
516  void EmitVariableLoad(VariableProxy* proxy);
517 
518  void EmitAccessor(Expression* expression);
519 
520  // Expects the arguments and the function already pushed.
521  void EmitResolvePossiblyDirectEval(int arg_count);
522 
523  // Platform-specific support for allocating a new closure based on
524  // the given function info.
525  void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
526 
527  // Platform-specific support for compiling assignments.
528 
529  // Load a value from a named property.
530  // The receiver is left on the stack by the IC.
531  void EmitNamedPropertyLoad(Property* expr);
532 
533  // Load a value from a keyed property.
534  // The receiver and the key are left on the stack by the IC.
535  void EmitKeyedPropertyLoad(Property* expr);
536 
537  // Apply the compound assignment operator. Expects the left operand on top
538  // of the stack and the right one in the accumulator.
539  void EmitBinaryOp(BinaryOperation* expr,
540  Token::Value op,
541  OverwriteMode mode);
542 
543  // Helper functions for generating inlined smi code for certain
544  // binary operations.
545  void EmitInlineSmiBinaryOp(BinaryOperation* expr,
546  Token::Value op,
548  Expression* left,
549  Expression* right);
550 
551  // Assign to the given expression as if via '='. The right-hand-side value
552  // is expected in the accumulator.
553  void EmitAssignment(Expression* expr);
554 
555  // Complete a variable assignment. The right-hand-side value is expected
556  // in the accumulator.
557  void EmitVariableAssignment(Variable* var,
558  Token::Value op);
559 
560  // Helper functions to EmitVariableAssignment
561  void EmitStoreToStackLocalOrContextSlot(Variable* var,
562  MemOperand location);
563  void EmitCallStoreContextSlot(Handle<String> name, StrictMode strict_mode);
564 
565  // Complete a named property assignment. The receiver is expected on top
566  // of the stack and the right-hand-side value in the accumulator.
567  void EmitNamedPropertyAssignment(Assignment* expr);
568 
569  // Complete a keyed property assignment. The receiver and key are
570  // expected on top of the stack and the right-hand-side value in the
571  // accumulator.
572  void EmitKeyedPropertyAssignment(Assignment* expr);
573 
574  void CallIC(Handle<Code> code,
575  TypeFeedbackId id = TypeFeedbackId::None());
576 
577  void CallLoadIC(ContextualMode mode,
578  TypeFeedbackId id = TypeFeedbackId::None());
579  void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
580 
581  void SetFunctionPosition(FunctionLiteral* fun);
582  void SetReturnPosition(FunctionLiteral* fun);
583  void SetStatementPosition(Statement* stmt);
584  void SetExpressionPosition(Expression* expr);
585  void SetStatementPosition(int pos);
586  void SetSourcePosition(int pos);
587 
588  // Non-local control flow support.
589  void EnterFinallyBlock();
590  void ExitFinallyBlock();
591 
592  // Loop nesting counter.
593  int loop_depth() { return loop_depth_; }
594  void increment_loop_depth() { loop_depth_++; }
595  void decrement_loop_depth() {
596  ASSERT(loop_depth_ > 0);
597  loop_depth_--;
598  }
599 
600  MacroAssembler* masm() { return masm_; }
601 
602  class ExpressionContext;
603  const ExpressionContext* context() { return context_; }
604  void set_new_context(const ExpressionContext* context) { context_ = context; }
605 
606  Handle<Script> script() { return info_->script(); }
607  bool is_eval() { return info_->is_eval(); }
608  bool is_native() { return info_->is_native(); }
609  StrictMode strict_mode() { return function()->strict_mode(); }
610  FunctionLiteral* function() { return info_->function(); }
611  Scope* scope() { return scope_; }
612 
613  static Register result_register();
614  static Register context_register();
615 
616  // Set fields in the stack frame. Offsets are the frame pointer relative
617  // offsets defined in, e.g., StandardFrameConstants.
618  void StoreToFrameField(int frame_offset, Register value);
619 
620  // Load a value from the current context. Indices are defined as an enum
621  // in v8::internal::Context.
622  void LoadContextField(Register dst, int context_index);
623 
624  // Push the function argument for the runtime functions PushWithContext
625  // and PushCatchContext.
626  void PushFunctionArgumentForContextAllocation();
627 
628  // AST node visit functions.
629 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
630  AST_NODE_LIST(DECLARE_VISIT)
631 #undef DECLARE_VISIT
632 
633  void VisitComma(BinaryOperation* expr);
634  void VisitLogicalExpression(BinaryOperation* expr);
635  void VisitArithmeticExpression(BinaryOperation* expr);
636 
637  void VisitForTypeofValue(Expression* expr);
638 
639  void Generate();
640  void PopulateDeoptimizationData(Handle<Code> code);
641  void PopulateTypeFeedbackInfo(Handle<Code> code);
642 
643  Handle<FixedArray> handler_table() { return handler_table_; }
644 
645  struct BailoutEntry {
646  BailoutId id;
647  unsigned pc_and_state;
648  };
649 
650  struct BackEdgeEntry {
651  BailoutId id;
652  unsigned pc;
653  uint32_t loop_depth;
654  };
655 
656  class ExpressionContext BASE_EMBEDDED {
657  public:
658  explicit ExpressionContext(FullCodeGenerator* codegen)
659  : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
660  codegen->set_new_context(this);
661  }
662 
663  virtual ~ExpressionContext() {
664  codegen_->set_new_context(old_);
665  }
666 
667  Isolate* isolate() const { return codegen_->isolate(); }
668 
669  // Convert constant control flow (true or false) to the result expected for
670  // this expression context.
671  virtual void Plug(bool flag) const = 0;
672 
673  // Emit code to convert a pure value (in a register, known variable
674  // location, as a literal, or on top of the stack) into the result
675  // expected according to this expression context.
676  virtual void Plug(Register reg) const = 0;
677  virtual void Plug(Variable* var) const = 0;
678  virtual void Plug(Handle<Object> lit) const = 0;
679  virtual void Plug(Heap::RootListIndex index) const = 0;
680  virtual void PlugTOS() const = 0;
681 
682  // Emit code to convert pure control flow to a pair of unbound labels into
683  // the result expected according to this expression context. The
684  // implementation will bind both labels unless it's a TestContext, which
685  // won't bind them at this point.
686  virtual void Plug(Label* materialize_true,
687  Label* materialize_false) const = 0;
688 
689  // Emit code to discard count elements from the top of stack, then convert
690  // a pure value into the result expected according to this expression
691  // context.
692  virtual void DropAndPlug(int count, Register reg) const = 0;
693 
694  // Set up branch labels for a test expression. The three Label** parameters
695  // are output parameters.
696  virtual void PrepareTest(Label* materialize_true,
697  Label* materialize_false,
698  Label** if_true,
699  Label** if_false,
700  Label** fall_through) const = 0;
701 
702  // Returns true if we are evaluating only for side effects (i.e. if the
703  // result will be discarded).
704  virtual bool IsEffect() const { return false; }
705 
706  // Returns true if we are evaluating for the value (in accu/on stack).
707  virtual bool IsAccumulatorValue() const { return false; }
708  virtual bool IsStackValue() const { return false; }
709 
710  // Returns true if we are branching on the value rather than materializing
711  // it. Only used for asserts.
712  virtual bool IsTest() const { return false; }
713 
714  protected:
715  FullCodeGenerator* codegen() const { return codegen_; }
716  MacroAssembler* masm() const { return masm_; }
717  MacroAssembler* masm_;
718 
719  private:
720  const ExpressionContext* old_;
721  FullCodeGenerator* codegen_;
722  };
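// The intended calling pattern, as a hedged sketch: once a visitor has produced
// a value, it hands it to the active context, which decides whether to keep it
// in the accumulator, push it, discard it, or branch on it. For example:
//
//   void FullCodeGenerator::VisitLiteral(Literal* expr) {
//     context()->Plug(expr->value());      // Handle<Object> overload
//   }
//   // ...or, after computing into the accumulator:
//   context()->Plug(result_register());    // Register overload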
723 
724  class AccumulatorValueContext : public ExpressionContext {
725  public:
726  explicit AccumulatorValueContext(FullCodeGenerator* codegen)
727  : ExpressionContext(codegen) { }
728 
729  virtual void Plug(bool flag) const;
730  virtual void Plug(Register reg) const;
731  virtual void Plug(Label* materialize_true, Label* materialize_false) const;
732  virtual void Plug(Variable* var) const;
733  virtual void Plug(Handle<Object> lit) const;
734  virtual void Plug(Heap::RootListIndex) const;
735  virtual void PlugTOS() const;
736  virtual void DropAndPlug(int count, Register reg) const;
737  virtual void PrepareTest(Label* materialize_true,
738  Label* materialize_false,
739  Label** if_true,
740  Label** if_false,
741  Label** fall_through) const;
742  virtual bool IsAccumulatorValue() const { return true; }
743  };
744 
745  class StackValueContext : public ExpressionContext {
746  public:
747  explicit StackValueContext(FullCodeGenerator* codegen)
748  : ExpressionContext(codegen) { }
749 
750  virtual void Plug(bool flag) const;
751  virtual void Plug(Register reg) const;
752  virtual void Plug(Label* materialize_true, Label* materialize_false) const;
753  virtual void Plug(Variable* var) const;
754  virtual void Plug(Handle<Object> lit) const;
755  virtual void Plug(Heap::RootListIndex) const;
756  virtual void PlugTOS() const;
757  virtual void DropAndPlug(int count, Register reg) const;
758  virtual void PrepareTest(Label* materialize_true,
759  Label* materialize_false,
760  Label** if_true,
761  Label** if_false,
762  Label** fall_through) const;
763  virtual bool IsStackValue() const { return true; }
764  };
765 
766  class TestContext : public ExpressionContext {
767  public:
768  TestContext(FullCodeGenerator* codegen,
769  Expression* condition,
770  Label* true_label,
771  Label* false_label,
772  Label* fall_through)
773  : ExpressionContext(codegen),
774  condition_(condition),
775  true_label_(true_label),
776  false_label_(false_label),
777  fall_through_(fall_through) { }
778 
779  static const TestContext* cast(const ExpressionContext* context) {
780  ASSERT(context->IsTest());
781  return reinterpret_cast<const TestContext*>(context);
782  }
783 
784  Expression* condition() const { return condition_; }
785  Label* true_label() const { return true_label_; }
786  Label* false_label() const { return false_label_; }
787  Label* fall_through() const { return fall_through_; }
788 
789  virtual void Plug(bool flag) const;
790  virtual void Plug(Register reg) const;
791  virtual void Plug(Label* materialize_true, Label* materialize_false) const;
792  virtual void Plug(Variable* var) const;
793  virtual void Plug(Handle<Object> lit) const;
794  virtual void Plug(Heap::RootListIndex) const;
795  virtual void PlugTOS() const;
796  virtual void DropAndPlug(int count, Register reg) const;
797  virtual void PrepareTest(Label* materialize_true,
798  Label* materialize_false,
799  Label** if_true,
800  Label** if_false,
801  Label** fall_through) const;
802  virtual bool IsTest() const { return true; }
803 
804  private:
805  Expression* condition_;
806  Label* true_label_;
807  Label* false_label_;
808  Label* fall_through_;
809  };
810 
811  class EffectContext : public ExpressionContext {
812  public:
813  explicit EffectContext(FullCodeGenerator* codegen)
814  : ExpressionContext(codegen) { }
815 
816  virtual void Plug(bool flag) const;
817  virtual void Plug(Register reg) const;
818  virtual void Plug(Label* materialize_true, Label* materialize_false) const;
819  virtual void Plug(Variable* var) const;
820  virtual void Plug(Handle<Object> lit) const;
821  virtual void Plug(Heap::RootListIndex) const;
822  virtual void PlugTOS() const;
823  virtual void DropAndPlug(int count, Register reg) const;
824  virtual void PrepareTest(Label* materialize_true,
825  Label* materialize_false,
826  Label** if_true,
827  Label** if_false,
828  Label** fall_through) const;
829  virtual bool IsEffect() const { return true; }
830  };
831 
832  MacroAssembler* masm_;
833  CompilationInfo* info_;
834  Scope* scope_;
835  Label return_label_;
836  NestedStatement* nesting_stack_;
837  int loop_depth_;
838  ZoneList<Handle<Object> >* globals_;
839  Handle<FixedArray> modules_;
840  int module_index_;
841  const ExpressionContext* context_;
842  ZoneList<BailoutEntry> bailout_entries_;
843  GrowableBitVector prepared_bailout_ids_;
844  ZoneList<BackEdgeEntry> back_edges_;
845  int ic_total_count_;
846  Handle<FixedArray> handler_table_;
847  Handle<FixedArray> feedback_vector_;
848  Handle<Cell> profiling_counter_;
849  bool generate_debug_code_;
850 
851  friend class NestedStatement;
852 
853  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
854  DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
855 };
856 
857 
858 // A map from property names to getter/setter pairs allocated in the zone.
859 class AccessorTable: public TemplateHashMap<Literal,
860  ObjectLiteral::Accessors,
861  ZoneAllocationPolicy> {
862  public:
863  explicit AccessorTable(Zone* zone) :
864  TemplateHashMap<Literal, ObjectLiteral::Accessors,
865  ZoneAllocationPolicy>(Literal::Match,
866  ZoneAllocationPolicy(zone)),
867  zone_(zone) { }
868 
869  Iterator lookup(Literal* literal) {
870  Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
871  if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
872  return it;
873  }
874 
875  private:
876  Zone* zone_;
877 };
878 
879 
880 class BackEdgeTable {
881  public:
882  BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
883  ASSERT(code->kind() == Code::FUNCTION);
884  instruction_start_ = code->instruction_start();
885  Address table_address = instruction_start_ + code->back_edge_table_offset();
886  length_ = Memory::uint32_at(table_address);
887  start_ = table_address + kTableLengthSize;
888  }
889 
890  uint32_t length() { return length_; }
891 
892  BailoutId ast_id(uint32_t index) {
893  return BailoutId(static_cast<int>(
894  Memory::uint32_at(entry_at(index) + kAstIdOffset)));
895  }
896 
897  uint32_t loop_depth(uint32_t index) {
898  return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
899  }
900 
901  uint32_t pc_offset(uint32_t index) {
902  return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
903  }
904 
905  Address pc(uint32_t index) {
906  return instruction_start_ + pc_offset(index);
907  }
908 
908 
909  enum BackEdgeState {
910  INTERRUPT,
911  ON_STACK_REPLACEMENT,
912  OSR_AFTER_STACK_CHECK
913  };
914 
915  // Patch all interrupts with allowed loop depth in the unoptimized code to
916  // unconditionally call replacement_code.
917  static void Patch(Isolate* isolate,
918  Code* unoptimized_code);
919 
920  // Patch the back edge to the target state, provided the correct callee.
921  static void PatchAt(Code* unoptimized_code,
922  Address pc,
923  BackEdgeState target_state,
924  Code* replacement_code);
925 
926  // Change all patched back edges back to normal interrupts.
927  static void Revert(Isolate* isolate,
928  Code* unoptimized_code);
929 
930  // Change a back edge patched for on-stack replacement to perform a
931  // stack check first.
932  static void AddStackCheck(Handle<Code> code, uint32_t pc_offset);
933 
934  // Revert the patch by AddStackCheck.
935  static void RemoveStackCheck(Handle<Code> code, uint32_t pc_offset);
936 
937  // Return the current patch state of the back edge.
938  static BackEdgeState GetBackEdgeState(Isolate* isolate,
939  Code* unoptimized_code,
940  Address pc_after);
941 
942 #ifdef DEBUG
943  // Verify that all back edges of a certain loop depth are patched.
944  static bool Verify(Isolate* isolate,
945  Code* unoptimized_code,
946  int loop_nesting_level);
947 #endif // DEBUG
948 
949  private:
950  Address entry_at(uint32_t index) {
951  ASSERT(index < length_);
952  return start_ + index * kEntrySize;
953  }
954 
955  static const int kTableLengthSize = kIntSize;
956  static const int kAstIdOffset = 0 * kIntSize;
957  static const int kPcOffsetOffset = 1 * kIntSize;
958  static const int kLoopDepthOffset = 2 * kIntSize;
959  static const int kEntrySize = 3 * kIntSize;
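// Given these constants, the table emitted by EmitBackEdgeTable() can be
// pictured as follows (byte offsets from the table start; a sketch, not a
// normative layout):
//
//   +0            : uint32  number of entries (length_)
//   +4 + i*12 + 0 : uint32  AST id of back edge i
//   +4 + i*12 + 4 : uint32  pc offset of back edge i
//   +4 + i*12 + 8 : uint32  loop depth of back edge i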
960 
961  Address start_;
962  Address instruction_start_;
963  uint32_t length_;
964 };
965 
966 
967 } } // namespace v8::internal
968 
969 #endif // V8_FULL_CODEGEN_H_