#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
class SafepointGenerator;
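// LCodeGen generates ia32 machine code from a platform-independent LChunk
// of lithium instructions produced by the Crankshaft pipeline.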
class LCodeGen : public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        dynamic_frame_alignment_(false),
        support_aligned_spilled_doubles_(false),
        frame_is_built_(false),
        x87_stack_(assembler),
        safepoints_(info->zone()),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }
  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }
  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
           info()->is_non_deferred_calling() ||
           info()->requires_frame();
  }
  bool IsSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  void X87Fxch(X87Register reg, int other_slot = 0) {
    x87_stack_.Fxch(reg, other_slot);
  }
  void X87Free(X87Register reg) {
    x87_stack_.Free(reg);
  }
  bool X87StackEmpty() {
    return x87_stack_.depth() == 0;
  }
  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
  Scope* scope() const { return scope_; }
  XMMRegister double_scratch0() const { return xmm0; }
  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);
  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  void SaveCallerDoubles();
  void RestoreCallerDoubles();
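  // Code generation passes.  The bool passes return true if code
  // generation should continue.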
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();
  void GenerateOsrPrologue();
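  // A safepoint is recorded either plainly or, for calls that must preserve
  // registers, together with the pushed register state.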
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };
  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);
  void CallRuntime(const Runtime::Function* fun,
                   int argc,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int argc,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, argc, instr);
  }
  void LoadContextFromDeferred(LOperand* context);
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         EDIState edi_state);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  bool DeoptEveryNTimes() {
    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
  }
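  // Deoptimization support: translations describe how to reconstruct the
  // unoptimized frame state at a deopt point.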
  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
  ExternalReference ToExternalReference(LConstantOperand* op) const;
  Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                LOperand* key,
                                Representation key_representation,
                                ElementsKind elements_kind,
                                uint32_t offset,
                                uint32_t additional_index = 0);
  Operand BuildSeqStringOperand(Register string,
                                LOperand* index,
                                String::Encoding encoding);
  void EmitIntegerMathAbs(LMathAbs* instr);
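  // Support for recording safepoint and position information.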
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordAndWritePosition(int position) V8_OVERRIDE;

  void EmitGoto(int block);
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition cc);
  template<class InstrType>
  void EmitFalseBranch(InstrType instr, Condition cc);
  void EmitNumberUntagD(Register input,
                        Register temp,
                        XMMRegister result,
                        bool allow_undefined_as_nan,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env,
                        NumberUntagDMode mode);
  void EmitNumberUntagDNoSSE2(Register input,
                              Register temp,
                              X87Register res_reg,
                              bool allow_undefined_as_nan,
                              bool deoptimize_on_minus_zero,
                              LEnvironment* env,
                              NumberUntagDMode mode);
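  // Emits optimized code for typeof x == "y".  Modifies input register.
  // Returns the condition on which a final split to true and false label
  // should be made.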
  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Label* is_not_object,
                         Label* is_object);
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);
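  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.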
  void EmitIsConstructCall(Register temp);
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);
  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
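  // Keyed load/store helpers, split by elements kind (external, fixed
  // double, and fixed arrays).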
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
  void EmitPushTaggedOperand(LOperand* operand);
  void EmitFlushX87ForDeopt();
  void FlushX87StackIfNecessary(LInstruction* instr) {
    x87_stack_.FlushIfNecessary(instr, this);
  }
  void MakeSureStackPagesMapped(int offset);
  int inlined_function_count_;
  TranslationBuffer translations_;
  bool dynamic_frame_alignment_;
  bool support_aligned_spilled_doubles_;
  bool frame_is_built_;
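  // Models the x87 FPU register stack, so values can be tracked and
  // shuffled on the non-SSE2 code path.  An immutable copy records the
  // stack state for later comparison.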
  class X87Stack {
   public:
    explicit X87Stack(MacroAssembler* masm)
        : stack_depth_(0), is_mutable_(true), masm_(masm) { }
    explicit X87Stack(const X87Stack& other)
        : stack_depth_(other.stack_depth_), is_mutable_(false),
          masm_(other.masm_) {
      for (int i = 0; i < stack_depth_; i++) {
        stack_[i] = other.stack_[i];
      }
    }
    bool operator==(const X87Stack& other) const {
      if (stack_depth_ != other.stack_depth_) return false;
      for (int i = 0; i < stack_depth_; i++) {
        if (!stack_[i].is(other.stack_[i])) return false;
      }
      return true;
    }
    bool Contains(X87Register reg);
    void Fxch(X87Register reg, int other_slot = 0);
    void Free(X87Register reg);
    void PrepareToWrite(X87Register reg);
    void CommitWrite(X87Register reg);
    void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
    void LeavingBlock(int current_block_id, LGoto* goto_instr);
    int depth() const { return stack_depth_; }
    void push(X87Register reg) {
      ASSERT(is_mutable_);
      stack_[stack_depth_] = reg;
      stack_depth_++;
    }
    MacroAssembler* masm() const { return masm_; }
   private:
    int ArrayIndex(X87Register reg);

    X87Register stack_[X87Register::kNumAllocatableRegisters];
    int stack_depth_;
    bool is_mutable_;
    MacroAssembler* masm_;
  };

  X87Stack x87_stack_;
  SafepointTableBuilder safepoints_;
  Safepoint::Kind expected_safepoint_kind_;
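  // Scope that pushes the registers on entry, so a call inside it can be
  // recorded as a register safepoint, and pops them again on exit.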
  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
      ASSERT(codegen_->info()->is_calling());
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class LGapResolver;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
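// A deferred code piece is an out-of-line stub emitted after the main
// instruction stream; Generate() is implemented by each concrete subclass.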
class LDeferredCode : public ZoneObject {
 public:
  LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_),
        x87_stack_(x87_stack) {
    codegen->AddDeferredCode(this);
  }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }
  const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
  LCodeGen::X87Stack x87_stack_;
};
#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_