28 #ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
29 #define V8_X64_LITHIUM_CODEGEN_X64_H_
46 class SafepointGenerator;
// NOTE(review): this header fragment is badly truncated -- original file line
// numbers are fused into the text, lines are re-wrapped, and many declarations
// are missing their signature lines. Comments below only describe what the
// visible tokens establish; anything else is hedged.
//
// LCodeGen: Crankshaft (Lithium) native-code generator, x64 backend.
// Derives from LCodeGenBase and translates an LChunk into machine code.
48 class LCodeGen:
public LCodeGenBase {
// Constructor initializer list (the signature line is not visible in this
// chunk; per the tail residue it takes LChunk*, MacroAssembler*,
// CompilationInfo*). Zone-backed lists get small initial capacities.
51 : LCodeGenBase(chunk, assembler, info),
52 deoptimizations_(4, info->zone()),
53 jump_table_(4, info->zone()),
54 deoptimization_literals_(8, info->zone()),
55 inlined_function_count_(0),
56 scope_(info->scope()),
57 translations_(info->zone()),
58 deferred_(8, info->zone()),
60 frame_is_built_(
false),
61 safepoints_(info->zone()),
// Starts in kSimple safepoint mode; flipped to kWithRegisters only inside a
// PushSafepointRegistersScope (see nested class near the bottom).
63 expected_safepoint_kind_(Safepoint::kSimple) {
64 PopulateDeoptimizationLiteralsWithInlinedFunctions();
// Body of LookupDestination(block_id) -- delegates to the chunk.
68 return chunk()->LookupDestination(block_id);
// Body of a frame-requirement predicate (presumably NeedsEagerFrame --
// TODO confirm): a frame is needed if there are spill slots, non-deferred
// calls, or the CompilationInfo explicitly requires one.
76 return GetStackSlotCount() > 0 ||
77 info()->is_non_deferred_calling() ||
79 info()->requires_frame();
// Constant-operand conversion helpers.
92 Smi*
ToSmi(LConstantOperand* op)
const;
93 double ToDouble(LConstantOperand* op)
const;
94 ExternalReference ToExternalReference(LConstantOperand* op)
const;
// Deferred-code entry point for LNumberTagU (slow path emitted out of line).
109 void DoDeferredNumberTagU(LNumberTagU* instr);
// Declares one Do<Instr> visitor per concrete Lithium instruction; expanded
// elsewhere with LITHIUM_CONCRETE_INSTRUCTION_LIST.
128 #define DECLARE_DO(type) void Do##type(L##type* node);
// Simple accessors.
135 LPlatformChunk* chunk()
const {
return chunk_; }
136 Scope* scope()
const {
return scope_; }
137 HGraph* graph()
const {
return chunk()->graph(); }
// xmm0 is reserved as the double scratch register on x64.
139 XMMRegister double_scratch0()
const {
return xmm0; }
141 void EmitClassOfTest(Label* if_true,
143 Handle<String> class_name,
148 int GetStackSlotCount()
const {
return chunk()->spill_slot_count(); }
// Spill/restore XMM caller-saved registers around calls that clobber them.
155 void SaveCallerDoubles();
156 void RestoreCallerDoubles();
// Code-generation phases, invoked by the base class / GenerateCode driver.
160 void GenerateBodyInstructionPre(LInstruction* instr)
V8_OVERRIDE;
161 void GenerateBodyInstructionPost(LInstruction* instr)
V8_OVERRIDE;
162 bool GeneratePrologue();
163 bool GenerateDeferredCode();
164 bool GenerateJumpTable();
165 bool GenerateSafepointTable();
// Emits the on-stack-replacement entry prologue.
168 void GenerateOsrPrologue();
// Enumerators of a SafepointMode enum (enum header not visible here).
171 RECORD_SIMPLE_SAFEPOINT,
172 RECORD_SAFEPOINT_WITH_REGISTERS
// Call emission helpers; each records a safepoint for the given instruction.
175 void CallCodeGeneric(Handle<Code> code,
176 RelocInfo::Mode
mode,
178 SafepointMode safepoint_mode,
182 void CallCode(Handle<Code> code,
183 RelocInfo::Mode
mode,
184 LInstruction* instr);
// Inline CallRuntime overload: forwards to the three-argument form.
186 void CallRuntime(
const Runtime::Function*
function,
193 LInstruction* instr) {
195 CallRuntime(
function, num_arguments, instr);
203 void LoadContextFromDeferred(LOperand* context);
212 void CallKnownFunction(Handle<JSFunction>
function,
// Deoptimization support: environment registration, translation building,
// and literal bookkeeping.
218 void RecordSafepointWithLazyDeopt(LInstruction* instr,
219 SafepointMode safepoint_mode,
221 void RegisterEnvironmentForDeoptimization(
LEnvironment* environment,
222 Safepoint::DeoptMode
mode);
// True when the --deopt-every-n-times stress flag is active (never for stubs).
229 bool DeoptEveryNTimes() {
230 return FLAG_deopt_every_n_times != 0 && !
info()->IsStub();
234 Translation* translation,
238 int* object_index_pointer,
239 int* dematerialized_index_pointer);
240 void PopulateDeoptimizationData(Handle<Code> code);
241 int DefineDeoptimizationLiteral(Handle<Object> literal);
243 void PopulateDeoptimizationLiteralsWithInlinedFunctions();
// Operand builders for keyed element / seq-string access.
247 Operand BuildFastArrayOperand(
248 LOperand* elements_pointer,
252 uint32_t additional_index = 0);
254 Operand BuildSeqStringOperand(Register
string,
258 void EmitIntegerMathAbs(LMathAbs* instr);
259 void EmitSmiMathAbs(LMathAbs* instr);
// Safepoint recording overloads (full kind/arguments form plus shorthands).
262 void RecordSafepoint(LPointerMap* pointers,
263 Safepoint::Kind kind,
265 Safepoint::DeoptMode
mode);
266 void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode
mode);
267 void RecordSafepoint(Safepoint::DeoptMode
mode);
268 void RecordSafepointWithRegisters(LPointerMap* pointers,
270 Safepoint::DeoptMode
mode);
271 void RecordAndWritePosition(
int position)
V8_OVERRIDE;
// Emits a jump to the given block unless it falls through.
274 void EmitGoto(
int block);
// Templated branch emitters, instantiated per control instruction type.
277 template<
class InstrType>
279 template<
class InstrType>
280 void EmitFalseBranch(InstrType instr,
Condition cc);
281 void EmitNumberUntagD(
284 bool allow_undefined_as_nan,
285 bool deoptimize_on_minus_zero,
// Type-test emitters used by typeof / is-object / is-string branches.
292 Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
298 Label* is_not_object,
306 Label* is_not_string,
311 void EmitIsConstructCall(Register temp);
315 void EmitPushTaggedOperand(LOperand* operand);
319 void EmitDeepCopy(Handle<JSObject>
object,
325 void EnsureSpaceForLazyDeopt(
int space_needed)
V8_OVERRIDE;
// Keyed load/store dispatch by backing-store kind.
326 void DoLoadKeyedExternalArray(LLoadKeyed* instr);
327 void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
328 void DoLoadKeyedFixedArray(LLoadKeyed* instr);
329 void DoStoreKeyedExternalArray(LStoreKeyed* instr);
330 void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
331 void DoStoreKeyedFixedArray(LStoreKeyed* instr);
// Touches stack pages up to |offset| (Windows stack-probe style helper --
// TODO confirm exact platform condition; guard lines not visible here).
337 void MakeSureStackPagesMapped(
int offset);
// Member fields, mirroring the constructor initializer list above.
340 ZoneList<LEnvironment*> deoptimizations_;
341 ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
342 ZoneList<Handle<Object> > deoptimization_literals_;
343 int inlined_function_count_;
345 TranslationBuffer translations_;
346 ZoneList<LDeferredCode*> deferred_;
348 bool frame_is_built_;
352 SafepointTableBuilder safepoints_;
357 Safepoint::Kind expected_safepoint_kind_;
// Nested RAII scope: pushes the safepoint registers on entry and pops them
// on exit, toggling expected_safepoint_kind_ kSimple <-> kWithRegisters.
// The class header line is not visible in this chunk.
361 explicit PushSafepointRegistersScope(
LCodeGen* codegen)
362 : codegen_(codegen) {
363 ASSERT(codegen_->info()->is_calling());
364 ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
365 codegen_->masm_->PushSafepointRegisters();
366 codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
369 ~PushSafepointRegistersScope() {
370 ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
371 codegen_->masm_->PopSafepointRegisters();
372 codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
// LDeferredCode: zone-allocated base for out-of-line (slow-path) code stubs.
// Subclasses implement Generate()/instr() (declared on lines not visible
// here); the generator emits them after the main body via
// GenerateDeferredCode(). NOTE(review): fragment is truncated -- the
// constructor signature and several members are missing from this chunk.
386 class LDeferredCode:
public ZoneObject {
// Constructor tail: no external exit label by default; remembers the index
// of the instruction that spawned this deferred code, and self-registers
// with the code generator.
390 external_exit_(
NULL),
391 instruction_index_(codegen->current_instruction_) {
392 codegen->AddDeferredCode(
this);
// exit(): where to jump back to -- the caller-supplied external exit label
// if one was set, otherwise the internal exit_ label.
401 Label*
exit() {
return external_exit_ !=
NULL ? external_exit_ : &exit_; }
414 Label* external_exit_;
415 int instruction_index_;
420 #endif // V8_X64_LITHIUM_CODEGEN_X64_H_
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
void FinishCode(Handle< Code > code)
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check)
void SetExit(Label *exit)
Smi * ToSmi(LConstantOperand *op) const
bool NeedsEagerFrame() const
LDeferredCode(LCodeGen *codegen)
static const Function * FunctionForId(FunctionId id)
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
#define ASSERT(condition)
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
LCodeGen * codegen() const
MacroAssembler * masm() const
int instruction_index() const
bool IsDehoistedKeyConstant(LConstantOperand *op) const
bool IsInteger32Constant(LConstantOperand *op) const
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
void DoDeferredStackCheck(LStackCheck *instr)
int LookupDestination(int block_id) const
Operand ToOperand(LOperand *op)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
friend class LEnvironment
void DoDeferredNumberTagD(LNumberTagD *instr)
void check(i::Vector< const uint8_t > string)
DwVfpRegister ToDoubleRegister(LOperand *op) const
void DoDeferredAllocate(LAllocate *instr)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
int32_t ToInteger32(LConstantOperand *op) const
Handle< Object > ToHandle(LConstantOperand *op) const
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
void DoParallelMove(LParallelMove *move)
double ToDouble(LConstantOperand *op) const
bool IsSmiConstant(LConstantOperand *op) const
bool NeedsDeferredFrame() const
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function info
void WriteTranslation(LEnvironment *environment, Translation *translation)
virtual LInstruction * instr()=0
kInstanceClassNameOffset kNeedsAccessCheckBit kRemovePrototypeBit kIsExpressionBit kAllowLazyCompilation kUsesArguments formal_parameter_count
virtual void Generate()=0
Register ToRegister(LOperand *op) const
void DoDeferredTaggedToI(LTaggedToI *instr)
bool IsNextEmittedBlock(int block_id) const
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
friend class SafepointGenerator
friend class LGapResolver
friend class LDeferredCode