#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
class SafepointGenerator;
class BranchGenerator;
class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        frame_is_built_(false),
        safepoints_(info->zone()),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
           info()->is_non_deferred_calling() ||
           info()->requires_frame();
  }
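
  // Support for converting LOperands to assembler types.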
  bool IsSmi(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);
  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void EmitGoto(int block);
  void DoGap(LGap* instr);
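
  // Generic version of EmitBranch. It contains some code that is used
  // for both EmitBranch and EmitBranchGeneric.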
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);
  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);
  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);
  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);
  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);
  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);
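
  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate).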
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);
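
  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to true and false label
  // should be made, to optimize fallthrough.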
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);
  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
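
  // Emit a branch to the deoptimization entry for this environment when the
  // condition described by branch_type (and, where applicable, reg and bit)
  // holds. The DeoptimizeIf* helpers below are convenience wrappers.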
  void DeoptimizeBranch(
      LEnvironment* environment,
      BranchType branch_type, Register reg = NoReg, int bit = -1,
      Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int additional_index);
  void CalcKeyedArrayBaseRegister(Register base,
                                  Register elements,
                                  Register key,
                                  bool key_is_tagged,
                                  ElementsKind elements_kind);
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void SaveCallerDoubles();
  void RestoreCallerDoubles();
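
  // Code generation passes. Each Generate* pass returns true if code
  // generation should continue.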
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();
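
  // Generates the custom OSR entrypoint and sets the osr_pc_offset.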
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };
  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);
  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);
  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }
  void LoadContextFromDeferred(LOperand* context);
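
  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.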
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);
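
  // Support for recording safepoint and position information.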
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);
  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
 private:
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  Safepoint::Kind expected_safepoint_kind_;
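
  // While a PushSafepointRegistersScope is live, every recorded safepoint must
  // be of the given kind; the scope saves the register state on entry via a
  // stub call and restores it on exit. A minimal usage sketch:
  //
  //   {
  //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //     ...  // emit a call that needs a register-saving safepoint
  //   }      // register state restored here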
  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr which must be saved on the stack (the call
      // to the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters: {
          StoreRegistersStateStub stub(kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          StoreRegistersStateStub stub(kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
    }
    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters: {
          RestoreRegistersStateStub stub(kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          RestoreRegistersStateStub stub(kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LGapResolver;
  friend class SafepointGenerator;

  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
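

// The abstract base class used by EmitBranchGeneric. Emit() emits code to
// branch when the condition holds; EmitInverted() emits code to branch when
// the condition does not hold.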
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
      : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
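
// A minimal sketch of a concrete generator, branching on a condition code
// (the real subclasses live in the corresponding .cc file; `__` is the usual
// masm() shorthand used there):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const { __ B(cond_, label); }
//     virtual void EmitInverted(Label* label) const {
//       __ B(NegateCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };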

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_