#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

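// LCodeGen translates a Lithium chunk (the low-level IR produced by the
// Crankshaft optimizing compiler) into native MIPS code, recording
// safepoints and deoptimization data along the way.
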
class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        frame_is_built_(false),
        safepoints_(info->zone()),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }
  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        info()->requires_frame();
  }
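  // Support for converting LOperands to assembler types.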
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
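  // Builds the memory operand for a keyed load or store, folding a constant
  // key and any additional offset into the operand when possible.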
  MemOperand PrepareKeyedOperand(Register key,
                                 Register base,
                                 bool key_is_constant,
                                 int constant_key,
                                 int element_size,
                                 int shift_size,
                                 int additional_index,
                                 int additional_offset);
  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  Scope* scope() const { return scope_; }
  LInstruction* GetNextInstruction();
  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);
  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  void SaveCallerDoubles();
  void RestoreCallerDoubles();
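  // Code generation passes.  Returns true if code generation should
  // continue.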
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();
  void GenerateOsrPrologue();
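  // The kind of safepoint to record for a call: either a plain safepoint or
  // one that also captures the current register state.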
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };
  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);
  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);
  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }
  void LoadContextFromDeferred(LOperand* context);
  // Generate a direct call to a known function.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         A1State a1_state);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition condition,
                    LEnvironment* environment,
                    Deoptimizer::BailoutType bailout_type,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));
  void DeoptimizeIf(Condition condition,
                    LEnvironment* environment,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));
  void ApplyCheckIf(Condition condition,
                    LBoundsCheck* check,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));
  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
  MemOperand BuildSeqStringOperand(Register string,
                                   LOperand* index,
                                   String::Encoding encoding);
  void EmitIntegerMathAbs(LMathAbs* instr);
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void EmitGoto(int block);
  template<class InstrType>
  void EmitBranch(InstrType instr,
                  Condition condition,
                  Register src1,
                  const Operand& src2);
  template<class InstrType>
  void EmitBranchF(InstrType instr,
                   Condition condition,
                   FPURegister src1,
                   FPURegister src2);
  template<class InstrType>
  void EmitFalseBranch(InstrType instr,
                       Condition condition,
                       Register src1,
                       const Operand& src2);
  template<class InstrType>
  void EmitFalseBranchF(InstrType instr,
                        Condition condition,
                        FPURegister src1,
                        FPURegister src2);
  void EmitCmpI(LOperand* left, LOperand* right);
  void EmitNumberUntagD(Register input,
                        DoubleRegister result,
                        bool allow_undefined_as_nan,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env,
                        NumberUntagDMode mode);
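  // Emits optimized code for typeof x == "y".  Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.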
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name,
                         Register& cmp1,
                         Operand& cmp2);
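  // Emits optimized code for %_IsObject(x).  Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.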
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Register temp2,
                         Label* is_not_object,
                         Label* is_object);
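  // Emits optimized code for %_IsString(x).  Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.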
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);
  void EmitIsConstructCall(Register temp1, Register temp2);
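  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).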
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);
  void EmitSignedIntegerDivisionByConstant(Register result,
                                           Register dividend,
                                           int32_t divisor,
                                           Register remainder,
                                           Register scratch,
                                           LEnvironment* environment);
  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  Safepoint::Kind expected_safepoint_kind_;
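  // RAII helper: while an instance is live, calls are expected to record
  // safepoints with the register (and optionally double-register) state.
  // Illustrative use from the code generator (sketch only; the runtime-call
  // helpers around it live in lithium-codegen-mips.cc):
  //
  //   {
  //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //     // ... emit the call that needs the register state recorded ...
  //   }  // Registers restored, expected kind reset to Safepoint::kSimple.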
  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters: {
          StoreRegistersStateStub stub1(kDontSaveFPRegs);
          codegen_->masm_->push(ra);
          codegen_->masm_->CallStub(&stub1);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          StoreRegistersStateStub stub2(kSaveFPRegs);
          codegen_->masm_->push(ra);
          codegen_->masm_->CallStub(&stub2);
          break;
        }
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters: {
          RestoreRegistersStateStub stub1(kDontSaveFPRegs);
          codegen_->masm_->push(ra);
          codegen_->masm_->CallStub(&stub1);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          RestoreRegistersStateStub stub2(kSaveFPRegs);
          codegen_->masm_->push(ra);
          codegen_->masm_->CallStub(&stub2);
          break;
        }
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};

class LDeferredCode : public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};

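// Illustrative sketch (not part of the original header): deferred code is
// emitted by deriving from LDeferredCode in lithium-codegen-mips.cc, roughly:
//
//   class DeferredExample V8_FINAL : public LDeferredCode {  // hypothetical name
//    public:
//     DeferredExample(LCodeGen* codegen, LStackCheck* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() V8_OVERRIDE {
//       codegen()->DoDeferredStackCheck(instr_);
//     }
//     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
//    private:
//     LStackCheck* instr_;
//   };
//
// The code generator branches to deferred->entry() on the slow path and binds
// deferred->exit() where execution resumes.
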
#endif  // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_