#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
#include "ia32/lithium-ia32.h"

#include "checks.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "ia32/lithium-gap-resolver-ia32.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

class LCodeGen BASE_EMBEDDED {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : zone_(info->zone()),
        chunk_(static_cast<LPlatformChunk*>(chunk)),
        masm_(assembler),
        info_(info),
        current_instruction_(-1),
        instructions_(chunk->instructions()),
        deoptimizations_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        status_(UNUSED),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        dynamic_frame_alignment_(false),
        last_lazy_deopt_pc_(0),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }
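
  // Typical driver (sketch only; simplified from how the chunk builder
  // invokes the code generator):
  //   LCodeGen generator(chunk, &assembler, info);
  //   if (generator.GenerateCode()) {
  //     ...  // assemble, then call generator.FinishCode(code)
  //   }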

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle.  Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Simple accessors.
  MacroAssembler* masm() const { return masm_; }
  CompilationInfo* info() const { return info_; }
  Isolate* isolate() const { return info_->isolate(); }
  Factory* factory() const { return isolate()->factory(); }
  Heap* heap() const { return isolate()->heap(); }
  Zone* zone() const { return zone_; }

  // Support for converting LOperands to assembler types.
  Operand ToOperand(LOperand* op) const;

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagI(LInstruction* instr,
                            LOperand* value,
                            IntegerSignedness signedness);
  void DoDeferredRandom(LRandom* instr);
  void DoGap(LGap* instr);
  void WriteTranslation(LEnvironment* environment,
                        Translation* translation,
                        int* arguments_index,
                        int* arguments_count);
  void EnsureRelocSpaceForDeoptimization();

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
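
  // For illustration, the expansion declares one handler per concrete
  // Lithium instruction, e.g.:
  //   void DoAddI(LAddI* node);
  //   void DoBranch(LBranch* node);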

 private:
  enum Status {
    UNUSED,
    GENERATING,
    DONE,
    ABORTED
  };

  bool is_unused() const { return status_ == UNUSED; }
  bool is_generating() const { return status_ == GENERATING; }
  bool is_done() const { return status_ == DONE; }
  bool is_aborted() const { return status_ == ABORTED; }

  LPlatformChunk* chunk() const { return chunk_; }
  Scope* scope() const { return scope_; }
  HGraph* graph() const { return chunk_->graph(); }

  int GetNextEmittedBlock(int block);
  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
  int GetParameterCount() const { return scope()->num_parameters(); }

  void Abort(const char* reason);
  void Comment(const char* format, ...);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Code generation passes.  Returns true if code generation should
  // continue.
  bool GeneratePrologue();
  bool GenerateBody();
  bool GenerateDeferredCode();
  // Pad the reloc info to ensure that we have enough space to patch during
  // deoptimization.
  bool GenerateRelocPadding();
  bool GenerateSafepointTable();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };
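
  // Note: RECORD_SIMPLE_SAFEPOINT is the common case; the WITH_REGISTERS
  // variant is for calls made while the safepoint registers are on the
  // stack (see PushSafepointRegistersScope below).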

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* fun,
                   int argc,
                   LInstruction* instr);

  void CallRuntime(Runtime::FunctionId id,
                   int argc,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, argc, instr);
  }
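
  // Illustrative call site (sketch only; the runtime function id is an
  // example, not prescribed by this header):
  //   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);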

  enum EDIState {
    EDI_UNINITIALIZED,
    EDI_CONTAINS_TARGET
  };

  // Generate a direct call to a known function.  Expects the function
  // to be in edi.
  void CallKnownFunction(Handle<JSFunction> function,
                         int arity,
                         LInstruction* instr,
                         CallKind call_kind,
                         EDIState edi_state);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);
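
  // Typical use (sketch): bail out when a speculative check fails, e.g.
  //   __ cmp(input_reg, factory()->undefined_value());
  //   DeoptimizeIf(not_equal, instr->environment());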

  void AddToTranslation(Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int arguments_index,
                        int arguments_count);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  XMMRegister ToDoubleRegister(int index) const;
  int ToInteger32(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                LOperand* key,
                                Representation key_representation,
                                ElementsKind elements_kind,
                                uint32_t offset,
                                uint32_t additional_index = 0);
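
  // Roughly, the returned Operand addresses
  //   elements_pointer + key * element_size(elements_kind) + offset
  // (sketch; the exact scaling also depends on key_representation).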

  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);
  void DoMathLog(LUnaryMathOperation* instr);
  void DoMathTan(LUnaryMathOperation* instr);
  void DoMathCos(LUnaryMathOperation* instr);
  void DoMathSin(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordPosition(int position);

  void EmitGoto(int block);
  void EmitBranch(int left_block, int right_block, Condition cc);
  void EmitNumberUntagD(Register input,
                        Register temp,
                        XMMRegister result,
                        bool deoptimize_on_undefined,
                        bool deoptimize_on_minus_zero,
                        LEnvironment* env);

  void DeoptIfTaggedButNotSmi(LEnvironment* environment,
                              HValue* value,
                              LOperand* operand);

  // Emits optimized code for typeof x == "y".  Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name);

  // Emits optimized code for %_IsObject(x).  Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x).  Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string);

  void EmitIsConstructCall(Register temp);
  void EmitLoadFieldOrConstantFunction(Register result,
                                       Register object,
                                       Handle<Map> type,
                                       Handle<String> name,
                                       LEnvironment* env);

  // Emits code for object literal boilerplate copying.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset);

  void EnsureSpaceForLazyDeopt();

  // Emits code for pushing either a tagged constant, a (non-double)
  // register, or a stack slot operand.
  void EmitPushTaggedOperand(LOperand* operand);

  Zone* zone_;
  LPlatformChunk* const chunk_;
  MacroAssembler* const masm_;
  CompilationInfo* const info_;

  int current_instruction_;
  const ZoneList<LInstruction*>* instructions_;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  Status status_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  bool dynamic_frame_alignment_;
  int last_lazy_deopt_pc_;

  // Builder that keeps track of safepoints in the code.  The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiler from a set of parallel moves to a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->masm_->PushSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
    }

    ~PushSafepointRegistersScope() {
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      codegen_->masm_->PopSafepointRegisters();
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
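
  // Usage sketch (hypothetical call site): while the scope is alive, calls
  // must record safepoints with registers, e.g.
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     __ call(...);  // some runtime call
  //     RecordSafepointWithRegisters(
  //         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  //   }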

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};

class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
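
// Sketch of how deferred code is typically used (hypothetical subclass;
// the real ones are defined locally in lithium-codegen-ia32.cc):
//   class DeferredDoRandom: public LDeferredCode {
//    public:
//     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LRandom* instr_;
//   };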

} }  // namespace v8::internal

#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_