#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#else
#error "Unknown architecture."
#endif
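// Exactly one branch of the block above is taken, selecting the
// platform-specific Lithium backend (operand definitions, LChunkBuilder)
// that the rest of this file builds on.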
// From LOperand::PrintTo(StringStream* stream): operands fixed to a
// particular register print as "(=name)". `unalloc` is the LUnallocated
// cast of this operand, established earlier in the switch.
      case LUnallocated::FIXED_REGISTER: {
        int reg_index = unalloc->fixed_register_index();
        const char* register_name =
            Register::AllocationIndexToString(reg_index);
        stream->Add("(=%s)", register_name);
        break;
      }
      case LUnallocated::FIXED_DOUBLE_REGISTER: {
        int reg_index = unalloc->fixed_register_index();
        const char* double_register_name =
            DoubleRegister::AllocationIndexToString(reg_index);
        stream->Add("(=%s)", double_register_name);
        break;
      }
    case CONSTANT_OPERAND:
      stream->Add("[constant:%d]", index());
      break;
    // ... (stack-slot and register cases elided) ...
    case DOUBLE_STACK_SLOT:
      stream->Add("[double_stack:%d]", index());
      break;
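// For illustration (hypothetical operand values), LOperand::PrintTo emits
// compact tags such as "[constant:5]" for constant-pool index 5,
// "[double_stack:2]" for double spill slot 2, and "(=eax)" appended to an
// unallocated operand that is fixed to a specific register.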
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
LSubKindOperand<kOperandKind, kNumCachedOperands>*
LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
  cache = new LSubKindOperand[kNumCachedOperands];
  for (int i = 0; i < kNumCachedOperands; i++) {
    cache[i].ConvertTo(kOperandKind, i);
  }
}
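// The cache filled above is a flyweight table: the first kNumCachedOperands
// operands of each kind are preallocated once, so hot, small indices can be
// handed out as shared pointers instead of fresh zone allocations. A minimal
// sketch of the matching accessor (the Create() shape is an assumption here,
// modeled on the header's declaration):
//
//   template<LOperand::Kind kOperandKind, int kNumCachedOperands>
//   LSubKindOperand<kOperandKind, kNumCachedOperands>*
//   LSubKindOperand<kOperandKind, kNumCachedOperands>::Create(
//       int index, Zone* zone) {
//     ASSERT(index >= 0);
//     if (index < kNumCachedOperands) return &cache[index];  // shared copy
//     return new(zone) LSubKindOperand(index);  // rare, large index
//   }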
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
  delete[] cache;
}


void LOperand::SetUpCaches() {
#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
}


void LOperand::TearDownCaches() {
#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}
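// LITHIUM_OPERAND_LIST is an X-macro: each entry supplies an operand
// subclass name, its kind tag, and its cache size, and the SETUP/TEARDOWN
// macros above are stamped out once per entry. For illustration only (the
// exact entries and cache sizes are assumptions), the setup pass expands
// along the lines of:
//
//   LConstantOperand::SetUpCache();
//   LStackSlot::SetUpCache();
//   LDoubleStackSlot::SetUpCache();
//   LRegister::SetUpCache();
//   LDoubleRegister::SetUpCache();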
bool LParallelMove::IsRedundant() const {
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsRedundant()) return false;
  }
  return true;
}
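// A parallel move is redundant when every component move is: either its
// source equals its destination, or it was eliminated earlier. Such a gap
// emits no code.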
void LParallelMove::PrintDataTo(StringStream* stream) const {
  bool first = true;
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsEliminated()) {
      LOperand* source = move_operands_[i].source();
      LOperand* destination = move_operands_[i].destination();
      if (!first) stream->Add(" ");
      first = false;
      if (source->Equals(destination)) {
        destination->PrintTo(stream);
      } else {
        destination->PrintTo(stream);
        stream->Add(" = ");
        source->PrintTo(stream);
      }
      stream->Add(";");
    }
  }
}
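// For illustration (hypothetical operands), a parallel move prints each
// pending move as "destination = source;" and a same-location move as just
// the destination, e.g.:
//   [stack:3] = v2; [constant:0];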
void LEnvironment::PrintTo(StringStream* stream) {
  stream->Add("[id=%d|", ast_id().ToInt());
  if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
    stream->Add("deopt_id=%d|", deoptimization_index());
  }
  stream->Add("parameters=%d|", parameter_count());
  stream->Add("arguments_stack_height=%d|", arguments_stack_height());
  for (int i = 0; i < values_.length(); ++i) {
    if (i != 0) stream->Add(";");
    if (values_[i] == NULL) {
      stream->Add("[hole]");
    } else {
      values_[i]->PrintTo(stream);
    }
  }
  stream->Add("]");
}
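// For illustration (hypothetical values), a printed environment looks like
//   [id=4|deopt_id=2|parameters=1|arguments_stack_height=0|v7;[hole];v9]
// where [hole] marks a slot whose value is absent.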
void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  pointer_operands_.Add(op, zone);
}
void LPointerMap::RemovePointer(LOperand* op) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  for (int i = 0; i < pointer_operands_.length(); ++i) {
    if (pointer_operands_[i]->Equals(op)) {
      pointer_operands_.Remove(i);
      --i;
    }
  }
}
void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  untagged_operands_.Add(op, zone);
}
void LPointerMap::PrintTo(StringStream* stream) {
  stream->Add("{");
  for (int i = 0; i < pointer_operands_.length(); ++i) {
    if (i != 0) stream->Add(";");
    pointer_operands_[i]->PrintTo(stream);
  }
  stream->Add("}");
}
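// Pointer maps record which operands hold tagged pointers at a safepoint so
// the GC can find and update them. Incoming parameters live at negative
// stack indices and are already kept alive by the caller's frame, which is
// why the three Record/Remove methods above skip them.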
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
    : spill_slot_count_(0),
      info_(info),
      graph_(graph),
      instructions_(32, graph->zone()),
      pointer_maps_(8, graph->zone()),
      inlined_closures_(1, graph->zone()) {
}
LLabel* LChunk::GetLabel(int block_id) const {
  HBasicBlock* block = graph_->blocks()->at(block_id);
  int first_instruction = block->first_instruction_index();
  return LLabel::cast(instructions_[first_instruction]);
}
int LChunk::LookupDestination(int block_id) const {
  LLabel* cur = GetLabel(block_id);
  while (cur->replacement() != NULL) {
    cur = cur->replacement();
  }
  return cur->block_id();
}
Label* LChunk::GetAssemblyLabel(int block_id) const {
  LLabel* label = GetLabel(block_id);
  ASSERT(!label->HasReplacement());
  return label->label();
}
void LChunk::MarkEmptyBlocks() {
  LPhase phase("L_Mark empty blocks", this);
  for (int i = 0; i < graph()->blocks()->length(); ++i) {
    HBasicBlock* block = graph()->blocks()->at(i);
    int first = block->first_instruction_index();
    int last = block->last_instruction_index();
    LInstruction* first_instr = instructions()->at(first);
    LInstruction* last_instr = instructions()->at(last);

    LLabel* label = LLabel::cast(first_instr);
    if (last_instr->IsGoto()) {
      LGoto* goto_instr = LGoto::cast(last_instr);
      if (label->IsRedundant() &&
          !label->is_loop_header()) {
        bool can_eliminate = true;
        for (int i = first + 1; i < last && can_eliminate; ++i) {
          LInstruction* cur = instructions()->at(i);
          if (cur->IsGap()) {
            LGap* gap = LGap::cast(cur);
            if (!gap->IsRedundant()) {
              can_eliminate = false;
            }
          } else {
            can_eliminate = false;
          }
        }
        if (can_eliminate) {
          label->set_replacement(GetLabel(goto_instr->block_id()));
        }
      }
    }
  }
}
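// Empty-block elision: a block whose body is just a redundant label,
// redundant gaps, and a trailing goto emits no code, so its label gets a
// replacement and LookupDestination() chases the replacement chain straight
// to the goto's target block.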
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
  gap->set_hydrogen_value(instr->hydrogen_value());
  int index = -1;
  if (instr->IsControl()) {
    instructions_.Add(gap, zone());
    index = instructions_.length();
    instructions_.Add(instr, zone());
  } else {
    index = instructions_.length();
    instructions_.Add(instr, zone());
    instructions_.Add(gap, zone());
  }
  if (instr->HasPointerMap()) {
    pointer_maps_.Add(instr->pointer_map(), zone());
    instr->pointer_map()->set_lithium_position(index);
  }
}
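// Every instruction is paired with an LInstructionGap where the register
// allocator can later insert parallel moves: the gap precedes a control
// instruction (moves must not follow the branch) and follows an ordinary
// one. `index` records where the instruction itself landed so its pointer
// map, if present, can refer back to that position.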
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
  return LConstantOperand::Create(constant->id(), zone());
}
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
  ASSERT(-1 <= index);  // -1 is the receiver.
  return (1 + info()->scope()->num_parameters() - index) * kPointerSize;
}
bool LChunk::IsGapAt(int index) const {
  return instructions_[index]->IsGap();
}


int LChunk::NearestGapPos(int index) const {
  while (!IsGapAt(index)) index--;
  return index;
}
HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
  return HConstant::cast(graph_->LookupValue(operand->index()));
}
Representation LChunk::LookupLiteralRepresentation(
    LConstantOperand* operand) const {
  return graph_->LookupValue(operand->index())->representation();
}
LChunk* LChunk::NewChunk(HGraph* graph) {
  graph->DisallowAddingNewValues();
  int values = graph->GetMaximumValueID();
  CompilationInfo* info = graph->info();
  if (values > LUnallocated::kMaxVirtualRegisters) {
    info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
    return NULL;
  }
  LAllocator allocator(values, graph);
  LChunkBuilder builder(info, graph, &allocator);
  LChunk* chunk = builder.Build();
  if (chunk == NULL) return NULL;

  if (!allocator.Allocate(chunk)) {
    info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
    return NULL;
  }

  chunk->set_allocated_double_registers(
      allocator.assigned_double_registers());
  return chunk;
}
Handle<Code> LChunk::Codegen() {
  MacroAssembler assembler(info()->isolate(), NULL, 0);
  LOG_CODE_EVENT(info()->isolate(),
                 CodeStartLinePosInfoRecordEvent(
                     assembler.positions_recorder()));
  LCodeGen generator(this, &assembler, info());

  MarkEmptyBlocks();

  if (generator.GenerateCode()) {
    CodeGenerator::MakeCodePrologue(info(), "optimized");
    Code::Flags flags = info()->flags();
    Handle<Code> code =
        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
    generator.FinishCode(code);
    code->set_is_crankshafted(true);
    void* jit_handler_data =
        assembler.positions_recorder()->DetachJITHandlerData();
    LOG_CODE_EVENT(info()->isolate(),
                   CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));

    CodeGenerator::PrintCode(code, info());
    return code;
  }
  assembler.AbortedCodeGeneration();
  return Handle<Code>::null();
}
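// NewChunk() and Codegen() together form the back half of the Crankshaft
// pipeline: build the LChunk from the hydrogen graph, run the linear-scan
// register allocator, then generate machine code and wrap it in a Code
// object, recording a bailout reason and returning NULL/null() on failure.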
void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
  allocated_double_registers_ = allocated_registers;
  BitVector* doubles = allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  while (!iterator.Done()) {
    // When the frame saves caller doubles, reserve spill slots for each
    // allocated double register (two pointer-sized slots where needed).
    if (info()->saves_caller_doubles()) {
      if (kDoubleSize == kPointerSize * 2) {
        spill_slot_count_ += 2;
      } else {
        spill_slot_count_++;
      }
    }
    iterator.Advance();
  }
}
LEnvironment* LChunkBuilderBase::CreateEnvironment(
    HEnvironment* hydrogen_env,
    int* argument_index_accumulator,
    ZoneList<HValue*>* objects_to_materialize) {
  if (hydrogen_env == NULL) return NULL;

  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
                                          argument_index_accumulator,
                                          objects_to_materialize);
  BailoutId ast_id = hydrogen_env->ast_id();
  ASSERT(!ast_id.IsNone() ||
         hydrogen_env->frame_type() != JS_FUNCTION);
  int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
  LEnvironment* result =
      new(zone()) LEnvironment(hydrogen_env->closure(),
                               hydrogen_env->frame_type(),
                               ast_id,
                               hydrogen_env->parameter_count(),
                               argument_count_,
                               value_count,
                               outer,
                               hydrogen_env->entry(),
                               zone());
  int argument_index = *argument_index_accumulator;

  // Store the environment description into the environment
  // (with holes for nested objects).
  for (int i = 0; i < hydrogen_env->length(); ++i) {
    if (hydrogen_env->is_special_index(i)) continue;

    LOperand* op;
    HValue* value = hydrogen_env->values()->at(i);
    CHECK(!value->IsPushArgument());  // Do not deopt outgoing arguments.
    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
      op = LEnvironment::materialization_marker();
    } else {
      op = UseAny(value);
    }
    result->AddValue(op,
                     value->representation(),
                     value->CheckFlag(HInstruction::kUint32));
  }

  // Recursively store the nested objects into the environment.
  for (int i = 0; i < hydrogen_env->length(); ++i) {
    if (hydrogen_env->is_special_index(i)) continue;

    HValue* value = hydrogen_env->values()->at(i);
    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
      AddObjectToMaterialize(value, objects_to_materialize, result);
    }
  }

  if (hydrogen_env->frame_type() == JS_FUNCTION) {
    *argument_index_accumulator = argument_index;
  }

  return result;
}
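// Deoptimization environments are built in two passes: the first adds one
// slot per hydrogen value, leaving materialization_marker() holes for
// arguments and captured objects, and the second recursively appends the
// descriptions of those objects. The deoptimizer later reconstructs the
// unoptimized frame(s) from this layout.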
void LChunkBuilderBase::AddObjectToMaterialize(
    HValue* value,
    ZoneList<HValue*>* objects_to_materialize,
    LEnvironment* result) {
  int object_index = objects_to_materialize->length();
  // Store the hydrogen value into the de-duplicated list of objects.
  objects_to_materialize->Add(value, zone());

  // Find out whether we are storing a duplicated value.
  int previously_materialized_object = -1;
  for (int prev = 0; prev < object_index; ++prev) {
    if (objects_to_materialize->at(prev) == value) {
      previously_materialized_object = prev;
      break;
    }
  }

  // Store the captured object length (or duplicated object index) as the
  // first value in the environment.
  int length = value->OperandCount();
  bool is_arguments = value->IsArgumentsObject();
  if (previously_materialized_object >= 0) {
    result->AddDuplicateObject(previously_materialized_object);
    return;
  } else {
    result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
  }

  // Store the captured object's fields into the environment.
  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
    LOperand* op;
    HValue* arg_value = value->OperandAt(i);
    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
      // Insert a hole for nested objects.
      op = LEnvironment::materialization_marker();
    } else {
      ASSERT(!arg_value->IsPushArgument());
      // For ordinary values, tell the register allocator it may use any
      // location.
      op = UseAny(arg_value);
    }
    result->AddValue(op,
                     arg_value->representation(),
                     arg_value->CheckFlag(HInstruction::kUint32));
  }

  // Recursively store all the nested captured objects into the environment.
  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
    HValue* arg_value = value->OperandAt(i);
    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
      AddObjectToMaterialize(arg_value, objects_to_materialize, result);
    }
  }
}
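// Note that a captured object seen twice is encoded as a back-reference via
// AddDuplicateObject() rather than described again, so object identity is
// preserved when the deoptimizer rematerializes it.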
LInstruction* LChunkBuilder::CheckElideControlInstruction(
    HControlInstruction* instr) {
  HBasicBlock* successor;
  if (!instr->KnownSuccessorBlock(&successor)) return NULL;
  return new(zone()) LGoto(successor);
}
LPhase::~LPhase() {
  if (ShouldProduceTraceOutput()) {
    isolate()->GetHTracer()->TraceLithium(name(), chunk_);
  }
}