#define LITHIUM_OPERAND_LIST(V)               \
  V(ConstantOperand, CONSTANT_OPERAND,  128)  \
  V(StackSlot,       STACK_SLOT,        128)  \
  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)  \
  V(Register,        REGISTER,          16)   \
  V(DoubleRegister,  DOUBLE_REGISTER,   16)
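// Each row of the list above is (class name, LOperand::Kind value, cache
// size). The third column appears to be the number of operands of that kind
// pre-allocated in a static per-kind cache (see LSubKindOperand::Create()
// below), so e.g. stack slots 0..127 and the 16 registers are shared
// instances rather than fresh zone allocations.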
#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
  bool Is##name() const { return kind() == type; }
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
#undef LITHIUM_OPERAND_PREDICATE
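// As a sketch of the expansion: applying LITHIUM_OPERAND_LIST to the
// predicate macro generates one kind query per operand class, e.g.
//
//   bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
//   bool IsRegister() const { return kind() == REGISTER; }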
    ASSERT(op->IsUnallocated());

      : source_(source), destination_(destination) {
    return destination_ == NULL && source_ != NULL;

    return !IsEliminated() && source()->Equals(operand);

    return IsEliminated() || source_->Equals(destination_) || IsIgnored();

    return destination_ != NULL && destination_->IsIgnored();

    return source_ == NULL;
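// Read together, these predicates describe a small life cycle for a move:
// a cleared destination_ with a live source_ marks a move that is still in
// progress (presumably while the gap resolver is cycling through it), a
// cleared source_ marks a fully eliminated move, and a move is redundant
// when it is eliminated, a self-move, or targets an ignored destination.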
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
class LSubKindOperand V8_FINAL : public LOperand {

    if (index < kNumCachedOperands) return &cache[index];
    return new(zone) LSubKindOperand(index);

    return reinterpret_cast<LSubKindOperand*>(op);

  static void SetUpCache();
  static void TearDownCache();

  static LSubKindOperand* cache;

  explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
typedef LSubKindOperand<LOperand::type, number> L##name;
LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
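// Applying the operand list to this typedef macro yields one concrete
// operand class per kind, e.g. (expansion sketch):
//
//   typedef LSubKindOperand<LOperand::REGISTER, 16> LRegister;
//   typedef LSubKindOperand<LOperand::STACK_SLOT, 128> LStackSlot;
//
// so a call site such as LRegister::Create(3, zone) returns the cached
// operand for register 3 without touching the zone.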
class LParallelMove V8_FINAL : public ZoneObject {

    move_operands_.Add(LMoveOperands(from, to), zone);

  bool IsRedundant() const;

    return &move_operands_;
class LPointerMap V8_FINAL : public ZoneObject {
      : pointer_operands_(8, zone),
        untagged_operands_(0, zone),
        lithium_position_(-1) { }

    for (int i = 0; i < untagged_operands_.length(); ++i) {
      RemovePointer(untagged_operands_[i]);
    }
    untagged_operands_.Clear();
    return &pointer_operands_;
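// GetNormalizedOperands() folds the two lists back together: every operand
// recorded as untagged is removed from the pointer set before it is handed
// out, and the untagged list is cleared so the normalization work is not
// repeated on the next call.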
    ASSERT(lithium_position_ == -1);
    lithium_position_ = pos;

  int lithium_position_;
class LEnvironment V8_FINAL : public ZoneObject {

               HEnterInlined* entry,

        frame_type_(frame_type),
        arguments_stack_height_(argument_count),
        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
        translation_index_(-1),
        translation_size_(value_count),
        parameter_count_(parameter_count),
        values_(value_count, zone),
        is_tagged_(value_count, zone),
        is_uint32_(value_count, zone),
        object_mapping_(0, zone),
  LEnvironment* outer() const { return outer_; }
  HEnterInlined* entry() { return entry_; }
    values_.Add(operand, zone());
      is_tagged_.Add(values_.length() - 1, zone());
      is_uint32_.Add(values_.length() - 1, zone());

    return is_tagged_.Contains(index);

    return is_uint32_.Contains(index);
    uint32_t encoded = LengthOrDupeField::encode(length) |
                       IsArgumentsField::encode(is_arguments) |
                       IsDuplicateField::encode(false);
    object_mapping_.Add(encoded, zone());
    uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
                       IsDuplicateField::encode(true);
    object_mapping_.Add(encoded, zone());
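// Each object_mapping_ entry is a bitfield-encoded record. From the two
// writers above and the decoders below: IsDuplicateField selects the
// meaning of LengthOrDupeField, which holds either the new object's length
// or the index of the earlier object it duplicates, and IsArgumentsField
// is only meaningful for non-duplicate entries.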
    ASSERT(ObjectIsDuplicateAt(index));
    return LengthOrDupeField::decode(object_mapping_[index]);

    ASSERT(!ObjectIsDuplicateAt(index));
    return LengthOrDupeField::decode(object_mapping_[index]);

    ASSERT(!ObjectIsDuplicateAt(index));
    return IsArgumentsField::decode(object_mapping_[index]);

    return IsDuplicateField::decode(object_mapping_[index]);
  void Register(int deoptimization_index,
                int translation_index,
                int pc_offset) {
    ASSERT(!HasBeenRegistered());
    deoptimization_index_ = deoptimization_index;
    translation_index_ = translation_index;
    pc_offset_ = pc_offset;
  }
    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
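// Registration is one-shot: deoptimization_index_ doubles as the "not yet
// registered" flag via the Safepoint::kNoDeoptimizationIndex sentinel
// installed by the constructor, which is exactly what the ASSERT in
// Register() checks.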
  int arguments_stack_height_;
  int deoptimization_index_;
  int translation_index_;

  int translation_size_;
  int parameter_count_;

  GrowableBitVector is_tagged_;
  GrowableBitVector is_uint32_;

  LEnvironment* outer_;
  HEnterInlined* entry_;
        limit_(env != NULL ? env->values()->length() : 0),

    return env_->values()->at(current_);

  LEnvironment* env() { return env_; }

    return op == NULL || op->IsConstantOperand();
  void SkipUninteresting() {
    while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
      ++current_;
    }
  }
      : current_iterator_(env) {

  bool Done() { return current_iterator_.Done(); }

    ASSERT(!current_iterator_.Done());
    return current_iterator_.Current();

    current_iterator_.Advance();

  void SkipUninteresting() {
    while (current_iterator_.env() != NULL && current_iterator_.Done()) {
      current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
    }
  }

  ShallowIterator current_iterator_;
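// DeepIterator thus walks the entire chain of environments: whenever the
// current ShallowIterator is exhausted, SkipUninteresting() re-seats it on
// env->outer(), so Done() only becomes true once the outermost environment
// (presumably that of the non-inlined caller) has been consumed.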
class LPlatformChunk;

  HGraph* graph() const { return graph_; }

  LLabel* GetLabel(int block_id) const;

    return &inlined_closures_;

    inlined_closures_.Add(closure, zone());

    return allocated_double_registers_;

  HGraph* const graph_;
      : argument_count_(0),

  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                  int* argument_index_accumulator,
                                  ZoneList<HValue*>* objects_to_materialize);
  void AddObjectToMaterialize(HValue* value,
                              ZoneList<HValue*>* objects_to_materialize,
                              LEnvironment* result);
      : CompilationPhase(name, chunk->info()),

  DISALLOW_COPY_AND_ASSIGN(LPhase);
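// LPhase appears to be a thin scoped helper over CompilationPhase tied to a
// single chunk; DISALLOW_COPY_AND_ASSIGN keeps instances non-copyable so
// the phase begins and ends with the enclosing scope.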
#endif  // V8_LITHIUM_H_