#ifndef V8_LITHIUM_ALLOCATOR_H_
#define V8_LITHIUM_ALLOCATOR_H_

#include "v8.h"

#include "allocation.h"
#include "lithium.h"
#include "zone.h"

namespace v8 {
namespace internal {

// Forward declarations.
class HBasicBlock;
class HGraph;
class HInstruction;
class HPhi;
class HValue;
class BitVector;
class StringStream;

class LArgument;
class LPlatformChunk;
class LOperand;
class LUnallocated;
class LGap;
class LParallelMove;
class LPointerMap;
// This class represents a single point of a LOperand's lifetime. For each
// lithium instruction there are exactly two lifetime positions: the
// beginning and the end of the instruction. Lifetime positions for
// different lithium instructions are disjoint.
class LifetimePosition {
 public:
  // Returns the lifetime position that corresponds to the beginning of
  // the instruction with the given index.
  static LifetimePosition FromInstructionIndex(int index) {
    return LifetimePosition(index * kStep);
  }

  // Returns a numeric representation of this lifetime position.
  int Value() const { return value_; }

  // Returns the index of the instruction to which this lifetime position
  // corresponds.
  int InstructionIndex() const {
    ASSERT(IsValid());
    return value_ / kStep;
  }

  // Returns true if this lifetime position corresponds to the start of an
  // instruction (rather than its end).
  bool IsInstructionStart() const {
    return (value_ & (kStep - 1)) == 0;
  }

  // Constructs an invalid lifetime position, also returned by Invalid().
  LifetimePosition() : value_(-1) { }

  bool IsValid() const { return value_ != -1; }

  static LifetimePosition Invalid() { return LifetimePosition(); }

 private:
  // Every instruction occupies kStep consecutive positions.
  static const int kStep = 2;

  // Code relies on kStep being a power of two.
  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));

  explicit LifetimePosition(int value) : value_(value) { }

  int value_;
};
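
// Worked example of the encoding above: with kStep == 2, the instruction
// with index 3 owns positions 6 (its start) and 7 (its end).
// InstructionIndex() maps both back to 3, while IsInstructionStart()
// distinguishes them, since only the even value has its low bit clear.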

// The register kinds the allocator distinguishes between.
enum RegisterKind {
  GENERAL_REGISTERS,
  DOUBLE_REGISTERS
};


// Iterator over an instruction's non-null temp operands.
class TempIterator BASE_EMBEDDED {
 public:
  inline explicit TempIterator(LInstruction* instr);
  inline bool Done();
  inline LOperand* Current();
  inline void Advance();

 private:
  inline void SkipUninteresting();
  LInstruction* instr_;
  int limit_;
  int current_;
};

// Iterator over an instruction's non-constant input operands.
class InputIterator BASE_EMBEDDED {
 public:
  inline explicit InputIterator(LInstruction* instr);
  inline bool Done();
  inline LOperand* Current();
  inline void Advance();

 private:
  inline void SkipUninteresting();
  LInstruction* instr_;
  int limit_;
  int current_;
};

// Iterator over an instruction's uses: its inputs followed by the operands
// of its environment (deoptimization data), if any.
class UseIterator BASE_EMBEDDED {
 public:
  inline explicit UseIterator(LInstruction* instr);
  inline bool Done();
  inline LOperand* Current();
  inline void Advance();

 private:
  InputIterator input_iterator_;
  DeepIterator env_iterator_;
};
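
// A minimal sketch of how these iterators are driven (the usual
// Done()/Current()/Advance() protocol), e.g. when recording all uses of an
// instruction while building live ranges:
//
//   for (UseIterator it(instr); !it.Done(); it.Advance()) {
//     LOperand* use = it.Current();
//     // ... add a use position for `use` to the corresponding range ...
//   }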

// Representation of the non-empty half-open interval [start, end[.
class UseInterval: public ZoneObject {
 public:
  UseInterval(LifetimePosition start, LifetimePosition end)
      : start_(start), end_(end), next_(NULL) {
    ASSERT(start.Value() < end.Value());
  }

  LifetimePosition start() const { return start_; }
  LifetimePosition end() const { return end_; }
  UseInterval* next() const { return next_; }

  // Split this interval at the given position without affecting the live
  // range that owns it. The interval must contain the position.
  void SplitAt(LifetimePosition pos, Zone* zone);

  // If this interval intersects with other, return the smallest position
  // that belongs to both of them; otherwise return an invalid position.
  LifetimePosition Intersect(const UseInterval* other) const {
    if (other->start().Value() < start_.Value()) return other->Intersect(this);
    if (other->start().Value() < end_.Value()) return other->start();
    return LifetimePosition::Invalid();
  }

  bool Contains(LifetimePosition point) const {
    return start_.Value() <= point.Value() && point.Value() < end_.Value();
  }

 private:
  void set_start(LifetimePosition start) { start_ = start; }
  void set_next(UseInterval* next) { next_ = next; }

  LifetimePosition start_;
  LifetimePosition end_;
  UseInterval* next_;

  friend class LiveRange;
};
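
// Intervals are half-open, [start, end[: an interval from position 6 to
// position 10 Contains() positions 6 through 9 but not 10. This is what
// lets the intervals of two ranges split at a position abut without
// overlapping, and lets Intersect() return an invalid position for them.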

// Representation of a use position: a point in the code where an operand
// is read or written, together with an optional allocation hint.
class UsePosition: public ZoneObject {
 public:
  UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);

  LOperand* operand() const { return operand_; }
  bool HasOperand() const { return operand_ != NULL; }

  LOperand* hint() const { return hint_; }
  bool HasHint() const;
  bool RequiresRegister() const;
  bool RegisterIsBeneficial() const;

  LifetimePosition pos() const { return pos_; }
  UsePosition* next() const { return next_; }

 private:
  void set_next(UsePosition* next) { next_ = next; }

  LOperand* const operand_;
  LOperand* const hint_;
  LifetimePosition const pos_;
  UsePosition* next_;
  bool requires_reg_;
  bool register_beneficial_;

  friend class LiveRange;
};
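
// Each use position carries two bits of allocation guidance:
// RequiresRegister() marks uses that cannot be satisfied from a spill
// slot, while RegisterIsBeneficial() merely steers the split/spill
// heuristics. The optional hint operand lets connected values (e.g. the
// two sides of a gap move) prefer ending up in the same register.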

// Representation of an SSA value's live range: a list of disjoint use
// intervals plus the use positions at which the value is needed. Ranges
// can be split; the pieces stay linked through parent() and next().
class LiveRange: public ZoneObject {
 public:
  static const int kInvalidAssignment = 0x7fffffff;

  LiveRange(int id, Zone* zone);

  UseInterval* first_interval() const { return first_interval_; }
  UsePosition* first_pos() const { return first_pos_; }
  LiveRange* parent() const { return parent_; }
  LiveRange* next() const { return next_; }
  bool IsChild() const { return parent() != NULL; }
  int id() const { return id_; }
  bool IsFixed() const { return id_ < 0; }
  bool IsEmpty() const { return first_interval() == NULL; }
  LOperand* CreateAssignedOperand(Zone* zone);
  int assigned_register() const { return assigned_register_; }
  int spill_start_index() const { return spill_start_index_; }
  void set_assigned_register(int reg, Zone* zone);
  void MakeSpilled(Zone* zone);

  // Returns the use position in this live range that follows both start
  // and the last processed use position. Modifies internal state!
  UsePosition* NextUsePosition(LifetimePosition start);

  // Variants restricted to uses that require or benefit from a register;
  // these also modify the internal last-processed marker.
  UsePosition* NextRegisterPosition(LifetimePosition start);
  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
  UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);

  // Can this live range be spilled at this position?
  bool CanBeSpilled(LifetimePosition pos);

  // Splits this live range at the given position, which must follow the
  // start of the range. All uses following the position are moved from
  // this live range to the result live range.
  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);

  RegisterKind Kind() const { return kind_; }
  bool HasRegisterAssigned() const {
    return assigned_register_ != kInvalidAssignment;
  }
  bool IsSpilled() const { return spilled_; }

  LOperand* current_hint_operand() const {
    ASSERT(current_hint_operand_ == FirstHint());
    return current_hint_operand_;
  }
  LOperand* FirstHint() const {
    UsePosition* pos = first_pos_;
    while (pos != NULL && !pos->HasHint()) pos = pos->next();
    if (pos != NULL) return pos->hint();
    return NULL;
  }

  LifetimePosition Start() const {
    ASSERT(!IsEmpty());
    return first_interval()->start();
  }

  LifetimePosition End() const {
    ASSERT(!IsEmpty());
    return last_interval_->end();
  }

  bool HasAllocatedSpillOperand() const;
  LOperand* GetSpillOperand() const { return spill_operand_; }
  void SetSpillOperand(LOperand* operand);

  void SetSpillStartIndex(int start) {
    spill_start_index_ = Min(start, spill_start_index_);
  }

  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
  bool CanCover(LifetimePosition position) const;
  bool Covers(LifetimePosition position);
  LifetimePosition FirstIntersection(LiveRange* other);

  // Add a new interval or a new use position to this live range.
  void EnsureInterval(LifetimePosition start, LifetimePosition end,
                      Zone* zone);
  void AddUseInterval(LifetimePosition start, LifetimePosition end,
                      Zone* zone);
  void AddUsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint,
                      Zone* zone);

  // Shorten the most recently added interval by setting a new start.
  void ShortenTo(LifetimePosition start);

 private:
  void ConvertOperands(Zone* zone);
  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                  LifetimePosition but_not_past) const;

  int id_;
  bool spilled_;
  RegisterKind kind_;
  int assigned_register_;
  UseInterval* last_interval_;
  UseInterval* first_interval_;
  UsePosition* first_pos_;
  LiveRange* parent_;
  LiveRange* next_;
  // This is used as a cache; it does not affect correctness.
  mutable UseInterval* current_interval_;
  UsePosition* last_processed_use_;
  // This is only valid while live ranges are being built.
  LOperand* current_hint_operand_;
  int spill_start_index_;
  LOperand* spill_operand_;
};
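
// Splitting sketch (the numbers are illustrative, not from real code): a
// range covering [2, 20[ can be SplitAt(position 14) into a parent that
// covers [2, 14[ and a child covering [14, 20[; uses from position 14
// onwards move to the child, and the pieces stay reachable through
// parent()/next(). SetSpillStartIndex() keeps the minimum index seen, so
// the eventual spill slot is valid from the earliest spilled piece.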

// The linear-scan register allocator for Lithium code.
class LAllocator BASE_EMBEDDED {
 public:
  LAllocator(int first_virtual_register, HGraph* graph);

  static void TraceAlloc(const char* msg, ...);

  // Checks whether the value of a given virtual register is tagged.
  bool HasTaggedValue(int virtual_register) const;

  // Returns the register kind required by the given virtual register.
  RegisterKind RequiredRegisterKind(int virtual_register) const;

  bool Allocate(LChunk* chunk);

  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
  const Vector<LiveRange*>* fixed_live_ranges() const {
    return &fixed_live_ranges_;
  }
  const Vector<LiveRange*>* fixed_double_live_ranges() const {
    return &fixed_double_live_ranges_;
  }

  LPlatformChunk* chunk() const { return chunk_; }
  HGraph* graph() const { return graph_; }
  Isolate* isolate() const;
  Zone* zone() { return &zone_; }

  // Returns the index of a newly allocated virtual register. Returns 0 and
  // sets allocation_ok_ to false if we run out of virtual registers.
  int GetVirtualRegister() {
    if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
      allocation_ok_ = false;
      // Maintain the invariant that we return something below the maximum.
      return 0;
    }
    return next_virtual_register_++;
  }

  bool AllocationOk() { return allocation_ok_; }

  void MarkAsOsrEntry() {
    // There can be only one.
    ASSERT(!has_osr_entry_);
    // Simply set a flag to find and process the instruction later.
    has_osr_entry_ = true;
  }

  BitVector* assigned_registers() {
    return assigned_registers_;
  }
  BitVector* assigned_double_registers() {
    return assigned_double_registers_;
  }
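
  // A sketch of the usual call site (cf. LChunk::NewChunk; bailout-reason
  // bookkeeping elided):
  //
  //   LAllocator allocator(graph->GetMaximumValueID(), graph);
  //   LChunkBuilder builder(info, graph, &allocator);
  //   LChunk* chunk = builder.Build();
  //   if (chunk == NULL) return NULL;
  //   if (!allocator.Allocate(chunk)) return NULL;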

 private:
  // Top-level phases of register allocation.
  void MeetRegisterConstraints();
  void ResolvePhis();
  void BuildLiveRanges();
  void AllocateGeneralRegisters();
  void AllocateDoubleRegisters();
  void ConnectRanges();
  void ResolveControlFlow();
  void PopulatePointerMaps();
  void AllocateRegisters();
  bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
  inline bool SafePointsAreInOrder() const;
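
  // Allocate() runs the phases above roughly in this order (bailout checks
  // between phases elided):
  //
  //   MeetRegisterConstraints();   // rewrite operands to satisfy constraints
  //   ResolvePhis();               // lower phis to gap moves
  //   BuildLiveRanges();           // backwards liveness analysis
  //   AllocateGeneralRegisters();  // linear scan over general registers
  //   AllocateDoubleRegisters();   // linear scan over double registers
  //   PopulatePointerMaps();       // tell the GC where tagged values live
  //   ConnectRanges();             // moves between siblings of split ranges
  //   ResolveControlFlow();        // moves across basic-block boundaries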

  // Liveness analysis support.
  void InitializeLivenessAnalysis();
  BitVector* ComputeLiveOut(HBasicBlock* block);
  void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
  void ProcessInstructions(HBasicBlock* block, BitVector* live);
  void MeetRegisterConstraints(HBasicBlock* block);
  void ResolvePhis(HBasicBlock* block);

  // Helper methods for updating the live range lists.
  void AddToActive(LiveRange* range);
  void AddToInactive(LiveRange* range);
  void AddToUnhandledSorted(LiveRange* range);
  void AddToUnhandledUnsorted(LiveRange* range);
  void SortUnhandled();
  bool UnhandledIsSorted();
  void ActiveToHandled(LiveRange* range);
  void ActiveToInactive(LiveRange* range);
  void InactiveToHandled(LiveRange* range);
  void InactiveToActive(LiveRange* range);
  void FreeSpillSlot(LiveRange* range);
  LOperand* TryReuseSpillSlot(LiveRange* range);
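
  // unhandled_live_ranges_ is the linear-scan worklist. It is kept sorted
  // so that the next range to allocate (the one with the lowest start
  // position) can be removed cheaply; as the allocation position advances,
  // ranges migrate between the active, inactive and handled sets via the
  // transition helpers above, and spill slots freed by handled ranges are
  // recycled through reusable_slots_.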

  // Helper methods for allocating registers.
  bool TryAllocateFreeReg(LiveRange* range);
  void AllocateBlockedReg(LiveRange* range);
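
  // The core linear-scan decision: TryAllocateFreeReg() first tries to
  // find a register that is free for the current range's whole extent (or
  // long enough that splitting at the first conflict is profitable). Only
  // when every register is blocked does AllocateBlockedReg() pick a
  // victim, comparing use positions to decide whether to spill the current
  // range or to split and spill the ranges occupying the register (see
  // SplitAndSpillIntersecting() below).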

  // After a blocked register has been assigned, splits and spills the
  // active and inactive ranges that intersect with the current range.
  void SplitAndSpillIntersecting(LiveRange* range);

  // Inserts moves on the pred -> block edge when different pieces of a
  // split live range are live on the two sides of the edge.
  void ResolveControlFlow(LiveRange* range,
                          HBasicBlock* block,
                          HBasicBlock* pred);

  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);

  // Helper methods for the fixed registers.
  int RegisterCount() const;
  static int FixedLiveRangeID(int index) { return -index - 1; }
  static int FixedDoubleLiveRangeID(int index);
  LiveRange* FixedLiveRangeFor(int index);
  LiveRange* FixedDoubleLiveRangeFor(int index);
  LiveRange* LiveRangeFor(int index);
  HPhi* LookupPhi(LOperand* operand) const;
  LGap* GetLastGap(HBasicBlock* block);

  const char* RegisterName(int allocation_index);

  inline bool IsGapAt(int index);

  inline LInstruction* InstructionAt(int index);

  inline LGap* GapAt(int index);
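
  // Worked example of the fixed-range id scheme: FixedLiveRangeID(0) == -1,
  // FixedLiveRangeID(1) == -2, and so on, so ranges that model physical
  // registers never collide with non-negative virtual register ids and
  // LiveRange::IsFixed() is a simple sign test. IsGapAt(), InstructionAt()
  // and GapAt() hide the chunk's interleaving of gaps (parallel move slots)
  // with the actual instructions.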

  Zone zone_;

  LPlatformChunk* chunk_;

  // During liveness analysis, a mapping from block id to live_in set for
  // the blocks that have already been analyzed.
  ZoneList<BitVector*> live_in_sets_;

  // Liveness analysis results: one live range per virtual register.
  ZoneList<LiveRange*> live_ranges_;

  // Fixed live ranges, one per allocatable physical register.
  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
      fixed_live_ranges_;
  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
      fixed_double_live_ranges_;

  // Linear-scan working sets.
  ZoneList<LiveRange*> unhandled_live_ranges_;
  ZoneList<LiveRange*> active_live_ranges_;
  ZoneList<LiveRange*> inactive_live_ranges_;
  // Ranges whose spill slots may be reused by later ranges.
  ZoneList<LiveRange*> reusable_slots_;

  // Next virtual register number to be assigned to temporaries.
  int next_virtual_register_;
  int first_artificial_register_;
  GrowableBitVector double_artificial_registers_;

  RegisterKind mode_;
  int num_registers_;

  BitVector* assigned_registers_;
  BitVector* assigned_double_registers_;

  HGraph* graph_;

  bool has_osr_entry_;

  // Indicates success or failure during register allocation.
  bool allocation_ok_;

#ifdef DEBUG
  LifetimePosition allocation_finger_;
#endif

  DISALLOW_COPY_AND_ASSIGN(LAllocator);
};

class LAllocatorPhase : public CompilationPhase {
 public:
  LAllocatorPhase(const char* name, LAllocator* allocator);
  ~LAllocatorPhase();

 private:
  LAllocator* allocator_;
  unsigned allocator_zone_start_allocation_size_;

  DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
};
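
// LAllocatorPhase follows the RAII pattern of the other compiler phases:
// the constructor snapshots the allocator zone's allocation size so the
// destructor can report how much memory the phase allocated when tracing
// is enabled. A usage sketch (the phase name is illustrative):
//
//   {
//     LAllocatorPhase phase("L_Allocate general registers", this);
//     AllocateGeneralRegisters();
//   }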

} }  // namespace v8::internal

#endif  // V8_LITHIUM_ALLOCATOR_H_