#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_
INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
  return MarkBitFrom(reinterpret_cast<Address>(obj));
}

// Impossible markbits: 01.
INLINE(static bool IsImpossible(MarkBit mark_bit)) {
  return !mark_bit.Get() && mark_bit.Next().Get();
}
// Black markbits: 10.
INLINE(static bool IsBlack(MarkBit mark_bit)) {
  return mark_bit.Get() && !mark_bit.Next().Get();
}
// White markbits: 00.
INLINE(static bool IsWhite(MarkBit mark_bit)) {
  return !mark_bit.Get();
}
// Grey markbits: 11.
INLINE(static bool IsGrey(MarkBit mark_bit)) {
  return mark_bit.Get() && mark_bit.Next().Get();
}
INLINE(static void BlackToGrey(HeapObject* obj)) {
  BlackToGrey(MarkBitFrom(obj));
}
static const char* ColorName(ObjectColor color) {
  switch (color) {
    case BLACK_OBJECT: return "black";
    case WHITE_OBJECT: return "white";
    case GREY_OBJECT: return "grey";
    case IMPOSSIBLE_COLOR: return "impossible";
  }
  return "error";
}
static ObjectColor Color(HeapObject* obj) {
  return Color(Marking::MarkBitFrom(obj));
}
static ObjectColor Color(MarkBit mark_bit) {
  if (IsBlack(mark_bit)) return BLACK_OBJECT;
  if (IsWhite(mark_bit)) return WHITE_OBJECT;
  if (IsGrey(mark_bit)) return GREY_OBJECT;
  return IMPOSSIBLE_COLOR;
}
// Returns true if the transferred colour is black.
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
  MarkBit from_mark_bit = MarkBitFrom(from);
  MarkBit to_mark_bit = MarkBitFrom(to);
  bool is_black = false;
  if (from_mark_bit.Get()) {
    to_mark_bit.Set();
    is_black = true;   // Looks black so far.
  }
  if (from_mark_bit.Next().Get()) {
    to_mark_bit.Next().Set();
    is_black = false;  // Was actually grey.
  }
  return is_black;
}
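// ---------------------------------------------------------------------------
// Illustration (not part of the original header): the predicates above imply
// a two-bit colour encoding -- white = 00, impossible = 01, black = 10,
// grey = 11 -- stored in a mark bit and the bit that follows it. A minimal
// standalone sketch of that encoding, using a hypothetical FakeMarkBit pair
// of booleans in place of v8's MarkBit class:
#include <cassert>

struct FakeMarkBit {
  bool bit;       // the object's own mark bit
  bool next_bit;  // the bit immediately after it in the bitmap
};

static bool IsWhite(FakeMarkBit m)      { return !m.bit; }
static bool IsGrey(FakeMarkBit m)       { return m.bit && m.next_bit; }
static bool IsBlack(FakeMarkBit m)      { return m.bit && !m.next_bit; }
static bool IsImpossible(FakeMarkBit m) { return !m.bit && m.next_bit; }

int main() {
  assert(IsWhite(FakeMarkBit{false, false}));
  assert(IsImpossible(FakeMarkBit{false, true}));
  assert(IsBlack(FakeMarkBit{true, false}));
  assert(IsGrey(FakeMarkBit{true, true}));
  return 0;
}
// ---------------------------------------------------------------------------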
MarkingDeque()
    : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

inline bool IsEmpty() { return top_ == bottom_; }
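// Note: the wrap-around arithmetic here and in the push/pop operations below
// assumes the backing array's capacity is a power of two with
// mask_ == capacity - 1 (presumably established by Initialize()); one slot is
// kept unused so that a full deque ((top_ + 1) & mask_ == bottom_) stays
// distinguishable from an empty one (top_ == bottom_).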
INLINE(void PushBlack(HeapObject* object)) {
  ASSERT(object->IsHeapObject());
  if (IsFull()) {
    // Overflow: the object is demoted back to grey instead of being pushed
    // (further overflow bookkeeping omitted in this excerpt).
    Marking::BlackToGrey(object);
  } else {
    array_[top_] = object;
    top_ = ((top_ + 1) & mask_);
  }
}

INLINE(void PushGrey(HeapObject* object)) {
  ASSERT(object->IsHeapObject());
  array_[top_] = object;
  top_ = ((top_ + 1) & mask_);
}

INLINE(HeapObject* Pop()) {
  top_ = ((top_ - 1) & mask_);
  HeapObject* object = array_[top_];
  ASSERT(object->IsHeapObject());
  return object;
}

INLINE(void UnshiftGrey(HeapObject* object)) {
  ASSERT(object->IsHeapObject());
  bottom_ = ((bottom_ - 1) & mask_);
  array_[bottom_] = object;
}
int top() { return top_; }
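// ---------------------------------------------------------------------------
// Illustration (not part of the original header): the marking deque is a
// fixed-size ring buffer -- black objects are pushed at top_, grey objects
// can also be prepended at bottom_, and all index arithmetic wraps with a
// power-of-two mask. A standalone sketch of the same arithmetic with
// hypothetical names (WorkList, Push, Pop, Unshift -- not V8 API); unlike the
// real deque it simply reports overflow instead of demoting objects:
#include <cassert>
#include <cstdint>

template <typename T, uint32_t kCapacity>  // kCapacity must be a power of two
class WorkList {
 public:
  WorkList() : top_(0), bottom_(0) {}
  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & kMask) == bottom_; }

  bool Push(T value) {               // append at top_, like PushBlack/PushGrey
    if (IsFull()) return false;
    items_[top_] = value;
    top_ = (top_ + 1) & kMask;
    return true;
  }

  T Pop() {                          // remove from top_, like MarkingDeque::Pop
    assert(!IsEmpty());
    top_ = (top_ - 1) & kMask;
    return items_[top_];
  }

  bool Unshift(T value) {            // prepend at bottom_, like UnshiftGrey
    if (IsFull()) return false;
    bottom_ = (bottom_ - 1) & kMask;
    items_[bottom_] = value;
    return true;
  }

 private:
  static const uint32_t kMask = kCapacity - 1;
  T items_[kCapacity];
  uint32_t top_;
  uint32_t bottom_;
};

int main() {
  WorkList<int, 8> deque;
  deque.Push(1);
  deque.Push(2);
  deque.Unshift(3);
  assert(deque.Pop() == 2);   // LIFO from the top end
  assert(deque.Pop() == 1);
  assert(deque.Pop() == 3);   // the prepended element comes out last
  assert(deque.IsEmpty());
  return 0;
}
// ---------------------------------------------------------------------------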
explicit SlotsBuffer(SlotsBuffer* next_buffer)
    : idx_(0), chain_length_(1), next_(next_buffer) {
  if (next_ != NULL) chain_length_ = next_->chain_length_ + 1;
}

void Add(ObjectSlot slot) { slots_[idx_++] = slot; }
309 return "EMBEDDED_OBJECT_SLOT";
311 return "RELOCATED_CODE_OBJECT";
313 return "CODE_TARGET_SLOT";
315 return "CODE_ENTRY_SLOT";
317 return "DEBUG_TARGET_SLOT";
319 return "JS_RETURN_SLOT";
321 return "NUMBER_OF_SLOT_TYPES";
323 return "UNKNOWN SlotType";
static int SizeOfChain(SlotsBuffer* buffer) {
  if (buffer == NULL) return 0;
  return static_cast<int>(buffer->idx_ +
                          (buffer->chain_length_ - 1) * kNumberOfElements);
}
static void UpdateSlotsRecordedIn(Heap* heap,
                                  SlotsBuffer* buffer,
                                  bool code_slots_filtering_required) {
  while (buffer != NULL) {
    if (code_slots_filtering_required) {
      buffer->UpdateSlotsWithFilter(heap);
    } else {
      buffer->UpdateSlots(heap);
    }
    buffer = buffer->next();
  }
}

static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
  return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
// From SlotsBuffer::AddTo: the newly allocated buffer becomes the new head
// of the chain.
*buffer_address = buffer;
static const int kChainLengthThreshold = 15;

intptr_t chain_length_;
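// ---------------------------------------------------------------------------
// Illustration (not part of the original header): SizeOfChain() above counts
// the idx_ entries of the head buffer plus a full kNumberOfElements for every
// earlier buffer in the chain. A standalone sketch of that arithmetic with
// hypothetical names (ChainSize, kElementsPerBuffer); the capacity value is
// an assumption, not taken from this header:
#include <cassert>

static const int kElementsPerBuffer = 1021;  // assumed per-buffer capacity

static int ChainSize(int head_idx, int chain_length) {
  if (chain_length == 0) return 0;  // empty chain, like buffer == NULL above
  return head_idx + (chain_length - 1) * kElementsPerBuffer;
}

int main() {
  assert(ChainSize(0, 0) == 0);                            // no buffers at all
  assert(ChainSize(7, 1) == 7);                            // one partial buffer
  assert(ChainSize(7, 3) == 7 + 2 * kElementsPerBuffer);   // two full + head
  return 0;
}
// ---------------------------------------------------------------------------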
explicit CodeFlusher(Isolate* isolate)
    : isolate_(isolate),
      jsfunction_candidates_head_(NULL),
      shared_function_info_candidates_head_(NULL),
      optimized_code_map_holder_head_(NULL) {}
void AddCandidate(SharedFunctionInfo* shared_info) {
  if (GetNextCandidate(shared_info) == NULL) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;
  }
}
void AddCandidate(JSFunction* function) {
  ASSERT(function->code() == function->shared()->code());
  if (GetNextCandidate(function)->IsUndefined()) {
    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;
  }
}
void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
  if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
    SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
    optimized_code_map_holder_head_ = code_map_holder;
  }
}
void ProcessCandidates() {
  ProcessOptimizedCodeMaps();
  ProcessSharedFunctionInfoCandidates();
  ProcessJSFunctionCandidates();
}

void EvictAllCandidates() {
  EvictOptimizedCodeMaps();
  EvictJSFunctionCandidates();
  EvictSharedFunctionInfoCandidates();
}
void ProcessOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
void EvictOptimizedCodeMaps();
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
static JSFunction* GetNextCandidate(JSFunction* candidate) {
  Object* next_candidate = candidate->next_function_link();
  return reinterpret_cast<JSFunction*>(next_candidate);
}

static void SetNextCandidate(JSFunction* candidate,
                             JSFunction* next_candidate) {
  candidate->set_next_function_link(next_candidate);
}
static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
  ASSERT(undefined->IsUndefined());
  candidate->set_next_function_link(undefined);
}
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
  Object* next_candidate = candidate->code()->gc_metadata();
  return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
}

static void SetNextCandidate(SharedFunctionInfo* candidate,
                             SharedFunctionInfo* next_candidate) {
  candidate->code()->set_gc_metadata(next_candidate);
}
static void ClearNextCandidate(SharedFunctionInfo* candidate) {
  candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
  Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
  return reinterpret_cast<SharedFunctionInfo*>(next_map);
}
static void SetNextCodeMap(SharedFunctionInfo* holder,
                           SharedFunctionInfo* next_holder) {
  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
  code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
}
static void ClearNextCodeMap(SharedFunctionInfo* holder) {
  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
  code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
}
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
SharedFunctionInfo* optimized_code_map_holder_head_;
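// ---------------------------------------------------------------------------
// Illustration (not part of the original header): the CodeFlusher threads its
// candidate lists through fields that already live on the objects (a
// JSFunction's next_function_link, the gc_metadata slot of a
// SharedFunctionInfo's code, a fixed index of the optimized code map), so
// adding a candidate allocates nothing. A standalone sketch of the same
// intrusive-list pattern with a hypothetical Candidate type (not V8 API):
#include <cassert>
#include <cstddef>

struct Candidate {
  int id;
  Candidate* next;  // stands in for next_function_link / gc_metadata
};

static Candidate* head = NULL;

static void AddCandidate(Candidate* c) {
  // Mirrors the header's "already enqueued?" test on the link field.
  if (c->next == NULL) {
    c->next = head;
    head = c;
  }
}

static int CountCandidates() {
  int n = 0;
  for (Candidate* c = head; c != NULL; c = c->next) ++n;
  return n;
}

int main() {
  Candidate a = {1, NULL};
  Candidate b = {2, NULL};
  AddCandidate(&a);
  AddCandidate(&b);
  AddCandidate(&b);  // ignored: b already carries a non-null link
  assert(CountCandidates() == 2);
  return 0;
}
// ---------------------------------------------------------------------------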
class ThreadLocalTop;

typedef MaybeObject* (*AllocationFunction)(Heap* heap,
bool in_use() { return state_ > PREPARE_GC; }
bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyWeakEmbeddedObjectsInOptimizedCode();
void VerifyOmittedMapChecks();

template<SweepingParallelism type>
static intptr_t SweepConservatively(PagedSpace* space,
                                    FreeList* free_list,
                                    Page* p);
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
  return Page::FromAddress(reinterpret_cast<Address>(anchor))->ShouldSkipEvacuationSlotRecording();
}

INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
  return Page::FromAddress(reinterpret_cast<Address>(host))->ShouldSkipEvacuationSlotRecording();
}

INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
  return Page::FromAddress(reinterpret_cast<Address>(obj))->IsEvacuationCandidate();
}
INLINE(void EvictEvacuationCandidate(Page* page)) {
  if (FLAG_trace_fragmentation) {
    PrintF("Page %p is too popular. Disabling evacuation.\n",
           reinterpret_cast<void*>(page));
  }
  page->ClearEvacuationCandidate();
  evacuation_candidates_.RemoveElement(page);
}
Object* encountered_weak_collections() { return encountered_weak_collections_; }
void set_encountered_weak_collections(Object* weak_collection) {
  encountered_weak_collections_ = weak_collection;
}

bool sequential_sweeping() const { return sequential_sweeping_; }
bool MarkInvalidatedCode();
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);

void UnlinkEvacuationCandidates();
void ReleaseEvacuationCandidates();

void StartSweeperThreads();
enum CollectorState {
  ENCODE_FORWARDING_ADDRESSES
  // (other states such as PREPARE_GC and UPDATE_POINTERS omitted here)
};

CollectorState state_;
bool sweep_precisely_;
bool reduce_memory_footprint_;
bool abort_incremental_marking_;

bool was_marked_incrementally_;
bool sweeping_pending_;
Semaphore pending_sweeper_jobs_semaphore_;
bool sequential_sweeping_;

SlotsBufferAllocator slots_buffer_allocator_;
SlotsBuffer* migration_slots_buffer_;
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
void PrepareForCodeFlushing();

void MarkLiveObjects();
void MarkImplicitRefGroups();
void ProcessMarkingDeque();
void ProcessEphemeralMarking(ObjectVisitor* visitor);
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
void EmptyMarkingDeque();
void RefillMarkingDeque();
void ProcessMapCaches();
static bool IsUnmarkedHeapObject(Object** p);

void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ReattachInitialMaps();

void ProcessWeakCollections();
void ClearWeakCollections();
int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
                                         NewSpacePage* p);

void EvacuateNewSpace();
void EvacuateLiveObjectsFromPage(Page* p);
void EvacuatePages();
void EvacuateNewSpaceAndCandidates();

void ParallelSweepSpacesComplete();
void ParallelSweepSpaceComplete(PagedSpace* space);
friend class MarkObjectVisitor;
friend class UnmarkObjectVisitor;

Object* encountered_weak_collections_;
bool have_code_to_deoptimize_;
explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
  last_cell_index_ = Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(
          chunk_->AddressToMarkbitIndex(chunk_->area_end())));
  cell_base_ = chunk_->area_start();
  cell_index_ = Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(
          chunk_->AddressToMarkbitIndex(cell_base_)));
  cells_ = chunk_->markbits()->cells();
}
inline bool Done() { return cell_index_ == last_cell_index_; }

inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
inline MarkBit::CellType* CurrentCell() {
  ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
      chunk_->AddressToMarkbitIndex(cell_base_))));
  return &cells_[cell_index_];
}

inline Address CurrentCellBase() {
  ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
      chunk_->AddressToMarkbitIndex(cell_base_))));
  return cell_base_;
}
unsigned int last_cell_index_;
unsigned int cell_index_;
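// ---------------------------------------------------------------------------
// Illustration (not part of the original header): MarkBitCellIterator walks
// the marking bitmap one cell at a time, so a whole cell can be tested for
// zero in a single compare and only non-zero cells need a bit-by-bit scan. A
// standalone sketch of that idea, assuming 32-bit cells and using the
// hypothetical name CountMarkedBits (not V8 API):
#include <cassert>
#include <cstdint>

static int CountMarkedBits(const uint32_t* cells, int cell_count) {
  int marked = 0;
  for (int i = 0; i < cell_count; i++) {
    uint32_t cell = cells[i];
    if (cell == 0) continue;   // fast path: 32 mark bits skipped at once
    while (cell != 0) {
      cell &= cell - 1;        // clear the lowest set bit
      marked++;
    }
  }
  return marked;
}

int main() {
  const uint32_t cells[] = {0x0, 0x5, 0x80000000u, 0x0};
  assert(CountMarkedBits(cells, 4) == 3);
  return 0;
}
// ---------------------------------------------------------------------------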
explicit SequentialSweepingScope(MarkCompactCollector* collector)
    : collector_(collector) {
  collector_->set_sequential_sweeping(true);
}

~SequentialSweepingScope() {
  collector_->set_sequential_sweeping(false);
}
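// ---------------------------------------------------------------------------
// Illustration (not part of the original header): SequentialSweepingScope is
// an RAII guard -- the constructor sets the flag and the destructor clears
// it, so every exit path of the enclosing block, including early returns,
// restores the collector's state. A standalone sketch of the same pattern
// with hypothetical FlagScope/SweepSomething names (not V8 API):
#include <cassert>

static bool sequential_sweeping = false;

class FlagScope {
 public:
  FlagScope()  { sequential_sweeping = true; }
  ~FlagScope() { sequential_sweeping = false; }
};

static void SweepSomething(bool bail_out_early) {
  FlagScope scope;              // flag stays set for the duration of the call
  assert(sequential_sweeping);
  if (bail_out_early) return;   // destructor still clears the flag
  // ... sweeping work would go here ...
}

int main() {
  SweepSomething(true);
  assert(!sequential_sweeping);
  SweepSomething(false);
  assert(!sequential_sweeping);
  return 0;
}
// ---------------------------------------------------------------------------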
#endif  // V8_MARK_COMPACT_H_