      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {
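// Releases the VirtualMemory reservation that backs the marking deque.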
  delete marking_deque_memory_;
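// Slow path of the incremental write barrier: if the source object is already
// black it will not be rescanned, so the written slot has to be recorded.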
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
  ASSERT(obj->IsHeapObject());
  marking->write_barriers_invoked_since_last_step_ +=
      MemoryChunk::kWriteBarrierCounterGranularity -
          chunk->write_barrier_counter();
  marking->RecordWrite(obj, slot, *slot);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  if (BaseRecordWrite(host, slot, value)) {
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
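// Marks an object grey without pushing it on the marking deque; used for
// caches that are blackened in a separate pass once marking finishes.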
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    if (Marking::IsBlack(mark_bit)) {
    Marking::AnyToGrey(mark_bit);
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit,
                                       int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  ASSERT(Marking::IsBlack(mark_bit));
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit,
                                        int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  ASSERT(Marking::IsBlack(mark_bit));
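// Marking visitor used during incremental steps. Fixed arrays get a dedicated
// visitor so that large arrays can be scanned in chunks, driven by the
// per-page progress bar.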
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
    if (FLAG_use_marking_progress_bar &&
      int end_offset = Min(object_size,
                           start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap,
                                HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
      } while (scan_until_end && start_offset < object_size);
      if (start_offset < object_size) {
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      FixedArrayVisitor::Visit(map, object);
    MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
    VisitNativeContext(map, context);
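  // Pointer visitors: mark every heap object reachable through the visited
  // slots.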
      MarkObject(heap, obj);
    for (Object** p = start; p < end; p++) {
        MarkObject(heap, obj);
    for (Object** p = start; p < end; p++) {
        MarkObject(heap, obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
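// Visitor used to mark the roots when incremental marking starts: white
// objects found in the roots are greyed and pushed on the marking deque.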
      : incremental_marking_(incremental_marking) {
    MarkObjectByPointer(p);
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  void MarkObjectByPointer(Object** p) {
    if (!obj->IsHeapObject()) return;
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
      if (Marking::IsWhite(mark_bit)) {

  IncrementalMarking* incremental_marking_;
                                              bool is_compacting) {
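// Activating and deactivating the incremental write barrier walks every page
// of every space (including large-object pages) and flips its marking flags.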
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
  while (lop->is_valid()) {
    lop = lop->next_page();
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());
  while (lop->is_valid()) {
    lop = lop->next_page();
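  // Heuristic threshold for activating incremental marking; two values are
  // defined, presumably for different build configurations (the zero threshold
  // makes even tiny heaps exercise the incremental marker).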
  static const intptr_t kActivationThreshold = 8 * MB;
  static const intptr_t kActivationThreshold = 0;
  return FLAG_incremental_marking &&
      FLAG_incremental_marking_steps &&
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  for (int i = 0; i < capacity; i++) {
    if (stubs->IsKey(k)) {
      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
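// The marking deque lives in its own VirtualMemory reservation, committed
// lazily before marking starts and uncommitted again while marking is stopped.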
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
    marking_deque_memory_committed_ = true;
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    marking_deque_memory_committed_ = false;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  ASSERT(FLAG_incremental_marking);
  ASSERT(FLAG_incremental_marking_steps);
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
  EnsureMarkingDequeIsCommitted();
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  ActivateIncrementalWriteBarrier();
  if (FLAG_verify_heap) {
  if (FLAG_cleanup_code_caches_at_gc) {
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  IncrementalMarkingRootMarkingVisitor visitor(this);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  if (!IsMarking()) return;
  while (it.has_next()) {
  if (!IsMarking()) return;
  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  int new_top = current;
  Map* filler_map = heap_->one_pointer_filler_map();
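  // Walk the deque: entries moved by the scavenger are replaced with their
  // forwarding addresses, and entries whose map is the one-pointer filler are
  // dropped.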
  while (current != limit) {
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (map_word.IsForwardingAddress()) {
      HeapObject* dest = map_word.ToForwardingAddress();
      array[new_top] = dest;
      new_top = ((new_top + 1) & mask);
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
    } else if (obj->map() != filler_map) {
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             Marking::IsBlack(mark_bit)));
  marking_deque_.set_top(new_top);
  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_ASSERTS
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              Marking::IsBlack(mark_bit)));
  MarkBlackOrKeepBlack(obj, mark_bit, size);
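// Drains the deque until the per-step byte budget is exhausted; a large object
// that was only partially scanned hands back its unscanned bytes so the budget
// is charged correctly.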
void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
    HeapObject* obj = marking_deque_.Pop();
    Map* map = obj->map();
    if (map == filler_map) continue;
    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_to_process -= (size - unscanned_bytes_of_large_object_);
void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();
    Map* map = obj->map();
    if (map == filler_map) continue;
    VisitObject(map, obj, obj->SizeFromMap(map));
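// Hurry: finish marking synchronously by draining the whole deque, without a
// byte budget.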
  if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
  ProcessMarkingDeque();
  if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
    double delta = end - start;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(delta));
  if (FLAG_cleanup_code_caches_at_gc) {
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
  while (!context->IsUndefined()) {
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
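// Abort: incremental marking is cancelled; the record-write stubs are patched
// back and the per-page incremental marking flags are cleared.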
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  if (is_compacting_) {
  is_compacting_ = false;
  is_compacting_ = false;
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process =
      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
  write_barriers_invoked_since_last_step_ = 0;
  bytes_scanned_ += bytes_to_process;
  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
  } else if (state_ == MARKING) {
    ProcessMarkingDeque(bytes_to_process);
  steps_count_since_last_gc_++;
  bool speed_up = false;
    PrintPID("Speed up marking after %d steps\n",
             static_cast<int>(kMarkingSpeedAccellerationInterval));
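  // Speed-up heuristics: marking accelerates when little old-space is left,
  // when the heap grows substantially during marking, or when the marker
  // falls behind promotion.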
  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);
  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    PrintPID("Speed up marking because of heap size increase\n");
  intptr_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    PrintPID("Speed up marking because marker was not keeping up\n");
      PrintPID("Postponing speeding up marking until marking starts\n");
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed,
              static_cast<intptr_t>(marking_speed_ * 1.3)));
        PrintPID("Marking speed increased to %d\n", marking_speed_);
  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
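// Resets the per-cycle step counters and records how much old-generation
// space was available and in use when this incremental cycle started.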
void IncrementalMarking::ResetStepCounters() {
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
int64_t IncrementalMarking::SpaceLeftInOldSpace() {