      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      no_marking_scope_depth_(0) {
}
  delete marking_deque_memory_;  // Tear-down: release the marking deque backing store.
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
  ASSERT(obj->IsHeapObject());
  ASSERT(!marking->is_compacting_);
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  marking->write_barriers_invoked_since_last_step_ +=
      MemoryChunk::kWriteBarrierCounterGranularity -
      chunk->write_barrier_counter();
  marking->RecordWrite(obj, NULL, value);
  ASSERT(obj->IsHeapObject());
  ASSERT(marking->is_compacting_);
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  marking->write_barriers_invoked_since_last_step_ +=
      MemoryChunk::kWriteBarrierCounterGranularity -
      chunk->write_barrier_counter();
  marking->RecordWrite(obj, slot, *slot);
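// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this file): the RecordWrite
// calls above exist to preserve the tri-color invariant during incremental
// marking -- a black (already scanned) object must never end up pointing at a
// white (unvisited) one. The toy barrier below shows one common variant
// (grey the newly written target); ToyObject, mark_worklist and WriteBarrier
// are invented names for the example.
// ---------------------------------------------------------------------------
#include <deque>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };

struct ToyObject {
  Color color = Color::kWhite;
  std::vector<ToyObject*> fields;
};

static std::deque<ToyObject*> mark_worklist;

// Called by the mutator right after it stores `value` into a field of `host`
// while marking is in progress.
static void WriteBarrier(ToyObject* host, ToyObject* value) {
  if (host->color == Color::kBlack && value->color == Color::kWhite) {
    value->color = Color::kGrey;      // Re-grey the target...
    mark_worklist.push_back(value);   // ...so the marker will still scan it.
  }
}
// ---------------------------------------------------------------------------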
  // RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value):
  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
  RecordWriteIntoCode(host, &rinfo, value);

  // RecordCodeTargetPatch(Address pc, HeapObject* value):
  Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
      GcSafeFindCodeForInnerPointer(pc);
  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
  RecordWriteIntoCode(host, &rinfo, value);
  // RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value):
  if (BaseRecordWrite(host, slot, value)) {
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }

  if (is_compacting_) {
  // Marking visitor fragments: each visited pointer is marked, and objects
  // that become marked have their size added to the page's live-byte count.
  MarkObject(heap, obj);

  for (Object** p = start; p < end; p++) {
    MarkObject(heap, obj);
  }

  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                        heap_object->Size());

  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                        heap_object->Size());
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;
    // ...
    MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                          heap_object->Size());
  }

  IncrementalMarking* incremental_marking_;
void IncrementalMarking::SetOldSpacePageFlags(
    MemoryChunk* chunk, bool is_marking, bool is_compacting) {
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}
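// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this file): activating or
// deactivating the incremental write barrier boils down to walking every page
// of every space and flipping a per-page flag that the barrier code tests.
// ToyPage, ToySpace and kIncrementalBarrier are invented for the example.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

struct ToyPage {
  static const uint32_t kIncrementalBarrier = 1u << 0;
  uint32_t flags = 0;
};

struct ToySpace {
  std::vector<ToyPage> pages;
};

// Set or clear the barrier bit on every page, mirroring the
// Activate/DeactivateIncrementalWriteBarrier loops above.
static void SetIncrementalBarrier(std::vector<ToySpace>* spaces, bool on) {
  for (ToySpace& space : *spaces) {
    for (ToyPage& page : space.pages) {
      if (on) {
        page.flags |= ToyPage::kIncrementalBarrier;
      } else {
        page.flags &= ~ToyPage::kIncrementalBarrier;
      }
    }
  }
}
// ---------------------------------------------------------------------------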
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // Debug builds use a zero threshold so incremental marking kicks in early.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);
      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
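// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, POSIX-only, not part of this file): the
// VirtualMemory pattern above -- reserve a fixed address range once, then
// commit and uncommit its pages as the marking deque is needed -- can be
// approximated with mmap/mprotect/madvise. ReserveRegion, CommitRegion and
// UncommitRegion are invented names for the example.
// ---------------------------------------------------------------------------
#include <sys/mman.h>
#include <cstddef>

// Reserve address space with no usable backing (PROT_NONE).
static void* ReserveRegion(size_t size) {
  void* base = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return base == MAP_FAILED ? NULL : base;
}

// Commit: make the reserved range readable and writable.
static bool CommitRegion(void* base, size_t size) {
  return mprotect(base, size, PROT_READ | PROT_WRITE) == 0;
}

// Uncommit: drop the backing pages and make the range inaccessible again.
static bool UncommitRegion(void* base, size_t size) {
  return madvise(base, size, MADV_DONTNEED) == 0 &&
         mprotect(base, size, PROT_NONE) == 0;
}
// ---------------------------------------------------------------------------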
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);

  StartMarking(ALLOW_COMPACTION);

  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start sweeping.\n");
  }
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // The deque lives in this block; the flag shrinks it to exercise the
  // overflow handling in tests.
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;

  ActivateIncrementalWriteBarrier();

  if (FLAG_verify_heap) {

  if (FLAG_cleanup_code_caches_at_gc) {
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark the strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);

  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}
  if (!IsMarking()) return;

  while (it.has_next()) {
  if (!IsMarking()) return;

  // After a scavenge, fix up the marking deque in place: scavenged entries
  // are replaced by their forwarding addresses, entries that did not survive
  // (now one-pointer fillers) are dropped.
  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
      }
    } else if (obj->map() != filler_map) {
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
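// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this file): the deque above is
// a power-of-two ring buffer, so advancing an index is `(i + 1) & mask`.
// Filtering entries in place, as the scavenge fix-up loop does, looks like
// this with plain integers; CompactRing and `keep` are invented names.
// ---------------------------------------------------------------------------
// Keep only entries that satisfy `keep`, compacting the ring in place.
// `bottom`/`top` delimit the live region; `mask` is capacity - 1 for a
// power-of-two capacity. Returns the new top; bottom is unchanged.
static int CompactRing(int* ring, int bottom, int top, int mask,
                       bool (*keep)(int)) {
  int current = bottom;
  int new_top = bottom;
  while (current != top) {
    int value = ring[current];
    current = (current + 1) & mask;   // Advance with wrap-around.
    if (keep(value)) {
      ring[new_top] = value;          // Slide the survivor down.
      new_top = (new_top + 1) & mask;
    }
  }
  return new_top;
}

static bool IsEven(int v) { return (v & 1) == 0; }

// Example: with capacity 8 (mask 7), bottom 0 and top 4, the ring
// {3, 4, 7, 10, ...} compacts to {4, 10, ...} and CompactRing returns 2.
// ---------------------------------------------------------------------------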
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Hurry\n");
  }

  Map* filler_map = heap_->one_pointer_filler_map();
  Map* native_context_map = heap_->native_context_map();
  while (!marking_deque_.IsEmpty()) {
    if (map == filler_map) {
    } else if (map == native_context_map) {

  if (FLAG_trace_incremental_marking) {
    double end = OS::TimeCurrentMillis();
    PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
           static_cast<int>(end - start));
  }
  if (FLAG_cleanup_code_caches_at_gc) {

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    if (!cache->IsUndefined()) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }

  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();

  if (is_compacting_) {

  is_compacting_ = false;

  is_compacting_ = false;

  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process =
      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
  allocated_ = 0;
  write_barriers_invoked_since_last_step_ = 0;

  bytes_scanned_ += bytes_to_process;
  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    StartMarking(PREVENT_COMPACTION);
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* native_context_map = heap_->native_context_map();
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();
      Map* map = obj->map();
      if (map == filler_map) continue;

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;

      if (map == native_context_map) {
        // The context's normalized map cache is marked grey without being
        // pushed, so its contents are not traced here.
        Context* ctx = Context::cast(obj);
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
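// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this file): the marking loop
// above is budgeted -- each step converts the bytes the mutator allocated
// into a number of bytes the marker must trace before it returns control.
// ToyWorkItem, worklist, marking_speed and MarkingStep are invented names.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <deque>

struct ToyWorkItem {
  int64_t size_in_bytes;
};

static std::deque<ToyWorkItem> worklist;
static int64_t marking_speed = 1;  // Bytes traced per byte allocated.

// One incremental step: trace roughly `allocated_bytes * marking_speed`
// bytes' worth of work, then hand control back to the mutator.
static void MarkingStep(int64_t allocated_bytes) {
  int64_t bytes_to_process = allocated_bytes * marking_speed;
  while (!worklist.empty() && bytes_to_process > 0) {
    ToyWorkItem item = worklist.front();
    worklist.pop_front();
    bytes_to_process -= item.size_in_bytes;
    // A real marker would scan the item's fields here and push newly
    // discovered objects onto the worklist.
  }
}
// ---------------------------------------------------------------------------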
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if (FLAG_trace_gc) {
    PrintPID("Speed up marking after %d steps\n",
             static_cast<int>(kMarkingSpeedAccellerationInterval));
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  intptr_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;

  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
  }

  if (FLAG_trace_gc) {
    PrintPID("Postponing speeding up marking until marking starts\n");
  }

  marking_speed_ = static_cast<int>(
      Min(kMaxMarkingSpeed,
          static_cast<intptr_t>(marking_speed_ * 1.3)));
  if (FLAG_trace_gc) {
    PrintPID("Marking speed increased to %d\n", marking_speed_);
  }
  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
void IncrementalMarking::ResetStepCounters() {
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}