      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      marker_(this, heap->mark_compact_collector()),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      allocation_marking_factor_(0),
      no_marking_scope_depth_(0) {
  delete marking_deque_memory_;
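
// Slow path of the incremental write barrier: when the collector is also
// compacting, a write into an already-black (fully scanned) object must be
// recorded as a slot, because black objects are not rescanned before
// evacuation.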
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
  ASSERT(obj->IsHeapObject());

  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());

  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);

    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
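
// Write barrier for the code-entry field of a JSFunction: when compacting,
// the slot is recorded via RecordCodeEntrySlot so it can be updated if the
// target Code object moves.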
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  if (is_compacting_) {
        incremental_marking_(incremental_marking) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
        RecordCodeEntrySlot(entry_address, Code::cast(target));
    for (Object** p = start; p < end; p++) {
  INLINE(void MarkObject(Object* obj)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
  IncrementalMarking* incremental_marking_;
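
// Visitor used for the initial root scan: white objects are turned grey and
// pushed onto the marking deque; objects on data-only pages can be marked
// black immediately since they contain no outgoing pointers.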
        incremental_marking_(incremental_marking) {

    MarkObjectByPointer(p);

    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);

  void MarkObjectByPointer(Object** p) {
    if (!obj->IsHeapObject()) return;

        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());

  IncrementalMarking* incremental_marking_;
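
// Page flag maintenance for the incremental write barrier: while marking is
// active, old-space pages are tagged so stores into them take the barrier
// slow path; large objects may additionally be flagged for rescan on
// evacuation when compacting.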
                                              bool is_compacting) {

    if (chunk->owner()->identity() == LO_SPACE &&

  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  while (lop->is_valid()) {
    lop = lop->next_page();
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {

void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();

void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  while (lop->is_valid()) {
    lop = lop->next_page();
  // The zero threshold is the debug-build variant; the two definitions sit in
  // different #ifdef branches in the full source.
  static const intptr_t kActivationThreshold = 8 * MB;
  static const intptr_t kActivationThreshold = 0;

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
static void PatchIncrementalMarkingRecordWriteStubs(
  for (int i = 0; i < capacity; i++) {
    if (stubs->IsKey(k)) {
      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
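
// The marking deque's backing store is allocated lazily and committed only
// while it is needed; UncommitMarkingDeque below releases the pages again
// once marking has stopped.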
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
    marking_deque_memory_committed_ = true;
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    marking_deque_memory_committed_ = false;
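
// Start(): called when incremental marking is activated. If old space still
// needs sweeping, only the sweepers are advanced ("Start sweeping");
// otherwise StartMarking() is invoked with compaction allowed.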
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  ASSERT(FLAG_incremental_marking);

    StartMarking(ALLOW_COMPACTION);

    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;

  ActivateIncrementalWriteBarrier();

  if (FLAG_verify_heap) {

  if (FLAG_cleanup_code_caches_at_gc) {
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());

  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);

  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  if (!IsMarking()) return;

  while (it.has_next()) {

  if (!IsMarking()) return;
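
// After a scavenge the marking deque may contain stale pointers: entries are
// rewritten in place, following forwarding addresses for objects that were
// moved and dropping entries that became one-word fillers.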
  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();

  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (map_word.IsForwardingAddress()) {
      HeapObject* dest = map_word.ToForwardingAddress();
      array[new_top] = dest;
      new_top = ((new_top + 1) & mask);
    } else if (obj->map() != filler_map) {
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);

  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
      collector->RecordSlot(slot, slot, *slot);
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");

    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();

    while (!marking_deque_.IsEmpty()) {
      if (map == filler_map) {
      } else if (map == global_context_map) {
        if (FLAG_collect_maps &&
          marker_.MarkMapContents(map);
        obj->Iterate(&marking_visitor);

    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
  if (FLAG_cleanup_code_caches_at_gc) {

  while (!context->IsUndefined()) {
    if (!cache->IsUndefined()) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");

    PatchIncrementalMarkingRecordWriteStubs(heap_,
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {

    is_compacting_ = false;

  is_compacting_ = false;

  PatchIncrementalMarkingRecordWriteStubs(heap_,
  DeactivateIncrementalWriteBarrier();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");

      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||

  allocated_ += allocated_bytes;

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
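
// Each step marks an amount of memory proportional to what was allocated
// since the last step; allocation_marking_factor_ scales how aggressively
// the marker works relative to the mutator.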
  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {

      StartMarking(PREVENT_COMPACTION);

  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();

    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      if (map == filler_map) continue;

      bytes_to_process -= size;

      if (map == global_context_map) {
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
        VisitGlobalContext(ctx, &marking_visitor);

      if (FLAG_collect_maps &&
        marker_.MarkMapContents(map);
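
// The heuristics below decide whether to raise allocation_marking_factor_:
// the marker speeds up periodically, when little old-space headroom remains,
// when the heap has grown substantially since marking started, or when
// promotion is outpacing the bytes scanned so far.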
  steps_count_since_last_gc_++;

  bool speed_up = false;

      PrintF("Speed up marking after %d steps\n",
             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");

  bool size_of_old_space_multiplied_by_n_during_marking =
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
      PrintF("Speed up marking because of heap size increase\n");

      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;

  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
      PrintF("Speed up marking because marker was not keeping up\n");

      PrintF("Postponing speeding up marking until marking starts\n");

    allocation_marking_factor_ = static_cast<int>(
        static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      PrintF("Marking speed increased to %d\n", allocation_marking_factor_);

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
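
// The step counters and the old-space snapshot recorded here feed the
// speed-up heuristics above; they are reset at the start of each incremental
// marking cycle.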
void IncrementalMarking::ResetStepCounters() {

  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =

  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;


int64_t IncrementalMarking::SpaceLeftInOldSpace() {