v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
incremental-marking.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "incremental-marking.h"
31 
32 #include "code-stubs.h"
33 #include "compilation-cache.h"
34 #include "objects-visiting.h"
35 #include "objects-visiting-inl.h"
36 #include "v8conversions.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 
42 IncrementalMarking::IncrementalMarking(Heap* heap)
43  : heap_(heap),
44  state_(STOPPED),
45  marking_deque_memory_(NULL),
46  marking_deque_memory_committed_(false),
47  steps_count_(0),
48  steps_took_(0),
49  longest_step_(0.0),
50  old_generation_space_available_at_start_of_incremental_(0),
51  old_generation_space_used_at_start_of_incremental_(0),
52  steps_count_since_last_gc_(0),
53  steps_took_since_last_gc_(0),
54  should_hurry_(false),
55  marking_speed_(0),
56  allocated_(0),
57  no_marking_scope_depth_(0),
58  unscanned_bytes_of_large_object_(0) {
59 }
60 
61 
62 IncrementalMarking::~IncrementalMarking() {
63  delete marking_deque_memory_;
64 }
65 
66 
67 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
68  Object** slot,
69  Object* value) {
70  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
71  MarkBit obj_bit = Marking::MarkBitFrom(obj);
72  if (Marking::IsBlack(obj_bit)) {
73  // Object is not going to be rescanned. We need to record the slot.
74  heap_->mark_compact_collector()->RecordSlot(
75  HeapObject::RawField(obj, 0), slot, value);
76  }
77  }
78 }
79 
80 
81 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
82  Object** slot,
83  Isolate* isolate) {
84  ASSERT(obj->IsHeapObject());
85  IncrementalMarking* marking = isolate->heap()->incremental_marking();
86 
87  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
88  int counter = chunk->write_barrier_counter();
89  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
90  marking->write_barriers_invoked_since_last_step_ +=
91  MemoryChunk::kWriteBarrierCounterGranularity -
92  chunk->write_barrier_counter();
93  chunk->set_write_barrier_counter(
94  MemoryChunk::kWriteBarrierCounterGranularity);
95  }
96 
97  marking->RecordWrite(obj, slot, *slot);
98 }
99 
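The slow path above batches write-barrier bookkeeping per memory chunk: the generated barrier code burns down the chunk's write_barrier_counter, and only once it falls below half of kWriteBarrierCounterGranularity does this function fold the consumed amount into write_barriers_invoked_since_last_step_ and refill the counter. A minimal standalone sketch of that accounting follows; the Chunk/Marking structs and the granularity value are illustrative stand-ins, not V8's types.

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins, not V8's real types or constants.
static const int kGranularity = 500;

struct Chunk {
  int write_barrier_counter = kGranularity;
};

struct Marking {
  int64_t write_barriers_invoked_since_last_step = 0;
};

// Fold the consumed part of the per-chunk counter into the global tally only
// once the counter is at least half used up, then refill it.
void RecordBarrierHit(Chunk* chunk, Marking* marking) {
  --chunk->write_barrier_counter;  // the generated barrier code does this part
  if (chunk->write_barrier_counter < kGranularity / 2) {
    marking->write_barriers_invoked_since_last_step +=
        kGranularity - chunk->write_barrier_counter;
    chunk->write_barrier_counter = kGranularity;
  }
}

int main() {
  Chunk chunk;
  Marking marking;
  for (int i = 0; i < 1000; ++i) RecordBarrierHit(&chunk, &marking);
  std::printf("counted roughly %lld barrier hits\n",
              static_cast<long long>(
                  marking.write_barriers_invoked_since_last_step));
}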
100 
101 void IncrementalMarking::RecordCodeTargetPatch(Code* host,
102  Address pc,
103  HeapObject* value) {
104  if (IsMarking()) {
105  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
106  RecordWriteIntoCode(host, &rinfo, value);
107  }
108 }
109 
110 
111 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
112  if (IsMarking()) {
113  Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
114  GcSafeFindCodeForInnerPointer(pc);
115  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
116  RecordWriteIntoCode(host, &rinfo, value);
117  }
118 }
119 
120 
121 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
122  Object** slot,
123  Code* value) {
124  if (BaseRecordWrite(host, slot, value)) {
125  ASSERT(slot != NULL);
126  heap_->mark_compact_collector()->
127  RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
128  }
129 }
130 
131 
132 void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
133  RelocInfo* rinfo,
134  Object* value) {
135  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
136  if (Marking::IsWhite(value_bit)) {
137  MarkBit obj_bit = Marking::MarkBitFrom(obj);
138  if (Marking::IsBlack(obj_bit)) {
139  BlackToGreyAndUnshift(obj, obj_bit);
140  RestartIfNotMarking();
141  }
142  // Object is either grey or white. It will be scanned if it survives.
143  return;
144  }
145 
146  if (is_compacting_) {
147  MarkBit obj_bit = Marking::MarkBitFrom(obj);
148  if (Marking::IsBlack(obj_bit)) {
149  // Object is not going to be rescanned. We need to record the slot.
150  heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
151  Code::cast(value));
152  }
153  }
154 }
155 
156 
157 static void MarkObjectGreyDoNotEnqueue(Object* obj) {
158  if (obj->IsHeapObject()) {
159  HeapObject* heap_obj = HeapObject::cast(obj);
160  MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
161  if (Marking::IsBlack(mark_bit)) {
162  MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
163  -heap_obj->Size());
164  }
165  Marking::AnyToGrey(mark_bit);
166  }
167 }
168 
169 
170 static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
171  MarkBit mark_bit,
172  int size) {
173  ASSERT(!Marking::IsImpossible(mark_bit));
174  if (mark_bit.Get()) return;
175  mark_bit.Set();
176  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
177  ASSERT(Marking::IsBlack(mark_bit));
178 }
179 
180 
181 static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
182  MarkBit mark_bit,
183  int size) {
184  ASSERT(!Marking::IsImpossible(mark_bit));
185  if (Marking::IsBlack(mark_bit)) return;
186  Marking::MarkBlack(mark_bit);
187  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
188  ASSERT(Marking::IsBlack(mark_bit));
189 }
190 
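The two helpers above only ever move an object towards black, and they bump the page's live-byte count exactly once per object. The invariant behind them is the usual tri-color scheme: white objects are unvisited, grey objects sit in the marking deque waiting to have their fields scanned, and black objects are fully processed. A small sketch of that state machine on a plain enum; V8 actually packs the color into two bits of a per-page mark bitmap, which this deliberately ignores.

#include <cassert>

enum Color { WHITE, GREY, BLACK };  // V8 encodes these in a 2-bit mark bitmap

struct FakeObject {
  Color color;
  int size;
};

// Analogue of MarkBlackOrKeepBlack: never move an object back towards white,
// and count its size as live exactly once, when it first turns black.
void MarkBlackOrKeepBlack(FakeObject* obj, long* live_bytes) {
  if (obj->color == BLACK) return;  // already fully processed
  obj->color = BLACK;
  *live_bytes += obj->size;
}

int main() {
  long live_bytes = 0;
  FakeObject o = {WHITE, 64};
  MarkBlackOrKeepBlack(&o, &live_bytes);
  MarkBlackOrKeepBlack(&o, &live_bytes);  // idempotent: no double counting
  assert(o.color == BLACK && live_bytes == 64);
  return 0;
}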
191 
192 class IncrementalMarkingMarkingVisitor
193  : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
194  public:
195  static void Initialize() {
196  StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
197  table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
198  table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
199  table_.Register(kVisitJSRegExp, &VisitJSRegExp);
200  }
201 
202  static const int kProgressBarScanningChunk = 32 * 1024;
203 
204  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
205  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
206  // TODO(mstarzinger): Move setting of the flag to the allocation site of
207  // the array. The visitor should just check the flag.
208  if (FLAG_use_marking_progress_bar &&
209  chunk->owner()->identity() == LO_SPACE) {
210  chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
211  }
212  if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
213  Heap* heap = map->GetHeap();
214  // When using a progress bar for large fixed arrays, scan only a chunk of
215  // the array and try to push it onto the marking deque again until it is
216  // fully scanned. Fall back to scanning it through to the end in case this
217  // fails because of a full deque.
218  int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
219  int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
220  chunk->progress_bar());
221  int end_offset = Min(object_size,
222  start_offset + kProgressBarScanningChunk);
223  int already_scanned_offset = start_offset;
224  bool scan_until_end = false;
225  do {
226  VisitPointersWithAnchor(heap,
227  HeapObject::RawField(object, 0),
228  HeapObject::RawField(object, start_offset),
229  HeapObject::RawField(object, end_offset));
230  start_offset = end_offset;
231  end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
232  scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
233  } while (scan_until_end && start_offset < object_size);
234  chunk->set_progress_bar(start_offset);
235  if (start_offset < object_size) {
236  heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
237  heap->incremental_marking()->NotifyIncompleteScanOfObject(
238  object_size - (start_offset - already_scanned_offset));
239  }
240  } else {
241  FixedArrayVisitor::Visit(map, object);
242  }
243  }
244 
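For large fixed arrays on a page with HAS_PROGRESS_BAR set, the visitor above scans at most kProgressBarScanningChunk (32 KB) of the body per visit, stores how far it got in the page's progress bar, and re-queues the array until it has been walked to the end. The offset bookkeeping in isolation, with a made-up object size and without the real pointer visiting:

#include <algorithm>
#include <cstdio>

static const int kProgressBarScanningChunk = 32 * 1024;

// Scans one budgeted slice of a large array body and returns the new
// progress-bar offset; the caller re-queues the object while the returned
// offset is still smaller than object_size.
int ScanOneChunk(int object_size, int progress_bar) {
  int start_offset = std::max(progress_bar, 0);
  int end_offset =
      std::min(object_size, start_offset + kProgressBarScanningChunk);
  // ... visit the pointers in [start_offset, end_offset) here ...
  return end_offset;  // stored back into the page as the new progress bar
}

int main() {
  const int object_size = 100 * 1024;  // a 100 KB array, for illustration
  int progress_bar = 0;
  int visits = 0;
  while (progress_bar < object_size) {
    progress_bar = ScanOneChunk(object_size, progress_bar);
    ++visits;  // each visit corresponds to one pop from the marking deque
  }
  std::printf("scanned in %d incremental visits\n", visits);  // prints 4
}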
245  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
246  Context* context = Context::cast(object);
247 
248  // We will mark cache black with a separate pass
249  // when we finish marking.
250  MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
251  VisitNativeContext(map, context);
252  }
253 
254  static void VisitWeakCollection(Map* map, HeapObject* object) {
255  Heap* heap = map->GetHeap();
256  VisitPointers(heap,
257  HeapObject::RawField(object,
258  JSWeakCollection::kPropertiesOffset),
259  HeapObject::RawField(object, JSWeakCollection::kSize));
260  }
261 
262  static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
263 
264  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
265  Object* obj = *p;
266  if (obj->NonFailureIsHeapObject()) {
267  heap->mark_compact_collector()->RecordSlot(p, p, obj);
268  MarkObject(heap, obj);
269  }
270  }
271 
272  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
273  for (Object** p = start; p < end; p++) {
274  Object* obj = *p;
275  if (obj->NonFailureIsHeapObject()) {
276  heap->mark_compact_collector()->RecordSlot(start, p, obj);
277  MarkObject(heap, obj);
278  }
279  }
280  }
281 
282  INLINE(static void VisitPointersWithAnchor(Heap* heap,
283  Object** anchor,
284  Object** start,
285  Object** end)) {
286  for (Object** p = start; p < end; p++) {
287  Object* obj = *p;
288  if (obj->NonFailureIsHeapObject()) {
289  heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
290  MarkObject(heap, obj);
291  }
292  }
293  }
294 
295  // Marks the object grey and pushes it on the marking stack.
296  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
297  HeapObject* heap_object = HeapObject::cast(obj);
298  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
299  if (mark_bit.data_only()) {
300  MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
301  } else if (Marking::IsWhite(mark_bit)) {
302  heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
303  }
304  }
305 
306  // Marks the object black without pushing it on the marking stack.
307  // Returns true if object needed marking and false otherwise.
308  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
309  HeapObject* heap_object = HeapObject::cast(obj);
310  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
311  if (Marking::IsWhite(mark_bit)) {
312  mark_bit.Set();
313  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
314  heap_object->Size());
315  return true;
316  }
317  return false;
318  }
319 };
320 
321 
322 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
323  public:
324  explicit IncrementalMarkingRootMarkingVisitor(
325  IncrementalMarking* incremental_marking)
326  : incremental_marking_(incremental_marking) {
327  }
328 
329  void VisitPointer(Object** p) {
330  MarkObjectByPointer(p);
331  }
332 
333  void VisitPointers(Object** start, Object** end) {
334  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
335  }
336 
337  private:
338  void MarkObjectByPointer(Object** p) {
339  Object* obj = *p;
340  if (!obj->IsHeapObject()) return;
341 
342  HeapObject* heap_object = HeapObject::cast(obj);
343  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
344  if (mark_bit.data_only()) {
345  MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
346  } else {
347  if (Marking::IsWhite(mark_bit)) {
348  incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
349  }
350  }
351  }
352 
353  IncrementalMarking* incremental_marking_;
354 };
355 
356 
357 void IncrementalMarking::Initialize() {
358  IncrementalMarkingMarkingVisitor::Initialize();
359 }
360 
361 
362 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
363  bool is_marking,
364  bool is_compacting) {
365  if (is_marking) {
366  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
367  chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
368 
369  // It's difficult to filter out slots recorded for large objects.
370  if (chunk->owner()->identity() == LO_SPACE &&
371  chunk->size() > static_cast<size_t>(Page::kPageSize) &&
372  is_compacting) {
373  chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
374  }
375  } else if (chunk->owner()->identity() == CELL_SPACE ||
376  chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
377  chunk->scan_on_scavenge()) {
378  chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
379  chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
380  } else {
381  chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
382  chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
383  }
384 }
385 
386 
387 void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
388  bool is_marking) {
389  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
390  if (is_marking) {
391  chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
392  } else {
393  chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
394  }
395  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
396 }
397 
398 
399 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
400  PagedSpace* space) {
401  PageIterator it(space);
402  while (it.has_next()) {
403  Page* p = it.next();
404  SetOldSpacePageFlags(p, false, false);
405  }
406 }
407 
408 
409 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
410  NewSpace* space) {
411  NewSpacePageIterator it(space);
412  while (it.has_next()) {
413  NewSpacePage* p = it.next();
414  SetNewSpacePageFlags(p, false);
415  }
416 }
417 
418 
419 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
420  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
421  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
422  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
423  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
424  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
425  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
426  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
427 
428  LargePage* lop = heap_->lo_space()->first_page();
429  while (lop->is_valid()) {
430  SetOldSpacePageFlags(lop, false, false);
431  lop = lop->next_page();
432  }
433 }
434 
435 
436 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
437  PageIterator it(space);
438  while (it.has_next()) {
439  Page* p = it.next();
440  SetOldSpacePageFlags(p, true, is_compacting_);
441  }
442 }
443 
444 
445 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
446  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
447  while (it.has_next()) {
448  NewSpacePage* p = it.next();
449  SetNewSpacePageFlags(p, true);
450  }
451 }
452 
453 
454 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
455  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
456  ActivateIncrementalWriteBarrier(heap_->old_data_space());
457  ActivateIncrementalWriteBarrier(heap_->cell_space());
458  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
459  ActivateIncrementalWriteBarrier(heap_->map_space());
460  ActivateIncrementalWriteBarrier(heap_->code_space());
461  ActivateIncrementalWriteBarrier(heap_->new_space());
462 
463  LargePage* lop = heap_->lo_space()->first_page();
464  while (lop->is_valid()) {
465  SetOldSpacePageFlags(lop, true, is_compacting_);
466  lop = lop->next_page();
467  }
468 }
469 
470 
471 bool IncrementalMarking::WorthActivating() {
472 #ifndef DEBUG
473  static const intptr_t kActivationThreshold = 8 * MB;
474 #else
475  // TODO(gc) consider setting this to some low level so that some
476  // debug tests run with incremental marking and some without.
477  static const intptr_t kActivationThreshold = 0;
478 #endif
479  // Only start incremental marking in a safe state: 1) when incremental
480  // marking is turned on, 2) when we are currently not in a GC, and
481  // 3) when we are currently not serializing or deserializing the heap.
482  return FLAG_incremental_marking &&
483  FLAG_incremental_marking_steps &&
484  heap_->gc_state() == Heap::NOT_IN_GC &&
485  !Serializer::enabled() &&
486  heap_->isolate()->IsInitialized() &&
487  heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
488 }
489 
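WorthActivating (above) gates incremental marking on a handful of cheap checks plus an 8 MB promoted-size threshold in release builds. The shape of the predicate, with the Heap and Serializer queries replaced by plain parameters for illustration:

#include <cstdint>

static const int64_t MB = 1024 * 1024;
static const int64_t kActivationThreshold = 8 * MB;  // 0 in debug builds

// Same conjunction as WorthActivating(), but with hypothetical inputs instead
// of the V8-internal flag, heap and serializer queries.
bool WorthActivating(bool incremental_marking_flag,
                     bool incremental_marking_steps_flag,
                     bool in_gc,
                     bool serializer_enabled,
                     bool isolate_initialized,
                     int64_t promoted_space_size) {
  return incremental_marking_flag &&
         incremental_marking_steps_flag &&
         !in_gc &&
         !serializer_enabled &&
         isolate_initialized &&
         promoted_space_size > kActivationThreshold;
}

int main() {
  // Marking is worth starting once more than 8 MB has been promoted.
  return WorthActivating(true, true, false, false, true, 16 * MB) ? 0 : 1;
}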
490 
491 void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
492  ASSERT(RecordWriteStub::GetMode(stub) ==
493  RecordWriteStub::STORE_BUFFER_ONLY);
494 
495  if (!IsMarking()) {
496  // Initially stub is generated in STORE_BUFFER_ONLY mode thus
497  // we don't need to do anything if incremental marking is
498  // not active.
499  } else if (IsCompacting()) {
500  RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
501  } else {
502  RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
503  }
504 }
505 
506 
507 static void PatchIncrementalMarkingRecordWriteStubs(
508  Heap* heap, RecordWriteStub::Mode mode) {
509  UnseededNumberDictionary* stubs = heap->code_stubs();
510 
511  int capacity = stubs->Capacity();
512  for (int i = 0; i < capacity; i++) {
513  Object* k = stubs->KeyAt(i);
514  if (stubs->IsKey(k)) {
515  uint32_t key = NumberToUint32(k);
516 
517  if (CodeStub::MajorKeyFromKey(key) ==
518  CodeStub::RecordWrite) {
519  Object* e = stubs->ValueAt(i);
520  if (e->IsCode()) {
521  RecordWriteStub::Patch(Code::cast(e), mode);
522  }
523  }
524  }
525  }
526 }
527 
528 
529 void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
530  if (marking_deque_memory_ == NULL) {
531  marking_deque_memory_ = new VirtualMemory(4 * MB);
532  }
533  if (!marking_deque_memory_committed_) {
534  bool success = marking_deque_memory_->Commit(
535  reinterpret_cast<Address>(marking_deque_memory_->address()),
536  marking_deque_memory_->size(),
537  false); // Not executable.
538  CHECK(success);
539  marking_deque_memory_committed_ = true;
540  }
541 }
542 
543 
544 void IncrementalMarking::UncommitMarkingDeque() {
545  if (state_ == STOPPED && marking_deque_memory_committed_) {
546  bool success = marking_deque_memory_->Uncommit(
547  reinterpret_cast<Address>(marking_deque_memory_->address()),
548  marking_deque_memory_->size());
549  CHECK(success);
550  marking_deque_memory_committed_ = false;
551  }
552 }
553 
554 
555 void IncrementalMarking::Start(CompactionFlag flag) {
556  if (FLAG_trace_incremental_marking) {
557  PrintF("[IncrementalMarking] Start\n");
558  }
559  ASSERT(FLAG_incremental_marking);
560  ASSERT(FLAG_incremental_marking_steps);
561  ASSERT(state_ == STOPPED);
562  ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
563  ASSERT(!Serializer::enabled());
564  ASSERT(heap_->isolate()->IsInitialized());
565 
566  ResetStepCounters();
567 
568  if (heap_->IsSweepingComplete()) {
569  StartMarking(flag);
570  } else {
571  if (FLAG_trace_incremental_marking) {
572  PrintF("[IncrementalMarking] Start sweeping.\n");
573  }
574  state_ = SWEEPING;
575  }
576 
577  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
578 }
579 
580 
581 void IncrementalMarking::StartMarking(CompactionFlag flag) {
582  if (FLAG_trace_incremental_marking) {
583  PrintF("[IncrementalMarking] Start marking\n");
584  }
585 
586  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
587  heap_->mark_compact_collector()->StartCompaction(
588  MarkCompactCollector::INCREMENTAL_COMPACTION);
589 
590  state_ = MARKING;
591 
592  RecordWriteStub::Mode mode = is_compacting_ ?
593  RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
594 
595  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
596 
597  EnsureMarkingDequeIsCommitted();
598 
599  // Initialize marking stack.
600  Address addr = static_cast<Address>(marking_deque_memory_->address());
601  size_t size = marking_deque_memory_->size();
602  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
603  marking_deque_.Initialize(addr, addr + size);
604 
605  ActivateIncrementalWriteBarrier();
606 
607  // Marking bits are cleared by the sweeper.
608 #ifdef VERIFY_HEAP
609  if (FLAG_verify_heap) {
610  heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
611  }
612 #endif
613 
614  heap_->CompletelyClearInstanceofCache();
615  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
616 
617  if (FLAG_cleanup_code_caches_at_gc) {
618  // We will mark cache black with a separate pass
619  // when we finish marking.
620  MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
621  }
622 
623  // Mark strong roots grey.
624  IncrementalMarkingRootMarkingVisitor visitor(this);
625  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
626 
627  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
628 
629  // Ready to start incremental marking.
630  if (FLAG_trace_incremental_marking) {
631  PrintF("[IncrementalMarking] Running\n");
632  }
633 }
634 
635 
636 void IncrementalMarking::PrepareForScavenge() {
637  if (!IsMarking()) return;
638  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
639  heap_->new_space()->FromSpaceEnd());
640  while (it.has_next()) {
641  Bitmap::Clear(it.next());
642  }
643 }
644 
645 
646 void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
647  if (!IsMarking()) return;
648 
649  int current = marking_deque_.bottom();
650  int mask = marking_deque_.mask();
651  int limit = marking_deque_.top();
652  HeapObject** array = marking_deque_.array();
653  int new_top = current;
654 
655  Map* filler_map = heap_->one_pointer_filler_map();
656 
657  while (current != limit) {
658  HeapObject* obj = array[current];
659  ASSERT(obj->IsHeapObject());
660  current = ((current + 1) & mask);
661  if (heap_->InNewSpace(obj)) {
662  MapWord map_word = obj->map_word();
663  if (map_word.IsForwardingAddress()) {
664  HeapObject* dest = map_word.ToForwardingAddress();
665  array[new_top] = dest;
666  new_top = ((new_top + 1) & mask);
667  ASSERT(new_top != marking_deque_.bottom());
668 #ifdef DEBUG
669  MarkBit mark_bit = Marking::MarkBitFrom(obj);
670  ASSERT(Marking::IsGrey(mark_bit) ||
671  (obj->IsFiller() && Marking::IsWhite(mark_bit)));
672 #endif
673  }
674  } else if (obj->map() != filler_map) {
675  // Skip one word filler objects that appear on the
676  // stack when we perform in place array shift.
677  array[new_top] = obj;
678  new_top = ((new_top + 1) & mask);
679  ASSERT(new_top != marking_deque_.bottom());
680 #ifdef DEBUG
681  MarkBit mark_bit = Marking::MarkBitFrom(obj);
682  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
683  ASSERT(Marking::IsGrey(mark_bit) ||
684  (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
685  (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
686  Marking::IsBlack(mark_bit)));
687 #endif
688  }
689  }
690  marking_deque_.set_top(new_top);
691 
692  steps_took_since_last_gc_ = 0;
693  steps_count_since_last_gc_ = 0;
694  longest_step_ = 0.0;
695 }
696 
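UpdateMarkingDequeAfterScavenge (above) treats the deque as a ring buffer indexed with a power-of-two mask: it walks from bottom to top, drops entries for one-word fillers, rewrites new-space entries to their forwarding addresses, compacts the survivors towards bottom, and finally resets top. The ring-buffer compaction pattern on its own, with integers standing in for HeapObject pointers:

#include <cassert>
#include <vector>

// Compacts a ring buffer in place, keeping only entries accepted by `keep`.
// `bottom`/`top` index into `ring`, whose size must be a power of two.
int CompactRing(std::vector<int>* ring, int bottom, int top, bool (*keep)(int)) {
  const int mask = static_cast<int>(ring->size()) - 1;
  int new_top = bottom;
  for (int current = bottom; current != top; current = (current + 1) & mask) {
    int value = (*ring)[current];
    if (!keep(value)) continue;      // e.g. skip one-word fillers
    (*ring)[new_top] = value;        // V8 may also rewrite the entry here,
    new_top = (new_top + 1) & mask;  // e.g. to a forwarding address
  }
  return new_top;                    // becomes the deque's new top
}

int main() {
  std::vector<int> ring = {1, 0, 2, 0, 3, 0, 0, 0};  // 0 stands for a filler
  int new_top = CompactRing(&ring, /*bottom=*/0, /*top=*/6,
                            [](int v) { return v != 0; });
  assert(new_top == 3 && ring[0] == 1 && ring[1] == 2 && ring[2] == 3);
  return 0;
}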
697 
698 void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
699  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
700  if (Marking::IsWhite(map_mark_bit)) {
701  WhiteToGreyAndPush(map, map_mark_bit);
702  }
703 
704  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
705 
706  MarkBit mark_bit = Marking::MarkBitFrom(obj);
707 #if ENABLE_SLOW_ASSERTS
708  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
709  SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
710  (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
711  (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
712  Marking::IsBlack(mark_bit)));
713 #endif
714  MarkBlackOrKeepBlack(obj, mark_bit, size);
715 }
716 
717 
718 void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
719  Map* filler_map = heap_->one_pointer_filler_map();
720  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
721  HeapObject* obj = marking_deque_.Pop();
722 
723  // Explicitly skip one word fillers. Incremental markbit patterns are
724  // correct only for objects that occupy at least two words.
725  Map* map = obj->map();
726  if (map == filler_map) continue;
727 
728  int size = obj->SizeFromMap(map);
729  unscanned_bytes_of_large_object_ = 0;
730  VisitObject(map, obj, size);
731  bytes_to_process -= (size - unscanned_bytes_of_large_object_);
732  }
733 }
734 
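ProcessMarkingDeque(intptr_t bytes_to_process), just above, drains the deque against a byte budget rather than an object count, so one incremental step does a bounded amount of scanning regardless of how the work is split into objects (large objects with a progress bar additionally give back their unscanned tail via unscanned_bytes_of_large_object_). The budget loop in isolation, with object sizes standing in for real heap objects:

#include <cstdint>
#include <cstdio>
#include <deque>

// Pops work items (object sizes, in bytes) until the step budget is used up.
// Returns how many objects this step actually visited.
int ProcessWithBudget(std::deque<int>* work, int64_t bytes_to_process) {
  int visited = 0;
  while (!work->empty() && bytes_to_process > 0) {
    int size = work->front();
    work->pop_front();
    // VisitObject(map, obj, size) would scan the object's fields here.
    bytes_to_process -= size;
    ++visited;
  }
  return visited;
}

int main() {
  std::deque<int> work = {4096, 8192, 65536, 128, 256};
  int visited = ProcessWithBudget(&work, 16 * 1024);
  std::printf("visited %d objects, %d left for later steps\n",
              visited, static_cast<int>(work.size()));  // 3 visited, 2 left
}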
735 
736 void IncrementalMarking::ProcessMarkingDeque() {
737  Map* filler_map = heap_->one_pointer_filler_map();
738  while (!marking_deque_.IsEmpty()) {
739  HeapObject* obj = marking_deque_.Pop();
740 
741  // Explicitly skip one word fillers. Incremental markbit patterns are
742  // correct only for objects that occupy at least two words.
743  Map* map = obj->map();
744  if (map == filler_map) continue;
745 
746  VisitObject(map, obj, obj->SizeFromMap(map));
747  }
748 }
749 
750 
751 void IncrementalMarking::Hurry() {
752  if (state() == MARKING) {
753  double start = 0.0;
754  if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
755  start = OS::TimeCurrentMillis();
756  if (FLAG_trace_incremental_marking) {
757  PrintF("[IncrementalMarking] Hurry\n");
758  }
759  }
760  // TODO(gc) hurry can mark objects it encounters black as mutator
761  // was stopped.
762  ProcessMarkingDeque();
763  state_ = COMPLETE;
764  if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
765  double end = OS::TimeCurrentMillis();
766  double delta = end - start;
767  heap_->AddMarkingTime(delta);
768  if (FLAG_trace_incremental_marking) {
769  PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
770  static_cast<int>(delta));
771  }
772  }
773  }
774 
775  if (FLAG_cleanup_code_caches_at_gc) {
776  PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
777  Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
778  MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
779  PolymorphicCodeCache::kSize);
780  }
781 
782  Object* context = heap_->native_contexts_list();
783  while (!context->IsUndefined()) {
784  // GC can happen when the context is not fully initialized,
785  // so the cache can be undefined.
786  HeapObject* cache = HeapObject::cast(
787  Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
788  if (!cache->IsUndefined()) {
789  MarkBit mark_bit = Marking::MarkBitFrom(cache);
790  if (Marking::IsGrey(mark_bit)) {
791  Marking::GreyToBlack(mark_bit);
792  MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
793  }
794  }
795  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
796  }
797 }
798 
799 
800 void IncrementalMarking::Abort() {
801  if (IsStopped()) return;
802  if (FLAG_trace_incremental_marking) {
803  PrintF("[IncrementalMarking] Aborting.\n");
804  }
805  heap_->new_space()->LowerInlineAllocationLimit(0);
806  IncrementalMarking::set_should_hurry(false);
807  ResetStepCounters();
808  if (IsMarking()) {
809  PatchIncrementalMarkingRecordWriteStubs(heap_,
810  RecordWriteStub::STORE_BUFFER_ONLY);
811  DeactivateIncrementalWriteBarrier();
812 
813  if (is_compacting_) {
814  LargeObjectIterator it(heap_->lo_space());
815  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
816  Page* p = Page::FromAddress(obj->address());
817  if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
818  p->ClearFlag(Page::RESCAN_ON_EVACUATION);
819  }
820  }
821  }
822  }
823  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
824  state_ = STOPPED;
825  is_compacting_ = false;
826 }
827 
828 
829 void IncrementalMarking::Finalize() {
830  Hurry();
831  state_ = STOPPED;
832  is_compacting_ = false;
833  heap_->new_space()->LowerInlineAllocationLimit(0);
834  IncrementalMarking::set_should_hurry(false);
835  ResetStepCounters();
836  PatchIncrementalMarkingRecordWriteStubs(heap_,
837  RecordWriteStub::STORE_BUFFER_ONLY);
838  DeactivateIncrementalWriteBarrier();
839  ASSERT(marking_deque_.IsEmpty());
840  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
841 }
842 
843 
844 void IncrementalMarking::MarkingComplete(CompletionAction action) {
845  state_ = COMPLETE;
846  // We will set the stack guard to request a GC now. This will mean the rest
847  // of the GC gets performed as soon as possible (we can't do a GC here in a
848  // record-write context). If a few things get allocated between now and then
849  // that shouldn't make us do a scavenge and keep being incremental, so we set
850  // the should-hurry flag to indicate that there can't be much work left to do.
851  set_should_hurry(true);
852  if (FLAG_trace_incremental_marking) {
853  PrintF("[IncrementalMarking] Complete (normal).\n");
854  }
855  if (action == GC_VIA_STACK_GUARD) {
856  heap_->isolate()->stack_guard()->RequestGC();
857  }
858 }
859 
860 
861 void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
862  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
863  // TODO(hpayer): Let's play safe for now, but compaction should be
864  // in principle possible.
865  Start(PREVENT_COMPACTION);
866  } else {
867  Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
868  }
869 }
870 
871 
872 void IncrementalMarking::Step(intptr_t allocated_bytes,
873  CompletionAction action) {
874  if (heap_->gc_state() != Heap::NOT_IN_GC ||
875  !FLAG_incremental_marking ||
876  !FLAG_incremental_marking_steps ||
877  (state_ != SWEEPING && state_ != MARKING)) {
878  return;
879  }
880 
881  allocated_ += allocated_bytes;
882 
883  if (allocated_ < kAllocatedThreshold &&
884  write_barriers_invoked_since_last_step_ <
885  kWriteBarriersInvokedThreshold) {
886  return;
887  }
888 
889  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
890 
891  // The marking speed is driven either by the allocation rate or by the rate
892  // at which we are having to check the color of objects in the write barrier.
893  // It is possible for a tight non-allocating loop to run a lot of write
894  // barriers before we get here and check them (marking can only take place on
895  // allocation), so to reduce the lumpiness we don't use the write barriers
896  // invoked since last step directly to determine the amount of work to do.
897  intptr_t bytes_to_process =
898  marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
899  allocated_ = 0;
900  write_barriers_invoked_since_last_step_ = 0;
901 
902  bytes_scanned_ += bytes_to_process;
903 
904  double start = 0;
905 
906  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
907  FLAG_print_cumulative_gc_stat) {
908  start = OS::TimeCurrentMillis();
909  }
910 
911  if (state_ == SWEEPING) {
912  if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
913  bytes_scanned_ = 0;
914  StartMarking(PREVENT_COMPACTION);
915  }
916  } else if (state_ == MARKING) {
917  ProcessMarkingDeque(bytes_to_process);
918  if (marking_deque_.IsEmpty()) MarkingComplete(action);
919  }
920 
921  steps_count_++;
922  steps_count_since_last_gc_++;
923 
924  bool speed_up = false;
925 
926  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
927  if (FLAG_trace_gc) {
928  PrintPID("Speed up marking after %d steps\n",
929  static_cast<int>(kMarkingSpeedAccellerationInterval));
930  }
931  speed_up = true;
932  }
933 
934  bool space_left_is_very_small =
935  (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
936 
937  bool only_1_nth_of_space_that_was_available_still_left =
938  (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
939  old_generation_space_available_at_start_of_incremental_);
940 
941  if (space_left_is_very_small ||
942  only_1_nth_of_space_that_was_available_still_left) {
943  if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
944  speed_up = true;
945  }
946 
947  bool size_of_old_space_multiplied_by_n_during_marking =
948  (heap_->PromotedTotalSize() >
949  (marking_speed_ + 1) *
950  old_generation_space_used_at_start_of_incremental_);
951  if (size_of_old_space_multiplied_by_n_during_marking) {
952  speed_up = true;
953  if (FLAG_trace_gc) {
954  PrintPID("Speed up marking because of heap size increase\n");
955  }
956  }
957 
958  int64_t promoted_during_marking = heap_->PromotedTotalSize()
959  - old_generation_space_used_at_start_of_incremental_;
960  intptr_t delay = marking_speed_ * MB;
961  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
962 
963  // We try to scan at least twice as fast as we allocate.
964  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
965  if (FLAG_trace_gc) {
966  PrintPID("Speed up marking because marker was not keeping up\n");
967  }
968  speed_up = true;
969  }
970 
971  if (speed_up) {
972  if (state_ != MARKING) {
973  if (FLAG_trace_gc) {
974  PrintPID("Postponing speeding up marking until marking starts\n");
975  }
976  } else {
977  marking_speed_ += kMarkingSpeedAccelleration;
978  marking_speed_ = static_cast<int>(
979  Min(kMaxMarkingSpeed,
980  static_cast<intptr_t>(marking_speed_ * 1.3)));
981  if (FLAG_trace_gc) {
982  PrintPID("Marking speed increased to %d\n", marking_speed_);
983  }
984  }
985  }
986 
987  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
988  FLAG_print_cumulative_gc_stat) {
989  double end = OS::TimeCurrentMillis();
990  double delta = (end - start);
991  longest_step_ = Max(longest_step_, delta);
992  steps_took_ += delta;
993  steps_took_since_last_gc_ += delta;
994  heap_->AddMarkingTime(delta);
995  }
996 }
997 
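The heuristics in Step() reduce to a little arithmetic: the work budget is marking_speed_ times the larger of the bytes allocated and the write barriers invoked since the last step, and whenever a speed-up condition fires the speed is bumped by kMarkingSpeedAccelleration, scaled by 1.3, and capped at kMaxMarkingSpeed. A worked sketch of that arithmetic with assumed constant values; the real constants are declared in incremental-marking.h.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Assumed values for illustration only; see incremental-marking.h for the
// real kInitialMarkingSpeed / kMarkingSpeedAccelleration / kMaxMarkingSpeed.
static const int kInitialMarkingSpeed = 1;
static const int kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;

intptr_t BytesToProcess(intptr_t marking_speed,
                        intptr_t allocated,
                        intptr_t write_barriers_invoked) {
  // Work is driven by whichever of the two signals is larger.
  return marking_speed * std::max(allocated, write_barriers_invoked);
}

int SpeedUp(int marking_speed) {
  marking_speed += kMarkingSpeedAccelleration;
  // Additive kick, then a 1.3x scale, capped at the maximum speed.
  return static_cast<int>(std::min(
      kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed * 1.3)));
}

int main() {
  int speed = kInitialMarkingSpeed;
  std::printf("step budget = %lld bytes\n",
              static_cast<long long>(BytesToProcess(speed, 65536, 4096)));
  for (int i = 0; i < 5; ++i) speed = SpeedUp(speed);
  std::printf("speed after 5 accelerations = %d\n", speed);
}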
998 
999 void IncrementalMarking::ResetStepCounters() {
1000  steps_count_ = 0;
1001  steps_took_ = 0;
1002  longest_step_ = 0.0;
1003  old_generation_space_available_at_start_of_incremental_ =
1004  SpaceLeftInOldSpace();
1005  old_generation_space_used_at_start_of_incremental_ =
1006  heap_->PromotedTotalSize();
1007  steps_count_since_last_gc_ = 0;
1008  steps_took_since_last_gc_ = 0;
1009  bytes_rescanned_ = 0;
1010  marking_speed_ = kInitialMarkingSpeed;
1011  bytes_scanned_ = 0;
1012  write_barriers_invoked_since_last_step_ = 0;
1013 }
1014 
1015 
1016 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
1017  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
1018 }
1019 
1020 } } // namespace v8::internal