V8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
incremental-marking.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "incremental-marking.h"
31 
32 #include "code-stubs.h"
33 #include "compilation-cache.h"
34 #include "objects-visiting.h"
35 #include "objects-visiting-inl.h"
36 #include "v8conversions.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 
42 IncrementalMarking::IncrementalMarking(Heap* heap)
43     : heap_(heap),
44  state_(STOPPED),
45  marking_deque_memory_(NULL),
46  marking_deque_memory_committed_(false),
47  steps_count_(0),
48  steps_took_(0),
49  longest_step_(0.0),
50  old_generation_space_available_at_start_of_incremental_(0),
51  old_generation_space_used_at_start_of_incremental_(0),
52  steps_count_since_last_gc_(0),
53  steps_took_since_last_gc_(0),
54  should_hurry_(false),
55  marking_speed_(0),
56  allocated_(0),
57  no_marking_scope_depth_(0) {
58 }
59 
60 
61 IncrementalMarking::~IncrementalMarking() {
62   delete marking_deque_memory_;
63 }
64 
65 
66 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
67                                          Object** slot,
68  Object* value) {
69  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
70  MarkBit obj_bit = Marking::MarkBitFrom(obj);
71  if (Marking::IsBlack(obj_bit)) {
72       // Object is not going to be rescanned; we need to record the slot.
73  heap_->mark_compact_collector()->RecordSlot(
74  HeapObject::RawField(obj, 0), slot, value);
75  }
76  }
77 }
78 
79 
80 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
81                                              Object* value,
82  Isolate* isolate) {
83  ASSERT(obj->IsHeapObject());
84  IncrementalMarking* marking = isolate->heap()->incremental_marking();
85  ASSERT(!marking->is_compacting_);
86 
87   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
88   int counter = chunk->write_barrier_counter();
89   if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
90     marking->write_barriers_invoked_since_last_step_ +=
91         MemoryChunk::kWriteBarrierCounterGranularity -
92             chunk->write_barrier_counter();
93     chunk->set_write_barrier_counter(
94         MemoryChunk::kWriteBarrierCounterGranularity);
95   }
96 
97  marking->RecordWrite(obj, NULL, value);
98 }
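// The write-barrier counter bookkeeping above (and in the evacuation variant
// below) is a sampling scheme: once a chunk's counter has fallen below half of
// MemoryChunk::kWriteBarrierCounterGranularity, the consumed portion is folded
// into write_barriers_invoked_since_last_step_ and the counter is reset to its
// full value. Step() later compares that running total against
// kWriteBarriersInvokedThreshold, so barrier-heavy but allocation-light code
// still drives marking forward.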
99 
100 
101 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
102                                                           Object** slot,
103  Isolate* isolate) {
104  ASSERT(obj->IsHeapObject());
105  IncrementalMarking* marking = isolate->heap()->incremental_marking();
106  ASSERT(marking->is_compacting_);
107 
108   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
109   int counter = chunk->write_barrier_counter();
110   if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
111     marking->write_barriers_invoked_since_last_step_ +=
112         MemoryChunk::kWriteBarrierCounterGranularity -
113             chunk->write_barrier_counter();
114     chunk->set_write_barrier_counter(
115         MemoryChunk::kWriteBarrierCounterGranularity);
116   }
117 
118  marking->RecordWrite(obj, slot, *slot);
119 }
120 
121 
122 void IncrementalMarking::RecordCodeTargetPatch(Code* host,
123                                                Address pc,
124  HeapObject* value) {
125  if (IsMarking()) {
126  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
127  RecordWriteIntoCode(host, &rinfo, value);
128  }
129 }
130 
131 
132 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
133   if (IsMarking()) {
134  Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
135  GcSafeFindCodeForInnerPointer(pc);
136  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
137  RecordWriteIntoCode(host, &rinfo, value);
138  }
139 }
140 
141 
142 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
143                                                     Object** slot,
144  Code* value) {
145  if (BaseRecordWrite(host, slot, value)) {
146  ASSERT(slot != NULL);
147  heap_->mark_compact_collector()->
148  RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
149  }
150 }
151 
152 
153 void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
154                                                  RelocInfo* rinfo,
155  Object* value) {
156  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
157  if (Marking::IsWhite(value_bit)) {
158  MarkBit obj_bit = Marking::MarkBitFrom(obj);
159  if (Marking::IsBlack(obj_bit)) {
160  BlackToGreyAndUnshift(obj, obj_bit);
161       RestartIfNotMarking();
162     }
163     // Object is either grey or white. It will be scanned if it survives.
164  return;
165  }
166 
167  if (is_compacting_) {
168  MarkBit obj_bit = Marking::MarkBitFrom(obj);
169  if (Marking::IsBlack(obj_bit)) {
170  // Object is not going to be rescanned. We need to record the slot.
171  heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
172  Code::cast(value));
173  }
174  }
175 }
176 
177 
178 class IncrementalMarkingMarkingVisitor
179     : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
180  public:
181  static void Initialize() {
182     StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
183 
184  table_.Register(kVisitJSRegExp, &VisitJSRegExp);
185  }
186 
187  static void VisitJSWeakMap(Map* map, HeapObject* object) {
188  Heap* heap = map->GetHeap();
189  VisitPointers(heap,
190                   HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
191                   HeapObject::RawField(object, JSWeakMap::kSize));
192   }
193 
194   static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}
195 
196  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
197  Object* obj = *p;
198  if (obj->NonFailureIsHeapObject()) {
199  heap->mark_compact_collector()->RecordSlot(p, p, obj);
200  MarkObject(heap, obj);
201  }
202  }
203 
204  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
205  for (Object** p = start; p < end; p++) {
206  Object* obj = *p;
207  if (obj->NonFailureIsHeapObject()) {
208  heap->mark_compact_collector()->RecordSlot(start, p, obj);
209  MarkObject(heap, obj);
210  }
211  }
212  }
213 
214  // Marks the object grey and pushes it on the marking stack.
215  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
216  HeapObject* heap_object = HeapObject::cast(obj);
217  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
218  if (mark_bit.data_only()) {
219  if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
220         MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
221                                               heap_object->Size());
222  }
223  } else if (Marking::IsWhite(mark_bit)) {
224  heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
225  }
226  }
227 
228  // Marks the object black without pushing it on the marking stack.
229  // Returns true if object needed marking and false otherwise.
230  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
231  HeapObject* heap_object = HeapObject::cast(obj);
232  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
233  if (Marking::IsWhite(mark_bit)) {
234  mark_bit.Set();
235       MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
236                                             heap_object->Size());
237  return true;
238  }
239  return false;
240  }
241 };
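// The visitor above implements the incremental tri-colour discipline: objects
// whose mark bit lives in a data-only region are blackened directly via
// MarkBlackOrKeepGrey (they hold no pointers that would need tracing), other
// white objects are turned grey and pushed onto the marking deque, and every
// visited slot is recorded with RecordSlot so the compactor can update it
// later if the target object moves.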
242 
243 
244 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
245  public:
246   IncrementalMarkingRootMarkingVisitor(Heap* heap,
247                                        IncrementalMarking* incremental_marking)
248  : heap_(heap),
249  incremental_marking_(incremental_marking) {
250  }
251 
252  void VisitPointer(Object** p) {
253  MarkObjectByPointer(p);
254  }
255 
256  void VisitPointers(Object** start, Object** end) {
257  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
258  }
259 
260  private:
261  void MarkObjectByPointer(Object** p) {
262  Object* obj = *p;
263  if (!obj->IsHeapObject()) return;
264 
265  HeapObject* heap_object = HeapObject::cast(obj);
266  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
267  if (mark_bit.data_only()) {
268  if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
269         MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
270                                               heap_object->Size());
271  }
272  } else {
273  if (Marking::IsWhite(mark_bit)) {
274  incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
275  }
276  }
277  }
278 
279  Heap* heap_;
280  IncrementalMarking* incremental_marking_;
281 };
282 
283 
284 void IncrementalMarking::Initialize() {
285   IncrementalMarkingMarkingVisitor::Initialize();
286 }
287 
288 
289 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
290                                               bool is_marking,
291  bool is_compacting) {
292   if (is_marking) {
293     chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
294     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
295 
296     // It's difficult to filter out slots recorded for large objects.
297     if (chunk->owner()->identity() == LO_SPACE &&
298         chunk->size() > static_cast<size_t>(Page::kPageSize) &&
299         is_compacting) {
300       chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
301     }
302   } else if (chunk->owner()->identity() == CELL_SPACE ||
303              chunk->scan_on_scavenge()) {
304     chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
305     chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
306   } else {
307     chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
308     chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
309   }
310 }
311 
312 
313 void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
314  bool is_marking) {
315   chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
316   if (is_marking) {
317     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
318   } else {
319     chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
320   }
321  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
322 }
323 
324 
325 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
326  PagedSpace* space) {
327  PageIterator it(space);
328  while (it.has_next()) {
329  Page* p = it.next();
330  SetOldSpacePageFlags(p, false, false);
331  }
332 }
333 
334 
335 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
336  NewSpace* space) {
337  NewSpacePageIterator it(space);
338  while (it.has_next()) {
339  NewSpacePage* p = it.next();
340  SetNewSpacePageFlags(p, false);
341  }
342 }
343 
344 
345 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
346  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
347  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
348  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
349  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
350  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
351  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
352 
353  LargePage* lop = heap_->lo_space()->first_page();
354  while (lop->is_valid()) {
355  SetOldSpacePageFlags(lop, false, false);
356  lop = lop->next_page();
357  }
358 }
359 
360 
361 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
362  PageIterator it(space);
363  while (it.has_next()) {
364  Page* p = it.next();
365  SetOldSpacePageFlags(p, true, is_compacting_);
366  }
367 }
368 
369 
370 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
371  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
372  while (it.has_next()) {
373  NewSpacePage* p = it.next();
374  SetNewSpacePageFlags(p, true);
375  }
376 }
377 
378 
379 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
380  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
381  ActivateIncrementalWriteBarrier(heap_->old_data_space());
382  ActivateIncrementalWriteBarrier(heap_->cell_space());
383  ActivateIncrementalWriteBarrier(heap_->map_space());
384  ActivateIncrementalWriteBarrier(heap_->code_space());
385  ActivateIncrementalWriteBarrier(heap_->new_space());
386 
387  LargePage* lop = heap_->lo_space()->first_page();
388  while (lop->is_valid()) {
389  SetOldSpacePageFlags(lop, true, is_compacting_);
390  lop = lop->next_page();
391  }
392 }
393 
394 
395 bool IncrementalMarking::WorthActivating() {
396 #ifndef DEBUG
397  static const intptr_t kActivationThreshold = 8 * MB;
398 #else
399  // TODO(gc) consider setting this to some low level so that some
400  // debug tests run with incremental marking and some without.
401  static const intptr_t kActivationThreshold = 0;
402 #endif
403 
404  return !FLAG_expose_gc &&
405  FLAG_incremental_marking &&
406  !Serializer::enabled() &&
407  heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
408 }
409 
410 
411 void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
412   ASSERT(RecordWriteStub::GetMode(stub) ==
413          RecordWriteStub::STORE_BUFFER_ONLY);
414 
415  if (!IsMarking()) {
416  // Initially stub is generated in STORE_BUFFER_ONLY mode thus
417  // we don't need to do anything if incremental marking is
418  // not active.
419  } else if (IsCompacting()) {
420     RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
421   } else {
422     RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
423   }
424 }
425 
426 
427 static void PatchIncrementalMarkingRecordWriteStubs(
428  Heap* heap, RecordWriteStub::Mode mode) {
429  UnseededNumberDictionary* stubs = heap->code_stubs();
430 
431  int capacity = stubs->Capacity();
432  for (int i = 0; i < capacity; i++) {
433  Object* k = stubs->KeyAt(i);
434  if (stubs->IsKey(k)) {
435  uint32_t key = NumberToUint32(k);
436 
437  if (CodeStub::MajorKeyFromKey(key) ==
438  CodeStub::RecordWrite) {
439  Object* e = stubs->ValueAt(i);
440  if (e->IsCode()) {
441           RecordWriteStub::Patch(Code::cast(e), mode);
442         }
443  }
444  }
445  }
446 }
447 
448 
449 void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
450  if (marking_deque_memory_ == NULL) {
451  marking_deque_memory_ = new VirtualMemory(4 * MB);
452  }
453  if (!marking_deque_memory_committed_) {
454  bool success = marking_deque_memory_->Commit(
455  reinterpret_cast<Address>(marking_deque_memory_->address()),
456  marking_deque_memory_->size(),
457  false); // Not executable.
458  CHECK(success);
459  marking_deque_memory_committed_ = true;
460  }
461 }
462 
463 void IncrementalMarking::UncommitMarkingDeque() {
464   if (state_ == STOPPED && marking_deque_memory_committed_) {
465  bool success = marking_deque_memory_->Uncommit(
466  reinterpret_cast<Address>(marking_deque_memory_->address()),
467  marking_deque_memory_->size());
468  CHECK(success);
469  marking_deque_memory_committed_ = false;
470  }
471 }
472 
473 
474 void IncrementalMarking::Start() {
475   if (FLAG_trace_incremental_marking) {
476  PrintF("[IncrementalMarking] Start\n");
477  }
478  ASSERT(FLAG_incremental_marking);
479  ASSERT(state_ == STOPPED);
480 
481  ResetStepCounters();
482 
483  if (heap_->old_pointer_space()->IsSweepingComplete() &&
484  heap_->old_data_space()->IsSweepingComplete()) {
485  StartMarking(ALLOW_COMPACTION);
486  } else {
487  if (FLAG_trace_incremental_marking) {
488  PrintF("[IncrementalMarking] Start sweeping.\n");
489  }
490  state_ = SWEEPING;
491  }
492 
493   heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
494 }
495 
496 
497 static void MarkObjectGreyDoNotEnqueue(Object* obj) {
498  if (obj->IsHeapObject()) {
499  HeapObject* heap_obj = HeapObject::cast(obj);
500     MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
501     if (Marking::IsBlack(mark_bit)) {
502       MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
503                                             -heap_obj->Size());
504  }
505  Marking::AnyToGrey(mark_bit);
506  }
507 }
508 
509 
510 void IncrementalMarking::StartMarking(CompactionFlag flag) {
511  if (FLAG_trace_incremental_marking) {
512  PrintF("[IncrementalMarking] Start marking\n");
513  }
514 
515  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
516       heap_->mark_compact_collector()->StartCompaction(
517           MarkCompactCollector::INCREMENTAL_COMPACTION);
518 
519  state_ = MARKING;
520 
521  RecordWriteStub::Mode mode = is_compacting_ ?
522       RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
523 
524  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
525 
526  EnsureMarkingDequeIsCommitted();
527 
528  // Initialize marking stack.
529  Address addr = static_cast<Address>(marking_deque_memory_->address());
530  size_t size = marking_deque_memory_->size();
531  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
532  marking_deque_.Initialize(addr, addr + size);
533 
534  ActivateIncrementalWriteBarrier();
535 
536  // Marking bits are cleared by the sweeper.
537 #ifdef VERIFY_HEAP
538  if (FLAG_verify_heap) {
539  heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
540  }
541 #endif
542 
543   heap_->CompletelyClearInstanceofCache();
544   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
545 
546  if (FLAG_cleanup_code_caches_at_gc) {
547  // We will mark cache black with a separate pass
548  // when we finish marking.
549  MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
550  }
551 
552  // Mark strong roots grey.
553  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
554  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
555 
556  // Ready to start incremental marking.
557  if (FLAG_trace_incremental_marking) {
558  PrintF("[IncrementalMarking] Running\n");
559  }
560 }
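// StartMarking() thus performs the whole transition into incremental marking:
// it decides whether this cycle may also compact, patches the record-write
// stubs accordingly, commits and initializes the marking deque, activates the
// incremental write barrier on every space, and seeds the deque by marking the
// strong roots grey.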
561 
562 
563 void IncrementalMarking::PrepareForScavenge() {
564   if (!IsMarking()) return;
565  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
566  heap_->new_space()->FromSpaceEnd());
567  while (it.has_next()) {
568  Bitmap::Clear(it.next());
569  }
570 }
571 
572 
573 void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
574   if (!IsMarking()) return;
575 
576  int current = marking_deque_.bottom();
577  int mask = marking_deque_.mask();
578  int limit = marking_deque_.top();
579  HeapObject** array = marking_deque_.array();
580  int new_top = current;
581 
582  Map* filler_map = heap_->one_pointer_filler_map();
583 
584  while (current != limit) {
585  HeapObject* obj = array[current];
586  ASSERT(obj->IsHeapObject());
587  current = ((current + 1) & mask);
588  if (heap_->InNewSpace(obj)) {
589  MapWord map_word = obj->map_word();
590  if (map_word.IsForwardingAddress()) {
591  HeapObject* dest = map_word.ToForwardingAddress();
592  array[new_top] = dest;
593  new_top = ((new_top + 1) & mask);
594  ASSERT(new_top != marking_deque_.bottom());
595 #ifdef DEBUG
596  MarkBit mark_bit = Marking::MarkBitFrom(obj);
597  ASSERT(Marking::IsGrey(mark_bit) ||
598  (obj->IsFiller() && Marking::IsWhite(mark_bit)));
599 #endif
600  }
601  } else if (obj->map() != filler_map) {
602  // Skip one word filler objects that appear on the
603  // stack when we perform in place array shift.
604  array[new_top] = obj;
605  new_top = ((new_top + 1) & mask);
606  ASSERT(new_top != marking_deque_.bottom());
607 #ifdef DEBUG
608  MarkBit mark_bit = Marking::MarkBitFrom(obj);
609  ASSERT(Marking::IsGrey(mark_bit) ||
610  (obj->IsFiller() && Marking::IsWhite(mark_bit)));
611 #endif
612  }
613  }
614  marking_deque_.set_top(new_top);
615 
616  steps_took_since_last_gc_ = 0;
617  steps_count_since_last_gc_ = 0;
618  longest_step_ = 0.0;
619 }
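// After a scavenge the deque is compacted in place: entries that were
// evacuated out of new space are replaced by their forwarding addresses,
// new-space objects that did not survive are dropped, and one-word fillers
// left behind by in-place array operations are skipped.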
620 
621 
623  if (state() == MARKING) {
624  double start = 0.0;
625  if (FLAG_trace_incremental_marking) {
626  PrintF("[IncrementalMarking] Hurry\n");
627  start = OS::TimeCurrentMillis();
628  }
629  // TODO(gc) hurry can mark objects it encounters black as mutator
630  // was stopped.
631  Map* filler_map = heap_->one_pointer_filler_map();
632  Map* native_context_map = heap_->native_context_map();
633  while (!marking_deque_.IsEmpty()) {
634  HeapObject* obj = marking_deque_.Pop();
635 
636  // Explicitly skip one word fillers. Incremental markbit patterns are
637  // correct only for objects that occupy at least two words.
638  Map* map = obj->map();
639  if (map == filler_map) {
640  continue;
641  } else if (map == native_context_map) {
642  // Native contexts have weak fields.
643         IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
644       } else {
645  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
646  if (Marking::IsWhite(map_mark_bit)) {
647  WhiteToGreyAndPush(map, map_mark_bit);
648  }
649         IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
650       }
651 
652  MarkBit mark_bit = Marking::MarkBitFrom(obj);
653  ASSERT(!Marking::IsBlack(mark_bit));
654  Marking::MarkBlack(mark_bit);
655       MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
656     }
657  state_ = COMPLETE;
658  if (FLAG_trace_incremental_marking) {
659  double end = OS::TimeCurrentMillis();
660  PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
661  static_cast<int>(end - start));
662  }
663  }
664 
665  if (FLAG_cleanup_code_caches_at_gc) {
666  PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
667     Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
668     MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
669                                           PolymorphicCodeCache::kSize);
670   }
671 
672  Object* context = heap_->native_contexts_list();
673  while (!context->IsUndefined()) {
674  // GC can happen when the context is not fully initialized,
675  // so the cache can be undefined.
676  HeapObject* cache = HeapObject::cast(
677         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
678     if (!cache->IsUndefined()) {
679  MarkBit mark_bit = Marking::MarkBitFrom(cache);
680  if (Marking::IsGrey(mark_bit)) {
681  Marking::GreyToBlack(mark_bit);
682         MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
683       }
684  }
685  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
686  }
687 }
688 
689 
690 void IncrementalMarking::Abort() {
691   if (IsStopped()) return;
692  if (FLAG_trace_incremental_marking) {
693  PrintF("[IncrementalMarking] Aborting.\n");
694  }
695   heap_->new_space()->LowerInlineAllocationLimit(0);
696   IncrementalMarking::set_should_hurry(false);
697   ResetStepCounters();
698  if (IsMarking()) {
699  PatchIncrementalMarkingRecordWriteStubs(heap_,
700                                             RecordWriteStub::STORE_BUFFER_ONLY);
701     DeactivateIncrementalWriteBarrier();
702 
703  if (is_compacting_) {
704  LargeObjectIterator it(heap_->lo_space());
705  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
706  Page* p = Page::FromAddress(obj->address());
707         if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
708           p->ClearFlag(Page::RESCAN_ON_EVACUATION);
709         }
710  }
711  }
712  }
713  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
714  state_ = STOPPED;
715  is_compacting_ = false;
716 }
717 
718 
719 void IncrementalMarking::Finalize() {
720   Hurry();
721  state_ = STOPPED;
722  is_compacting_ = false;
723   heap_->new_space()->LowerInlineAllocationLimit(0);
724   IncrementalMarking::set_should_hurry(false);
725   ResetStepCounters();
726  PatchIncrementalMarkingRecordWriteStubs(heap_,
727                                           RecordWriteStub::STORE_BUFFER_ONLY);
728   DeactivateIncrementalWriteBarrier();
729  ASSERT(marking_deque_.IsEmpty());
730  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
731 }
732 
733 
734 void IncrementalMarking::MarkingComplete(CompletionAction action) {
735   state_ = COMPLETE;
736  // We will set the stack guard to request a GC now. This will mean the rest
737  // of the GC gets performed as soon as possible (we can't do a GC here in a
738  // record-write context). If a few things get allocated between now and then
739  // that shouldn't make us do a scavenge and keep being incremental, so we set
740  // the should-hurry flag to indicate that there can't be much work left to do.
741  set_should_hurry(true);
742  if (FLAG_trace_incremental_marking) {
743  PrintF("[IncrementalMarking] Complete (normal).\n");
744  }
745  if (action == GC_VIA_STACK_GUARD) {
746  heap_->isolate()->stack_guard()->RequestGC();
747  }
748 }
749 
750 
751 void IncrementalMarking::Step(intptr_t allocated_bytes,
752  CompletionAction action) {
753  if (heap_->gc_state() != Heap::NOT_IN_GC ||
754  !FLAG_incremental_marking ||
755  !FLAG_incremental_marking_steps ||
756  (state_ != SWEEPING && state_ != MARKING)) {
757  return;
758  }
759 
760  allocated_ += allocated_bytes;
761 
762  if (allocated_ < kAllocatedThreshold &&
763  write_barriers_invoked_since_last_step_ <
764           kWriteBarriersInvokedThreshold) {
765     return;
766  }
767 
768  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
769 
770  // The marking speed is driven either by the allocation rate or by the rate
771  // at which we are having to check the color of objects in the write barrier.
772  // It is possible for a tight non-allocating loop to run a lot of write
773  // barriers before we get here and check them (marking can only take place on
774  // allocation), so to reduce the lumpiness we don't use the write barriers
775  // invoked since last step directly to determine the amount of work to do.
776  intptr_t bytes_to_process =
777  marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
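  // Illustrative numbers only: with marking_speed_ == 8 and 512 KB allocated
  // since the previous step (assuming that exceeds
  // kWriteBarriersInvokedThreshold), this computes 8 * 512 KB == 4 MB of
  // objects to scan in the current step.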
778  allocated_ = 0;
779  write_barriers_invoked_since_last_step_ = 0;
780 
781  bytes_scanned_ += bytes_to_process;
782 
783  double start = 0;
784 
785  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
786  start = OS::TimeCurrentMillis();
787  }
788 
789  if (state_ == SWEEPING) {
790  if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
791  bytes_scanned_ = 0;
792  StartMarking(PREVENT_COMPACTION);
793  }
794  } else if (state_ == MARKING) {
795  Map* filler_map = heap_->one_pointer_filler_map();
796  Map* native_context_map = heap_->native_context_map();
797  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
798  HeapObject* obj = marking_deque_.Pop();
799 
800  // Explicitly skip one word fillers. Incremental markbit patterns are
801  // correct only for objects that occupy at least two words.
802  Map* map = obj->map();
803  if (map == filler_map) continue;
804 
805  int size = obj->SizeFromMap(map);
806  bytes_to_process -= size;
807  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
808  if (Marking::IsWhite(map_mark_bit)) {
809  WhiteToGreyAndPush(map, map_mark_bit);
810  }
811 
812  // TODO(gc) switch to static visitor instead of normal visitor.
813  if (map == native_context_map) {
814  // Native contexts have weak fields.
815  Context* ctx = Context::cast(obj);
816 
817  // We will mark cache black with a separate pass
818  // when we finish marking.
819  MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
820 
821         IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
822       } else {
823         IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
824       }
825 
826  MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
827  SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
828  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
829  Marking::MarkBlack(obj_mark_bit);
830       MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
831     }
832  if (marking_deque_.IsEmpty()) MarkingComplete(action);
833  }
834 
835  steps_count_++;
836  steps_count_since_last_gc_++;
837 
838  bool speed_up = false;
839 
840  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
841  if (FLAG_trace_gc) {
842  PrintPID("Speed up marking after %d steps\n",
843  static_cast<int>(kMarkingSpeedAccellerationInterval));
844  }
845  speed_up = true;
846  }
847 
848  bool space_left_is_very_small =
849  (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
850 
851  bool only_1_nth_of_space_that_was_available_still_left =
852  (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
853  old_generation_space_available_at_start_of_incremental_);
854 
855  if (space_left_is_very_small ||
856  only_1_nth_of_space_that_was_available_still_left) {
857  if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
858  speed_up = true;
859  }
860 
861  bool size_of_old_space_multiplied_by_n_during_marking =
862  (heap_->PromotedTotalSize() >
863  (marking_speed_ + 1) *
864  old_generation_space_used_at_start_of_incremental_);
865  if (size_of_old_space_multiplied_by_n_during_marking) {
866  speed_up = true;
867  if (FLAG_trace_gc) {
868  PrintPID("Speed up marking because of heap size increase\n");
869  }
870  }
871 
872  int64_t promoted_during_marking = heap_->PromotedTotalSize()
873  - old_generation_space_used_at_start_of_incremental_;
874  intptr_t delay = marking_speed_ * MB;
875  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
876 
877  // We try to scan at at least twice the speed that we are allocating.
878  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
879  if (FLAG_trace_gc) {
880  PrintPID("Speed up marking because marker was not keeping up\n");
881  }
882  speed_up = true;
883  }
884 
885  if (speed_up) {
886  if (state_ != MARKING) {
887  if (FLAG_trace_gc) {
888  PrintPID("Postponing speeding up marking until marking starts\n");
889  }
890  } else {
891  marking_speed_ += kMarkingSpeedAccellerationInterval;
892  marking_speed_ = static_cast<int>(
893           Min(kMaxMarkingSpeed,
894               static_cast<intptr_t>(marking_speed_ * 1.3)));
895  if (FLAG_trace_gc) {
896  PrintPID("Marking speed increased to %d\n", marking_speed_);
897  }
898  }
899  }
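// Four independent heuristics can set speed_up above: a fixed step interval
// (kMarkingSpeedAccellerationInterval), very little old-generation headroom
// left, old-generation growth outpacing the current marking speed, and
// promotion running ahead of scanning. When any of them fires while marking is
// active, marking_speed_ is bumped by the interval constant, scaled by 1.3,
// and capped at kMaxMarkingSpeed.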
900 
901  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
902  double end = OS::TimeCurrentMillis();
903  double delta = (end - start);
904  longest_step_ = Max(longest_step_, delta);
905  steps_took_ += delta;
906  steps_took_since_last_gc_ += delta;
907  }
908 }
909 
910 
911 void IncrementalMarking::ResetStepCounters() {
912  steps_count_ = 0;
913  steps_took_ = 0;
914  longest_step_ = 0.0;
915  old_generation_space_available_at_start_of_incremental_ =
916  SpaceLeftInOldSpace();
917  old_generation_space_used_at_start_of_incremental_ =
918  heap_->PromotedTotalSize();
919  steps_count_since_last_gc_ = 0;
920  steps_took_since_last_gc_ = 0;
921  bytes_rescanned_ = 0;
922  marking_speed_ = kInitialMarkingSpeed;
923  bytes_scanned_ = 0;
924  write_barriers_invoked_since_last_step_ = 0;
925 }
926 
927 
928 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
929  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
930 }
931 
932 } } // namespace v8::internal