v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
incremental-marking.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "incremental-marking.h"
31 
32 #include "code-stubs.h"
33 #include "compilation-cache.h"
34 #include "v8conversions.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 IncrementalMarking::IncrementalMarking(Heap* heap)
41  : heap_(heap),
42  state_(STOPPED),
43  marking_deque_memory_(NULL),
44  marking_deque_memory_committed_(false),
45  marker_(this, heap->mark_compact_collector()),
46  steps_count_(0),
47  steps_took_(0),
48  longest_step_(0.0),
49  old_generation_space_available_at_start_of_incremental_(0),
50  old_generation_space_used_at_start_of_incremental_(0),
51  steps_count_since_last_gc_(0),
52  steps_took_since_last_gc_(0),
53  should_hurry_(false),
54  allocation_marking_factor_(0),
55  allocated_(0),
56  no_marking_scope_depth_(0) {
57 }
58 
59 
60 IncrementalMarking::~IncrementalMarking() {
61  delete marking_deque_memory_;
62 }
63 
64 
65 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
66  Object** slot,
67  Object* value) {
68  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
69  MarkBit obj_bit = Marking::MarkBitFrom(obj);
70  if (Marking::IsBlack(obj_bit)) {
71  // Object is not going to be rescanned. We need to record the slot.
72  heap_->mark_compact_collector()->RecordSlot(
73  HeapObject::RawField(obj, 0), slot, value);
74  }
75  }
76 }
77 
78 
79 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
80  Object* value,
81  Isolate* isolate) {
82  ASSERT(obj->IsHeapObject());
83 
84  // Fast cases should already be covered by RecordWriteStub.
85  ASSERT(value->IsHeapObject());
86  ASSERT(!value->IsHeapNumber());
87  ASSERT(!value->IsString() ||
88  value->IsConsString() ||
89  value->IsSlicedString());
90  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
91 
92  IncrementalMarking* marking = isolate->heap()->incremental_marking();
93  ASSERT(!marking->is_compacting_);
94  marking->RecordWrite(obj, NULL, value);
95 }
96 
97 
98 void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
99  Object** slot,
100  Isolate* isolate) {
101  IncrementalMarking* marking = isolate->heap()->incremental_marking();
102  ASSERT(marking->is_compacting_);
103  marking->RecordWrite(obj, slot, *slot);
104 }
105 
106 
107 void IncrementalMarking::RecordCodeTargetPatch(Code* host,
108  Address pc,
109  HeapObject* value) {
110  if (IsMarking()) {
111  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
112  RecordWriteIntoCode(host, &rinfo, value);
113  }
114 }
115 
116 
117 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
118  if (IsMarking()) {
119  Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
120  GcSafeFindCodeForInnerPointer(pc);
121  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
122  RecordWriteIntoCode(host, &rinfo, value);
123  }
124 }
125 
126 
127 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
128  Object** slot,
129  Code* value) {
130  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
131  ASSERT(slot != NULL);
132  heap_->mark_compact_collector()->
133  RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
134  }
135 }
136 
137 
138 void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
139  RelocInfo* rinfo,
140  Object* value) {
141  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
142  if (Marking::IsWhite(value_bit)) {
143  MarkBit obj_bit = Marking::MarkBitFrom(obj);
144  if (Marking::IsBlack(obj_bit)) {
145  BlackToGreyAndUnshift(obj, obj_bit);
146  RestartIfNotMarking();
147  }
148  // Object is either grey or white. It will be scanned if it survives.
149  return;
150  }
151 
152  if (is_compacting_) {
153  MarkBit obj_bit = Marking::MarkBitFrom(obj);
154  if (Marking::IsBlack(obj_bit)) {
155  // Object is not going to be rescanned. We need to record the slot.
156  heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
157  Code::cast(value));
158  }
159  }
160 }
161 
162 
163 class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
164  public:
165  IncrementalMarkingMarkingVisitor(Heap* heap,
166  IncrementalMarking* incremental_marking)
167  : heap_(heap),
168  incremental_marking_(incremental_marking) {
169  }
170 
171  void VisitEmbeddedPointer(RelocInfo* rinfo) {
172  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
173  Object* target = rinfo->target_object();
174  if (target->NonFailureIsHeapObject()) {
175  heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
176  MarkObject(target);
177  }
178  }
179 
180  void VisitCodeTarget(RelocInfo* rinfo) {
181  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
182  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
183  if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
184  && (target->ic_age() != heap_->global_ic_age())) {
185  IC::Clear(rinfo->pc());
186  target = Code::GetCodeFromTargetAddress(rinfo->target_address());
187  }
188  heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
189  MarkObject(target);
190  }
191 
192  void VisitDebugTarget(RelocInfo* rinfo) {
193  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
194  rinfo->IsPatchedReturnSequence()) ||
195  (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
196  rinfo->IsPatchedDebugBreakSlotSequence()));
197  Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
198  heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
199  MarkObject(target);
200  }
201 
202  void VisitCodeEntry(Address entry_address) {
203  Object* target = Code::GetObjectFromEntryAddress(entry_address);
204  heap_->mark_compact_collector()->
205  RecordCodeEntrySlot(entry_address, Code::cast(target));
206  MarkObject(target);
207  }
208 
209  void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
210  if (shared->ic_age() != heap_->global_ic_age()) {
211  shared->ResetForNewContext(heap_->global_ic_age());
212  }
213  }
214 
215  void VisitPointer(Object** p) {
216  Object* obj = *p;
217  if (obj->NonFailureIsHeapObject()) {
218  heap_->mark_compact_collector()->RecordSlot(p, p, obj);
219  MarkObject(obj);
220  }
221  }
222 
223  void VisitPointers(Object** start, Object** end) {
224  for (Object** p = start; p < end; p++) {
225  Object* obj = *p;
226  if (obj->NonFailureIsHeapObject()) {
227  heap_->mark_compact_collector()->RecordSlot(start, p, obj);
228  MarkObject(obj);
229  }
230  }
231  }
232 
233  private:
234  // Mark object pointed to by p.
235  INLINE(void MarkObject(Object* obj)) {
236  HeapObject* heap_object = HeapObject::cast(obj);
237  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
238  if (mark_bit.data_only()) {
239  if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
240  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
241  heap_object->Size());
242  }
243  } else if (Marking::IsWhite(mark_bit)) {
244  incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
245  }
246  }
247 
248  Heap* heap_;
249  IncrementalMarking* incremental_marking_;
250 };
251 
252 
253 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
254  public:
255  IncrementalMarkingRootMarkingVisitor(Heap* heap,
256  IncrementalMarking* incremental_marking)
257  : heap_(heap),
258  incremental_marking_(incremental_marking) {
259  }
260 
261  void VisitPointer(Object** p) {
262  MarkObjectByPointer(p);
263  }
264 
265  void VisitPointers(Object** start, Object** end) {
266  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
267  }
268 
269  private:
270  void MarkObjectByPointer(Object** p) {
271  Object* obj = *p;
272  if (!obj->IsHeapObject()) return;
273 
274  HeapObject* heap_object = HeapObject::cast(obj);
275  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
276  if (mark_bit.data_only()) {
277  if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
278  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
279  heap_object->Size());
280  }
281  } else {
282  if (Marking::IsWhite(mark_bit)) {
283  incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
284  }
285  }
286  }
287 
288  Heap* heap_;
289  IncrementalMarking* incremental_marking_;
290 };
291 
292 
293 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
294  bool is_marking,
295  bool is_compacting) {
296  if (is_marking) {
297  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
298  chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
299 
300  // It's difficult to filter out slots recorded for large objects.
301  if (chunk->owner()->identity() == LO_SPACE &&
302  chunk->size() > static_cast<size_t>(Page::kPageSize) &&
303  is_compacting) {
304  chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
305  }
306  } else if (chunk->owner()->identity() == CELL_SPACE ||
307  chunk->scan_on_scavenge()) {
308  chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
309  chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
310  } else {
311  chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
312  chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
313  }
314 }
315 
316 
317 void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
318  bool is_marking) {
319  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
320  if (is_marking) {
321  chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
322  } else {
323  chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
324  }
325  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
326 }
327 
328 
329 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
330  PagedSpace* space) {
331  PageIterator it(space);
332  while (it.has_next()) {
333  Page* p = it.next();
334  SetOldSpacePageFlags(p, false, false);
335  }
336 }
337 
338 
339 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
340  NewSpace* space) {
341  NewSpacePageIterator it(space);
342  while (it.has_next()) {
343  NewSpacePage* p = it.next();
344  SetNewSpacePageFlags(p, false);
345  }
346 }
347 
348 
349 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
350  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
351  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
352  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
353  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
354  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
355  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
356 
357  LargePage* lop = heap_->lo_space()->first_page();
358  while (lop->is_valid()) {
359  SetOldSpacePageFlags(lop, false, false);
360  lop = lop->next_page();
361  }
362 }
363 
364 
365 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
366  PageIterator it(space);
367  while (it.has_next()) {
368  Page* p = it.next();
369  SetOldSpacePageFlags(p, true, is_compacting_);
370  }
371 }
372 
373 
374 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
375  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
376  while (it.has_next()) {
377  NewSpacePage* p = it.next();
378  SetNewSpacePageFlags(p, true);
379  }
380 }
381 
382 
383 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
384  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
385  ActivateIncrementalWriteBarrier(heap_->old_data_space());
386  ActivateIncrementalWriteBarrier(heap_->cell_space());
387  ActivateIncrementalWriteBarrier(heap_->map_space());
388  ActivateIncrementalWriteBarrier(heap_->code_space());
389  ActivateIncrementalWriteBarrier(heap_->new_space());
390 
391  LargePage* lop = heap_->lo_space()->first_page();
392  while (lop->is_valid()) {
393  SetOldSpacePageFlags(lop, true, is_compacting_);
394  lop = lop->next_page();
395  }
396 }
397 
398 
399 bool IncrementalMarking::WorthActivating() {
400 #ifndef DEBUG
401  static const intptr_t kActivationThreshold = 8 * MB;
402 #else
403  // TODO(gc) consider setting this to some low level so that some
404  // debug tests run with incremental marking and some without.
405  static const intptr_t kActivationThreshold = 0;
406 #endif
407 
408  return !FLAG_expose_gc &&
409  FLAG_incremental_marking &&
410  !Serializer::enabled() &&
411  heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
412 }
413 
414 
415 void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
416  ASSERT(RecordWriteStub::GetMode(stub) ==
417  RecordWriteStub::STORE_BUFFER_ONLY);
418 
419  if (!IsMarking()) {
420  // Initially stub is generated in STORE_BUFFER_ONLY mode thus
421  // we don't need to do anything if incremental marking is
422  // not active.
423  } else if (IsCompacting()) {
424  RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
425  } else {
426  RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
427  }
428 }
429 
430 
431 static void PatchIncrementalMarkingRecordWriteStubs(
432  Heap* heap, RecordWriteStub::Mode mode) {
433  UnseededNumberDictionary* stubs = heap->code_stubs();
434 
435  int capacity = stubs->Capacity();
436  for (int i = 0; i < capacity; i++) {
437  Object* k = stubs->KeyAt(i);
438  if (stubs->IsKey(k)) {
439  uint32_t key = NumberToUint32(k);
440 
441  if (CodeStub::MajorKeyFromKey(key) ==
442  CodeStub::RecordWrite) {
443  Object* e = stubs->ValueAt(i);
444  if (e->IsCode()) {
445  RecordWriteStub::Patch(Code::cast(e), mode);
446  }
447  }
448  }
449  }
450 }
451 
452 
453 void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
454  if (marking_deque_memory_ == NULL) {
455  marking_deque_memory_ = new VirtualMemory(4 * MB);
456  }
457  if (!marking_deque_memory_committed_) {
458  bool success = marking_deque_memory_->Commit(
459  reinterpret_cast<Address>(marking_deque_memory_->address()),
460  marking_deque_memory_->size(),
461  false); // Not executable.
462  CHECK(success);
463  marking_deque_memory_committed_ = true;
464  }
465 }
466 
467 void IncrementalMarking::UncommitMarkingDeque() {
468  if (state_ == STOPPED && marking_deque_memory_committed_) {
469  bool success = marking_deque_memory_->Uncommit(
470  reinterpret_cast<Address>(marking_deque_memory_->address()),
471  marking_deque_memory_->size());
472  CHECK(success);
473  marking_deque_memory_committed_ = false;
474  }
475 }
476 
477 
478 void IncrementalMarking::Start() {
479  if (FLAG_trace_incremental_marking) {
480  PrintF("[IncrementalMarking] Start\n");
481  }
482  ASSERT(FLAG_incremental_marking);
483  ASSERT(state_ == STOPPED);
484 
485  ResetStepCounters();
486 
487  if (heap_->old_pointer_space()->IsSweepingComplete() &&
488  heap_->old_data_space()->IsSweepingComplete()) {
489  StartMarking(ALLOW_COMPACTION);
490  } else {
491  if (FLAG_trace_incremental_marking) {
492  PrintF("[IncrementalMarking] Start sweeping.\n");
493  }
494  state_ = SWEEPING;
495  }
496 
497  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
498 }
499 
500 
501 static void MarkObjectGreyDoNotEnqueue(Object* obj) {
502  if (obj->IsHeapObject()) {
503  HeapObject* heap_obj = HeapObject::cast(obj);
504  MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
505  if (Marking::IsBlack(mark_bit)) {
506  MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
507  -heap_obj->Size());
508  }
509  Marking::AnyToGrey(mark_bit);
510  }
511 }
512 
513 
514 void IncrementalMarking::StartMarking(CompactionFlag flag) {
515  if (FLAG_trace_incremental_marking) {
516  PrintF("[IncrementalMarking] Start marking\n");
517  }
518 
519  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
520  heap_->mark_compact_collector()->StartCompaction(
521  MarkCompactCollector::INCREMENTAL_COMPACTION);
522 
523  state_ = MARKING;
524 
525  RecordWriteStub::Mode mode = is_compacting_ ?
526  RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
527 
528  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
529 
530  EnsureMarkingDequeIsCommitted();
531 
532  // Initialize marking stack.
533  Address addr = static_cast<Address>(marking_deque_memory_->address());
534  size_t size = marking_deque_memory_->size();
535  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
536  marking_deque_.Initialize(addr, addr + size);
537 
538  ActivateIncrementalWriteBarrier();
539 
540 #ifdef DEBUG
541  // Marking bits are cleared by the sweeper.
542  if (FLAG_verify_heap) {
543  heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
544  }
545 #endif
546 
547  heap_->CompletelyClearInstanceofCache();
548  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
549 
550  if (FLAG_cleanup_code_caches_at_gc) {
551  // We will mark cache black with a separate pass
552  // when we finish marking.
553  MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
554  }
555 
556  // Mark strong roots grey.
557  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
558  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
559 
560  // Ready to start incremental marking.
561  if (FLAG_trace_incremental_marking) {
562  PrintF("[IncrementalMarking] Running\n");
563  }
564 }
565 
566 
567 void IncrementalMarking::PrepareForScavenge() {
568  if (!IsMarking()) return;
569  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
570  heap_->new_space()->FromSpaceEnd());
571  while (it.has_next()) {
572  Bitmap::Clear(it.next());
573  }
574 }
575 
576 
578  if (!IsMarking()) return;
579 
580  int current = marking_deque_.bottom();
581  int mask = marking_deque_.mask();
582  int limit = marking_deque_.top();
583  HeapObject** array = marking_deque_.array();
584  int new_top = current;
585 
586  Map* filler_map = heap_->one_pointer_filler_map();
587 
588  while (current != limit) {
589  HeapObject* obj = array[current];
590  ASSERT(obj->IsHeapObject());
591  current = ((current + 1) & mask);
592  if (heap_->InNewSpace(obj)) {
593  MapWord map_word = obj->map_word();
594  if (map_word.IsForwardingAddress()) {
595  HeapObject* dest = map_word.ToForwardingAddress();
596  array[new_top] = dest;
597  new_top = ((new_top + 1) & mask);
598  ASSERT(new_top != marking_deque_.bottom());
599 #ifdef DEBUG
600  MarkBit mark_bit = Marking::MarkBitFrom(obj);
601  ASSERT(Marking::IsGrey(mark_bit) ||
602  (obj->IsFiller() && Marking::IsWhite(mark_bit)));
603 #endif
604  }
605  } else if (obj->map() != filler_map) {
606  // Skip one word filler objects that appear on the
607  // stack when we perform in place array shift.
608  array[new_top] = obj;
609  new_top = ((new_top + 1) & mask);
610  ASSERT(new_top != marking_deque_.bottom());
611 #ifdef DEBUG
612  MarkBit mark_bit = Marking::MarkBitFrom(obj);
613  ASSERT(Marking::IsGrey(mark_bit) ||
614  (obj->IsFiller() && Marking::IsWhite(mark_bit)));
615 #endif
616  }
617  }
618  marking_deque_.set_top(new_top);
619 
620  steps_took_since_last_gc_ = 0;
621  steps_count_since_last_gc_ = 0;
622  longest_step_ = 0.0;
623 }
624 
625 
626 void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
627  v->VisitPointers(
628  HeapObject::RawField(
629  ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
630  HeapObject::RawField(
631  ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
632 
633  MarkCompactCollector* collector = heap_->mark_compact_collector();
634  for (int idx = Context::FIRST_WEAK_SLOT;
635  idx < Context::GLOBAL_CONTEXT_SLOTS;
636  ++idx) {
637  Object** slot =
638  HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
639  collector->RecordSlot(slot, slot, *slot);
640  }
641 }
642 
643 
644 void IncrementalMarking::Hurry() {
645  if (state() == MARKING) {
646  double start = 0.0;
647  if (FLAG_trace_incremental_marking) {
648  PrintF("[IncrementalMarking] Hurry\n");
649  start = OS::TimeCurrentMillis();
650  }
651  // TODO(gc) hurry can mark objects it encounters black as mutator
652  // was stopped.
653  Map* filler_map = heap_->one_pointer_filler_map();
654  Map* global_context_map = heap_->global_context_map();
655  IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
656  while (!marking_deque_.IsEmpty()) {
657  HeapObject* obj = marking_deque_.Pop();
658 
659  // Explicitly skip one word fillers. Incremental markbit patterns are
660  // correct only for objects that occupy at least two words.
661  Map* map = obj->map();
662  if (map == filler_map) {
663  continue;
664  } else if (map == global_context_map) {
665  // Global contexts have weak fields.
666  VisitGlobalContext(Context::cast(obj), &marking_visitor);
667  } else if (map->instance_type() == MAP_TYPE) {
668  Map* map = Map::cast(obj);
669  heap_->ClearCacheOnMap(map);
670 
671  // When map collection is enabled we have to mark through map's
672  // transitions and back pointers in a special way to make these links
673  // weak. Only maps for subclasses of JSReceiver can have transitions.
674  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
675  if (FLAG_collect_maps &&
676  map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
677  marker_.MarkMapContents(map);
678  } else {
679  marking_visitor.VisitPointers(
680  HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
681  HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
682  }
683  } else {
684  obj->Iterate(&marking_visitor);
685  }
686 
687  MarkBit mark_bit = Marking::MarkBitFrom(obj);
688  ASSERT(!Marking::IsBlack(mark_bit));
689  Marking::MarkBlack(mark_bit);
690  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
691  }
692  state_ = COMPLETE;
693  if (FLAG_trace_incremental_marking) {
694  double end = OS::TimeCurrentMillis();
695  PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
696  static_cast<int>(end - start));
697  }
698  }
699 
700  if (FLAG_cleanup_code_caches_at_gc) {
701  PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
702  Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
703  MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
704  PolymorphicCodeCache::kSize);
705  }
706 
707  Object* context = heap_->global_contexts_list();
708  while (!context->IsUndefined()) {
709  // GC can happen when the context is not fully initialized,
710  // so the cache can be undefined.
711  HeapObject* cache = HeapObject::cast(
712  Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
713  if (!cache->IsUndefined()) {
714  MarkBit mark_bit = Marking::MarkBitFrom(cache);
715  if (Marking::IsGrey(mark_bit)) {
716  Marking::GreyToBlack(mark_bit);
717  MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
718  }
719  }
720  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
721  }
722 }
723 
724 
725 void IncrementalMarking::Abort() {
726  if (IsStopped()) return;
727  if (FLAG_trace_incremental_marking) {
728  PrintF("[IncrementalMarking] Aborting.\n");
729  }
730  heap_->new_space()->LowerInlineAllocationLimit(0);
731  IncrementalMarking::set_should_hurry(false);
732  ResetStepCounters();
733  if (IsMarking()) {
734  PatchIncrementalMarkingRecordWriteStubs(heap_,
735  RecordWriteStub::STORE_BUFFER_ONLY);
736  DeactivateIncrementalWriteBarrier();
737 
738  if (is_compacting_) {
739  LargeObjectIterator it(heap_->lo_space());
740  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
741  Page* p = Page::FromAddress(obj->address());
742  if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
743  p->ClearFlag(Page::RESCAN_ON_EVACUATION);
744  }
745  }
746  }
747  }
748  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
749  state_ = STOPPED;
750  is_compacting_ = false;
751 }
752 
753 
754 void IncrementalMarking::Finalize() {
755  Hurry();
756  state_ = STOPPED;
757  is_compacting_ = false;
758  heap_->new_space()->LowerInlineAllocationLimit(0);
759  IncrementalMarking::set_should_hurry(false);
760  ResetStepCounters();
761  PatchIncrementalMarkingRecordWriteStubs(heap_,
762  RecordWriteStub::STORE_BUFFER_ONLY);
763  DeactivateIncrementalWriteBarrier();
764  ASSERT(marking_deque_.IsEmpty());
765  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
766 }
767 
768 
769 void IncrementalMarking::MarkingComplete(CompletionAction action) {
770  state_ = COMPLETE;
771  // We will set the stack guard to request a GC now. This will mean the rest
772  // of the GC gets performed as soon as possible (we can't do a GC here in a
773  // record-write context). If a few things get allocated between now and then,
774  // that shouldn't make us do a scavenge and keep being incremental, so we set
775  // the should-hurry flag to indicate that there can't be much work left to do.
776  set_should_hurry(true);
777  if (FLAG_trace_incremental_marking) {
778  PrintF("[IncrementalMarking] Complete (normal).\n");
779  }
780  if (action == GC_VIA_STACK_GUARD) {
781  heap_->isolate()->stack_guard()->RequestGC();
782  }
783 }
784 
785 
786 void IncrementalMarking::Step(intptr_t allocated_bytes,
787  CompletionAction action) {
788  if (heap_->gc_state() != Heap::NOT_IN_GC ||
789  !FLAG_incremental_marking ||
790  !FLAG_incremental_marking_steps ||
791  (state_ != SWEEPING && state_ != MARKING)) {
792  return;
793  }
794 
795  allocated_ += allocated_bytes;
796 
797  if (allocated_ < kAllocatedThreshold) return;
798 
799  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
800 
801  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
802  bytes_scanned_ += bytes_to_process;
803 
804  double start = 0;
805 
806  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
807  start = OS::TimeCurrentMillis();
808  }
809 
810  if (state_ == SWEEPING) {
811  if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
812  bytes_scanned_ = 0;
813  StartMarking(PREVENT_COMPACTION);
814  }
815  } else if (state_ == MARKING) {
816  Map* filler_map = heap_->one_pointer_filler_map();
817  Map* global_context_map = heap_->global_context_map();
818  IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
819  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
820  HeapObject* obj = marking_deque_.Pop();
821 
822  // Explicitly skip one word fillers. Incremental markbit patterns are
823  // correct only for objects that occupy at least two words.
824  Map* map = obj->map();
825  if (map == filler_map) continue;
826 
827  int size = obj->SizeFromMap(map);
828  bytes_to_process -= size;
829  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
830  if (Marking::IsWhite(map_mark_bit)) {
831  WhiteToGreyAndPush(map, map_mark_bit);
832  }
833 
834  // TODO(gc) switch to static visitor instead of normal visitor.
835  if (map == global_context_map) {
836  // Global contexts have weak fields.
837  Context* ctx = Context::cast(obj);
838 
839  // We will mark cache black with a separate pass
840  // when we finish marking.
841  MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
842 
843  VisitGlobalContext(ctx, &marking_visitor);
844  } else if (map->instance_type() == MAP_TYPE) {
845  Map* map = Map::cast(obj);
846  heap_->ClearCacheOnMap(map);
847 
848  // When map collection is enabled we have to mark through map's
849  // transitions and back pointers in a special way to make these links
850  // weak. Only maps for subclasses of JSReceiver can have transitions.
851  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
852  if (FLAG_collect_maps &&
853  map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
854  marker_.MarkMapContents(map);
855  } else {
856  marking_visitor.VisitPointers(
857  HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
858  HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
859  }
860  } else if (map->instance_type() == JS_FUNCTION_TYPE) {
861  marking_visitor.VisitPointers(
862  HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
863  HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));
864 
865  marking_visitor.VisitCodeEntry(
866  obj->address() + JSFunction::kCodeEntryOffset);
867 
868  marking_visitor.VisitPointers(
869  HeapObject::RawField(obj,
870  JSFunction::kCodeEntryOffset + kPointerSize),
871  HeapObject::RawField(obj,
872  JSFunction::kNonWeakFieldsEndOffset));
873  } else {
874  obj->IterateBody(map->instance_type(), size, &marking_visitor);
875  }
876 
877  MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
878  SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
879  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
880  Marking::MarkBlack(obj_mark_bit);
881  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
882  }
883  if (marking_deque_.IsEmpty()) MarkingComplete(action);
884  }
885 
886  allocated_ = 0;
887 
888  steps_count_++;
889  steps_count_since_last_gc_++;
890 
891  bool speed_up = false;
892 
893  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
894  if (FLAG_trace_gc) {
895  PrintF("Speed up marking after %d steps\n",
896  static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
897  }
898  speed_up = true;
899  }
900 
901  bool space_left_is_very_small =
902  (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
903 
904  bool only_1_nth_of_space_that_was_available_still_left =
905  (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
906  old_generation_space_available_at_start_of_incremental_);
907 
908  if (space_left_is_very_small ||
909  only_1_nth_of_space_that_was_available_still_left) {
910  if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
911  speed_up = true;
912  }
913 
914  bool size_of_old_space_multiplied_by_n_during_marking =
915  (heap_->PromotedTotalSize() >
916  (allocation_marking_factor_ + 1) *
917  old_generation_space_used_at_start_of_incremental_);
918  if (size_of_old_space_multiplied_by_n_during_marking) {
919  speed_up = true;
920  if (FLAG_trace_gc) {
921  PrintF("Speed up marking because of heap size increase\n");
922  }
923  }
924 
925  int64_t promoted_during_marking = heap_->PromotedTotalSize()
926  - old_generation_space_used_at_start_of_incremental_;
927  intptr_t delay = allocation_marking_factor_ * MB;
928  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
929 
930  // We try to scan at least twice the speed that we are allocating.
931  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
932  if (FLAG_trace_gc) {
933  PrintF("Speed up marking because marker was not keeping up\n");
934  }
935  speed_up = true;
936  }
937 
938  if (speed_up) {
939  if (state_ != MARKING) {
940  if (FLAG_trace_gc) {
941  PrintF("Postponing speeding up marking until marking starts\n");
942  }
943  } else {
944  allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
945  allocation_marking_factor_ = static_cast<int>(
946  Min(kMaxAllocationMarkingFactor,
947  static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
948  if (FLAG_trace_gc) {
949  PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
950  }
951  }
952  }
953 
954  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
955  double end = OS::TimeCurrentMillis();
956  double delta = (end - start);
957  longest_step_ = Max(longest_step_, delta);
958  steps_took_ += delta;
959  steps_took_since_last_gc_ += delta;
960  }
961 }
962 
963 
964 void IncrementalMarking::ResetStepCounters() {
965  steps_count_ = 0;
966  steps_took_ = 0;
967  longest_step_ = 0.0;
968  old_generation_space_available_at_start_of_incremental_ =
969  SpaceLeftInOldSpace();
970  old_generation_space_used_at_start_of_incremental_ =
971  heap_->PromotedTotalSize();
972  steps_count_since_last_gc_ = 0;
973  steps_took_since_last_gc_ = 0;
974  bytes_rescanned_ = 0;
975  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
976  bytes_scanned_ = 0;
977 }
978 
979 
980 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
981  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
982 }
983 
984 } } // namespace v8::internal