v8  3.11.10(node0.8.26)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
mark-compact.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "code-stubs.h"
31 #include "compilation-cache.h"
32 #include "deoptimizer.h"
33 #include "execution.h"
34 #include "gdb-jit.h"
35 #include "global-handles.h"
36 #include "heap-profiler.h"
37 #include "ic-inl.h"
38 #include "incremental-marking.h"
39 #include "liveobjectlist-inl.h"
40 #include "mark-compact.h"
41 #include "objects-visiting.h"
42 #include "objects-visiting-inl.h"
43 #include "stub-cache.h"
44 
45 namespace v8 {
46 namespace internal {
47 
48 
49 const char* Marking::kWhiteBitPattern = "00";
50 const char* Marking::kBlackBitPattern = "10";
51 const char* Marking::kGreyBitPattern = "11";
52 const char* Marking::kImpossibleBitPattern = "01";
53 
54 
55 // -------------------------------------------------------------------------
56 // MarkCompactCollector
57 
58 MarkCompactCollector::MarkCompactCollector() : // NOLINT
59 #ifdef DEBUG
60  state_(IDLE),
61 #endif
62  sweep_precisely_(false),
63  reduce_memory_footprint_(false),
64  abort_incremental_marking_(false),
65  compacting_(false),
66  was_marked_incrementally_(false),
67  flush_monomorphic_ics_(false),
68  tracer_(NULL),
69  migration_slots_buffer_(NULL),
70  heap_(NULL),
71  code_flusher_(NULL),
72  encountered_weak_maps_(NULL),
73  marker_(this, this) { }
74 
75 
76 #ifdef DEBUG
77 class VerifyMarkingVisitor: public ObjectVisitor {
78  public:
79  void VisitPointers(Object** start, Object** end) {
80  for (Object** current = start; current < end; current++) {
81  if ((*current)->IsHeapObject()) {
82  HeapObject* object = HeapObject::cast(*current);
83  ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
84  }
85  }
86  }
87 };
88 
89 
90 static void VerifyMarking(Address bottom, Address top) {
91  VerifyMarkingVisitor visitor;
92  HeapObject* object;
93  Address next_object_must_be_here_or_later = bottom;
94 
95  for (Address current = bottom;
96  current < top;
97  current += kPointerSize) {
98  object = HeapObject::FromAddress(current);
99  if (MarkCompactCollector::IsMarked(object)) {
100  ASSERT(current >= next_object_must_be_here_or_later);
101  object->Iterate(&visitor);
102  next_object_must_be_here_or_later = current + object->Size();
103  }
104  }
105 }
106 
107 
108 static void VerifyMarking(NewSpace* space) {
109  Address end = space->top();
110  NewSpacePageIterator it(space->bottom(), end);
111  // The bottom position is at the start of its page. Allows us to use
112  // page->area_start() as start of range on all pages.
113  ASSERT_EQ(space->bottom(),
114  NewSpacePage::FromAddress(space->bottom())->area_start());
115  while (it.has_next()) {
116  NewSpacePage* page = it.next();
117  Address limit = it.has_next() ? page->area_end() : end;
118  ASSERT(limit == end || !page->Contains(end));
119  VerifyMarking(page->area_start(), limit);
120  }
121 }
122 
123 
124 static void VerifyMarking(PagedSpace* space) {
125  PageIterator it(space);
126 
127  while (it.has_next()) {
128  Page* p = it.next();
129  VerifyMarking(p->area_start(), p->area_end());
130  }
131 }
132 
133 
134 static void VerifyMarking(Heap* heap) {
135  VerifyMarking(heap->old_pointer_space());
136  VerifyMarking(heap->old_data_space());
137  VerifyMarking(heap->code_space());
138  VerifyMarking(heap->cell_space());
139  VerifyMarking(heap->map_space());
140  VerifyMarking(heap->new_space());
141 
142  VerifyMarkingVisitor visitor;
143 
144  LargeObjectIterator it(heap->lo_space());
145  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
147  obj->Iterate(&visitor);
148  }
149  }
150 
151  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
152 }
153 
154 
155 class VerifyEvacuationVisitor: public ObjectVisitor {
156  public:
157  void VisitPointers(Object** start, Object** end) {
158  for (Object** current = start; current < end; current++) {
159  if ((*current)->IsHeapObject()) {
160  HeapObject* object = HeapObject::cast(*current);
161  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
162  }
163  }
164  }
165 };
166 
167 
168 static void VerifyEvacuation(Address bottom, Address top) {
169  VerifyEvacuationVisitor visitor;
170  HeapObject* object;
171  Address next_object_must_be_here_or_later = bottom;
172 
173  for (Address current = bottom;
174  current < top;
175  current += kPointerSize) {
176  object = HeapObject::FromAddress(current);
177  if (MarkCompactCollector::IsMarked(object)) {
178  ASSERT(current >= next_object_must_be_here_or_later);
179  object->Iterate(&visitor);
180  next_object_must_be_here_or_later = current + object->Size();
181  }
182  }
183 }
184 
185 
186 static void VerifyEvacuation(NewSpace* space) {
187  NewSpacePageIterator it(space->bottom(), space->top());
188  VerifyEvacuationVisitor visitor;
189 
190  while (it.has_next()) {
191  NewSpacePage* page = it.next();
192  Address current = page->area_start();
193  Address limit = it.has_next() ? page->area_end() : space->top();
194  ASSERT(limit == space->top() || !page->Contains(space->top()));
195  while (current < limit) {
196  HeapObject* object = HeapObject::FromAddress(current);
197  object->Iterate(&visitor);
198  current += object->Size();
199  }
200  }
201 }
202 
203 
204 static void VerifyEvacuation(PagedSpace* space) {
205  PageIterator it(space);
206 
207  while (it.has_next()) {
208  Page* p = it.next();
209  if (p->IsEvacuationCandidate()) continue;
210  VerifyEvacuation(p->area_start(), p->area_end());
211  }
212 }
213 
214 
215 static void VerifyEvacuation(Heap* heap) {
216  VerifyEvacuation(heap->old_pointer_space());
217  VerifyEvacuation(heap->old_data_space());
218  VerifyEvacuation(heap->code_space());
219  VerifyEvacuation(heap->cell_space());
220  VerifyEvacuation(heap->map_space());
221  VerifyEvacuation(heap->new_space());
222 
223  VerifyEvacuationVisitor visitor;
224  heap->IterateStrongRoots(&visitor, VISIT_ALL);
225 }
226 #endif
227 
228 
231  evacuation_candidates_.Add(p);
232 }
233 
234 
235 static void TraceFragmentation(PagedSpace* space) {
236  int number_of_pages = space->CountTotalPages();
237  intptr_t reserved = (number_of_pages * space->AreaSize());
238  intptr_t free = reserved - space->SizeOfObjects();
239  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
240  AllocationSpaceName(space->identity()),
241  number_of_pages,
242  static_cast<int>(free),
243  static_cast<double>(free) * 100 / reserved);
244 }
245 
246 
248  if (!compacting_) {
249  ASSERT(evacuation_candidates_.length() == 0);
250 
251  CollectEvacuationCandidates(heap()->old_pointer_space());
252  CollectEvacuationCandidates(heap()->old_data_space());
253 
254  if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
255  CollectEvacuationCandidates(heap()->code_space());
256  } else if (FLAG_trace_fragmentation) {
257  TraceFragmentation(heap()->code_space());
258  }
259 
260  if (FLAG_trace_fragmentation) {
261  TraceFragmentation(heap()->map_space());
262  TraceFragmentation(heap()->cell_space());
263  }
264 
268 
269  compacting_ = evacuation_candidates_.length() > 0;
270  }
271 
272  return compacting_;
273 }
274 
275 
277  // Make sure that Prepare() has been called. The individual steps below will
278  // update the state as they proceed.
279  ASSERT(state_ == PREPARE_GC);
280  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
281 
282  MarkLiveObjects();
283  ASSERT(heap_->incremental_marking()->IsStopped());
284 
285  if (FLAG_collect_maps) ClearNonLiveTransitions();
286 
287  ClearWeakMaps();
288 
289 #ifdef DEBUG
290  if (FLAG_verify_heap) {
291  VerifyMarking(heap_);
292  }
293 #endif
294 
295  SweepSpaces();
296 
297  if (!FLAG_collect_maps) ReattachInitialMaps();
298 
299  Finish();
300 
301  tracer_ = NULL;
302 }
303 
304 
305 #ifdef DEBUG
306 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
307  PageIterator it(space);
308 
309  while (it.has_next()) {
310  Page* p = it.next();
311  CHECK(p->markbits()->IsClean());
312  CHECK_EQ(0, p->LiveBytes());
313  }
314 }
315 
316 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
317  NewSpacePageIterator it(space->bottom(), space->top());
318 
319  while (it.has_next()) {
320  NewSpacePage* p = it.next();
321  CHECK(p->markbits()->IsClean());
322  CHECK_EQ(0, p->LiveBytes());
323  }
324 }
325 
326 void MarkCompactCollector::VerifyMarkbitsAreClean() {
327  VerifyMarkbitsAreClean(heap_->old_pointer_space());
328  VerifyMarkbitsAreClean(heap_->old_data_space());
329  VerifyMarkbitsAreClean(heap_->code_space());
330  VerifyMarkbitsAreClean(heap_->cell_space());
331  VerifyMarkbitsAreClean(heap_->map_space());
332  VerifyMarkbitsAreClean(heap_->new_space());
333 
334  LargeObjectIterator it(heap_->lo_space());
335  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
336  MarkBit mark_bit = Marking::MarkBitFrom(obj);
337  ASSERT(Marking::IsWhite(mark_bit));
338  ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
339  }
340 }
341 #endif
342 
343 
344 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
345  PageIterator it(space);
346 
347  while (it.has_next()) {
348  Bitmap::Clear(it.next());
349  }
350 }
351 
352 
353 static void ClearMarkbitsInNewSpace(NewSpace* space) {
354  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
355 
356  while (it.has_next()) {
357  Bitmap::Clear(it.next());
358  }
359 }
360 
361 
363  ClearMarkbitsInPagedSpace(heap_->code_space());
364  ClearMarkbitsInPagedSpace(heap_->map_space());
365  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
366  ClearMarkbitsInPagedSpace(heap_->old_data_space());
367  ClearMarkbitsInPagedSpace(heap_->cell_space());
368  ClearMarkbitsInNewSpace(heap_->new_space());
369 
370  LargeObjectIterator it(heap_->lo_space());
371  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
372  MarkBit mark_bit = Marking::MarkBitFrom(obj);
373  mark_bit.Clear();
374  mark_bit.Next().Clear();
375  Page::FromAddress(obj->address())->ResetLiveBytes();
376  }
377 }
378 
379 
380 bool Marking::TransferMark(Address old_start, Address new_start) {
381  // This is only used when resizing an object.
382  ASSERT(MemoryChunk::FromAddress(old_start) ==
383  MemoryChunk::FromAddress(new_start));
384 
385  // If the mark doesn't move, we don't check the color of the object.
386  // It doesn't matter whether the object is black, since it hasn't changed
387  // size, so the adjustment to the live data count will be zero anyway.
388  if (old_start == new_start) return false;
389 
390  MarkBit new_mark_bit = MarkBitFrom(new_start);
391  MarkBit old_mark_bit = MarkBitFrom(old_start);
392 
393 #ifdef DEBUG
394  ObjectColor old_color = Color(old_mark_bit);
395 #endif
396 
397  if (Marking::IsBlack(old_mark_bit)) {
398  old_mark_bit.Clear();
399  ASSERT(IsWhite(old_mark_bit));
400  Marking::MarkBlack(new_mark_bit);
401  return true;
402  } else if (Marking::IsGrey(old_mark_bit)) {
403  ASSERT(heap_->incremental_marking()->IsMarking());
404  old_mark_bit.Clear();
405  old_mark_bit.Next().Clear();
406  ASSERT(IsWhite(old_mark_bit));
408  HeapObject::FromAddress(new_start), new_mark_bit);
410  }
411 
412 #ifdef DEBUG
413  ObjectColor new_color = Color(new_mark_bit);
414  ASSERT(new_color == old_color);
415 #endif
416 
417  return false;
418 }
419 
420 
422  switch (space) {
423  case NEW_SPACE: return "NEW_SPACE";
424  case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
425  case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
426  case CODE_SPACE: return "CODE_SPACE";
427  case MAP_SPACE: return "MAP_SPACE";
428  case CELL_SPACE: return "CELL_SPACE";
429  case LO_SPACE: return "LO_SPACE";
430  default:
431  UNREACHABLE();
432  }
433 
434  return NULL;
435 }
436 
437 
438 // Returns zero for pages that have so little fragmentation that it is not
439 // worth defragmenting them. Otherwise a positive integer that gives an
440 // estimate of fragmentation on an arbitrary scale.
441 static int FreeListFragmentation(PagedSpace* space, Page* p) {
442  // If page was not swept then there are no free list items on it.
443  if (!p->WasSwept()) {
444  if (FLAG_trace_fragmentation) {
445  PrintF("%p [%s]: %d bytes live (unswept)\n",
446  reinterpret_cast<void*>(p),
447  AllocationSpaceName(space->identity()),
448  p->LiveBytes());
449  }
450  return 0;
451  }
452 
453  FreeList::SizeStats sizes;
454  space->CountFreeListItems(p, &sizes);
455 
456  intptr_t ratio;
457  intptr_t ratio_threshold;
458  intptr_t area_size = space->AreaSize();
459  if (space->identity() == CODE_SPACE) {
460  ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
461  area_size;
462  ratio_threshold = 10;
463  } else {
464  ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
465  area_size;
466  ratio_threshold = 15;
467  }
468 
469  if (FLAG_trace_fragmentation) {
470  PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
471  reinterpret_cast<void*>(p),
472  AllocationSpaceName(space->identity()),
473  static_cast<int>(sizes.small_size_),
474  static_cast<double>(sizes.small_size_ * 100) /
475  area_size,
476  static_cast<int>(sizes.medium_size_),
477  static_cast<double>(sizes.medium_size_ * 100) /
478  area_size,
479  static_cast<int>(sizes.large_size_),
480  static_cast<double>(sizes.large_size_ * 100) /
481  area_size,
482  static_cast<int>(sizes.huge_size_),
483  static_cast<double>(sizes.huge_size_ * 100) /
484  area_size,
485  (ratio > ratio_threshold) ? "[fragmented]" : "");
486  }
487 
488  if (FLAG_always_compact && sizes.Total() != area_size) {
489  return 1;
490  }
491 
492  if (ratio <= ratio_threshold) return 0; // Not fragmented.
493 
494  return static_cast<int>(ratio - ratio_threshold);
495 }
496 
497 
499  ASSERT(space->identity() == OLD_POINTER_SPACE ||
500  space->identity() == OLD_DATA_SPACE ||
501  space->identity() == CODE_SPACE);
502 
503  int number_of_pages = space->CountTotalPages();
504 
505  const int kMaxMaxEvacuationCandidates = 1000;
506  int max_evacuation_candidates = Min(
507  kMaxMaxEvacuationCandidates,
508  static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
509 
510  if (FLAG_stress_compaction || FLAG_always_compact) {
511  max_evacuation_candidates = kMaxMaxEvacuationCandidates;
512  }
513 
514  class Candidate {
515  public:
516  Candidate() : fragmentation_(0), page_(NULL) { }
517  Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
518 
519  int fragmentation() { return fragmentation_; }
520  Page* page() { return page_; }
521 
522  private:
523  int fragmentation_;
524  Page* page_;
525  };
526 
527  enum CompactionMode {
528  COMPACT_FREE_LISTS,
529  REDUCE_MEMORY_FOOTPRINT
530  };
531 
532  CompactionMode mode = COMPACT_FREE_LISTS;
533 
534  intptr_t reserved = number_of_pages * space->AreaSize();
535  intptr_t over_reserved = reserved - space->SizeOfObjects();
536  static const intptr_t kFreenessThreshold = 50;
537 
538  if (over_reserved >= 2 * space->AreaSize() &&
539  reduce_memory_footprint_) {
540  mode = REDUCE_MEMORY_FOOTPRINT;
541 
542  // We expect that empty pages are easier to compact so slightly bump the
543  // limit.
544  max_evacuation_candidates += 2;
545 
546  if (FLAG_trace_fragmentation) {
547  PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
548  static_cast<double>(over_reserved) / MB,
549  static_cast<int>(kFreenessThreshold));
550  }
551  }
552 
553  intptr_t estimated_release = 0;
554 
555  Candidate candidates[kMaxMaxEvacuationCandidates];
556 
557  int count = 0;
558  int fragmentation = 0;
559  Candidate* least = NULL;
560 
561  PageIterator it(space);
562  if (it.has_next()) it.next(); // Never compact the first page.
563 
564  while (it.has_next()) {
565  Page* p = it.next();
567 
568  if (FLAG_stress_compaction) {
569  int counter = space->heap()->ms_count();
570  uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
571  if ((counter & 1) == (page_number & 1)) fragmentation = 1;
572  } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
573  // Don't try to release too many pages.
574  if (estimated_release >= ((over_reserved * 3) / 4)) {
575  continue;
576  }
577 
578  intptr_t free_bytes = 0;
579 
580  if (!p->WasSwept()) {
581  free_bytes = (p->area_size() - p->LiveBytes());
582  } else {
583  FreeList::SizeStats sizes;
584  space->CountFreeListItems(p, &sizes);
585  free_bytes = sizes.Total();
586  }
587 
588  int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
589 
590  if (free_pct >= kFreenessThreshold) {
591  estimated_release += 2 * p->area_size() - free_bytes;
592  fragmentation = free_pct;
593  } else {
594  fragmentation = 0;
595  }
596 
597  if (FLAG_trace_fragmentation) {
598  PrintF("%p [%s]: %d (%.2f%%) free %s\n",
599  reinterpret_cast<void*>(p),
600  AllocationSpaceName(space->identity()),
601  static_cast<int>(free_bytes),
602  static_cast<double>(free_bytes * 100) / p->area_size(),
603  (fragmentation > 0) ? "[fragmented]" : "");
604  }
605  } else {
606  fragmentation = FreeListFragmentation(space, p);
607  }
608 
609  if (fragmentation != 0) {
610  if (count < max_evacuation_candidates) {
611  candidates[count++] = Candidate(fragmentation, p);
612  } else {
613  if (least == NULL) {
614  for (int i = 0; i < max_evacuation_candidates; i++) {
615  if (least == NULL ||
616  candidates[i].fragmentation() < least->fragmentation()) {
617  least = candidates + i;
618  }
619  }
620  }
621  if (least->fragmentation() < fragmentation) {
622  *least = Candidate(fragmentation, p);
623  least = NULL;
624  }
625  }
626  }
627  }
628 
629  for (int i = 0; i < count; i++) {
630  AddEvacuationCandidate(candidates[i].page());
631  }
632 
633  if (count > 0 && FLAG_trace_fragmentation) {
634  PrintF("Collected %d evacuation candidates for space %s\n",
635  count,
636  AllocationSpaceName(space->identity()));
637  }
638 }
639 
640 
642  if (compacting_) {
643  int npages = evacuation_candidates_.length();
644  for (int i = 0; i < npages; i++) {
645  Page* p = evacuation_candidates_[i];
646  slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
649  }
650  compacting_ = false;
651  evacuation_candidates_.Rewind(0);
652  invalidated_code_.Rewind(0);
653  }
654  ASSERT_EQ(0, evacuation_candidates_.length());
655 }
656 
657 
658 void MarkCompactCollector::Prepare(GCTracer* tracer) {
659  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
660 
661  // Monomorphic ICs are preserved when possible, but need to be flushed
662  // when they might be keeping a Context alive, or when the heap is about
663  // to be serialized.
664  flush_monomorphic_ics_ =
666 
667  // Rather than passing the tracer around we stash it in a static member
668  // variable.
669  tracer_ = tracer;
670 
671 #ifdef DEBUG
672  ASSERT(state_ == IDLE);
673  state_ = PREPARE_GC;
674 #endif
675 
676  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
677 
678 #ifdef ENABLE_GDB_JIT_INTERFACE
679  if (FLAG_gdbjit) {
680  // If GDBJIT interface is active disable compaction.
681  compacting_collection_ = false;
682  }
683 #endif
684 
685  // Clear marking bits if incremental marking is aborted.
686  if (was_marked_incrementally_ && abort_incremental_marking_) {
688  ClearMarkbits();
689  AbortCompaction();
690  was_marked_incrementally_ = false;
691  }
692 
693  // Don't start compaction if we are in the middle of incremental
694  // marking cycle. We did not collect any slots.
695  if (!FLAG_never_compact && !was_marked_incrementally_) {
697  }
698 
699  PagedSpaces spaces;
700  for (PagedSpace* space = spaces.next();
701  space != NULL;
702  space = spaces.next()) {
703  space->PrepareForMarkCompact();
704  }
705 
706 #ifdef DEBUG
707  if (!was_marked_incrementally_ && FLAG_verify_heap) {
708  VerifyMarkbitsAreClean();
709  }
710 #endif
711 }
712 
713 
714 void MarkCompactCollector::Finish() {
715 #ifdef DEBUG
716  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
717  state_ = IDLE;
718 #endif
719  // The stub cache is not traversed during GC; clear the cache to
720  // force lazy re-initialization of it. This must be done after the
721  // GC, because it relies on the new address of certain old space
722  // objects (empty string, illegal builtin).
723  heap()->isolate()->stub_cache()->Clear();
724 
725  heap()->external_string_table_.CleanUp();
726 }
727 
728 
729 // -------------------------------------------------------------------------
730 // Phase 1: tracing and marking live objects.
731 // before: all objects are in normal state.
732 // after: a live object's map pointer is marked as '00'.
733 
734 // Marking all live objects in the heap as part of mark-sweep or mark-compact
735 // collection. Before marking, all objects are in their normal state. After
736 // marking, live objects' map pointers are marked indicating that the object
737 // has been found reachable.
738 //
739 // The marking algorithm is a (mostly) depth-first (because of possible stack
740 // overflow) traversal of the graph of objects reachable from the roots. It
741 // uses an explicit stack of pointers rather than recursion. The young
742 // generation's inactive ('from') space is used as a marking stack. The
743 // objects in the marking stack are the ones that have been reached and marked
744 // but their children have not yet been visited.
745 //
746 // The marking stack can overflow during traversal. In that case, we set an
747 // overflow flag. When the overflow flag is set, we continue marking objects
748 // reachable from the objects on the marking stack, but no longer push them on
749 // the marking stack. Instead, we mark them as both marked and overflowed.
750 // When the stack is in the overflowed state, objects marked as overflowed
751 // have been reached and marked but their children have not been visited yet.
752 // After emptying the marking stack, we clear the overflow flag and traverse
753 // the heap looking for objects marked as overflowed, push them on the stack,
754 // and continue with marking. This process repeats until all reachable
755 // objects have been marked.
756 
757 class CodeFlusher {
758  public:
759  explicit CodeFlusher(Isolate* isolate)
760  : isolate_(isolate),
761  jsfunction_candidates_head_(NULL),
762  shared_function_info_candidates_head_(NULL) {}
763 
764  void AddCandidate(SharedFunctionInfo* shared_info) {
765  SetNextCandidate(shared_info, shared_function_info_candidates_head_);
766  shared_function_info_candidates_head_ = shared_info;
767  }
768 
769  void AddCandidate(JSFunction* function) {
770  ASSERT(function->code() == function->shared()->code());
771 
772  SetNextCandidate(function, jsfunction_candidates_head_);
773  jsfunction_candidates_head_ = function;
774  }
775 
777  ProcessSharedFunctionInfoCandidates();
778  ProcessJSFunctionCandidates();
779  }
780 
781  private:
782  void ProcessJSFunctionCandidates() {
783  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
784 
785  JSFunction* candidate = jsfunction_candidates_head_;
786  JSFunction* next_candidate;
787  while (candidate != NULL) {
788  next_candidate = GetNextCandidate(candidate);
789 
790  SharedFunctionInfo* shared = candidate->shared();
791 
792  Code* code = shared->code();
793  MarkBit code_mark = Marking::MarkBitFrom(code);
794  if (!code_mark.Get()) {
795  shared->set_code(lazy_compile);
796  candidate->set_code(lazy_compile);
797  } else {
798  candidate->set_code(shared->code());
799  }
800 
801  // We are in the middle of a GC cycle so the write barrier in the code
802  // setter did not record the slot update and we have to do that manually.
803  Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
804  Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
805  isolate_->heap()->mark_compact_collector()->
806  RecordCodeEntrySlot(slot, target);
807 
808  RecordSharedFunctionInfoCodeSlot(shared);
809 
810  candidate = next_candidate;
811  }
812 
813  jsfunction_candidates_head_ = NULL;
814  }
815 
816 
817  void ProcessSharedFunctionInfoCandidates() {
818  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
819 
820  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
821  SharedFunctionInfo* next_candidate;
822  while (candidate != NULL) {
823  next_candidate = GetNextCandidate(candidate);
824  SetNextCandidate(candidate, NULL);
825 
826  Code* code = candidate->code();
827  MarkBit code_mark = Marking::MarkBitFrom(code);
828  if (!code_mark.Get()) {
829  candidate->set_code(lazy_compile);
830  }
831 
832  RecordSharedFunctionInfoCodeSlot(candidate);
833 
834  candidate = next_candidate;
835  }
836 
837  shared_function_info_candidates_head_ = NULL;
838  }
839 
840  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
841  Object** slot = HeapObject::RawField(shared,
843  isolate_->heap()->mark_compact_collector()->
844  RecordSlot(slot, slot, HeapObject::cast(*slot));
845  }
846 
847  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
848  return reinterpret_cast<JSFunction**>(
849  candidate->address() + JSFunction::kCodeEntryOffset);
850  }
851 
852  static JSFunction* GetNextCandidate(JSFunction* candidate) {
853  return *GetNextCandidateField(candidate);
854  }
855 
856  static void SetNextCandidate(JSFunction* candidate,
857  JSFunction* next_candidate) {
858  *GetNextCandidateField(candidate) = next_candidate;
859  }
860 
861  static SharedFunctionInfo** GetNextCandidateField(
862  SharedFunctionInfo* candidate) {
863  Code* code = candidate->code();
864  return reinterpret_cast<SharedFunctionInfo**>(
865  code->address() + Code::kGCMetadataOffset);
866  }
867 
868  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
869  return reinterpret_cast<SharedFunctionInfo*>(
870  candidate->code()->gc_metadata());
871  }
872 
873  static void SetNextCandidate(SharedFunctionInfo* candidate,
874  SharedFunctionInfo* next_candidate) {
875  candidate->code()->set_gc_metadata(next_candidate);
876  }
877 
878  Isolate* isolate_;
879  JSFunction* jsfunction_candidates_head_;
880  SharedFunctionInfo* shared_function_info_candidates_head_;
881 
882  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
883 };
884 
885 
886 MarkCompactCollector::~MarkCompactCollector() {
887  if (code_flusher_ != NULL) {
888  delete code_flusher_;
889  code_flusher_ = NULL;
890  }
891 }
892 
893 
894 static inline HeapObject* ShortCircuitConsString(Object** p) {
895  // Optimization: If the heap object pointed to by p is a non-symbol
896  // cons string whose right substring is HEAP->empty_string, update
897  // it in place to its left substring. Return the updated value.
898  //
899  // Here we assume that if we change *p, we replace it with a heap object
900  // (i.e., the left substring of a cons string is always a heap object).
901  //
902  // The check performed is:
903  // object->IsConsString() && !object->IsSymbol() &&
904  // (ConsString::cast(object)->second() == HEAP->empty_string())
905  // except the maps for the object and its possible substrings might be
906  // marked.
907  HeapObject* object = HeapObject::cast(*p);
908  if (!FLAG_clever_optimizations) return object;
909  Map* map = object->map();
910  InstanceType type = map->instance_type();
911  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
912 
913  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
914  Heap* heap = map->GetHeap();
915  if (second != heap->empty_string()) {
916  return object;
917  }
918 
919  // Since we don't have the object's start, it is impossible to update the
920  // page dirty marks. Therefore, we only replace the string with its left
921  // substring when page dirty marks do not change.
922  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
923  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
924 
925  *p = first;
926  return HeapObject::cast(first);
927 }
928 
929 
931  public:
932  static inline void IterateBody(Map* map, HeapObject* obj) {
933  table_.GetVisitor(map)(map, obj);
934  }
935 
936  static void Initialize() {
937  table_.Register(kVisitShortcutCandidate,
940  void>::Visit);
941 
942  table_.Register(kVisitConsString,
945  void>::Visit);
946 
947  table_.Register(kVisitSlicedString,
950  void>::Visit);
951 
952  table_.Register(kVisitFixedArray,
955  void>::Visit);
956 
957  table_.Register(kVisitGlobalContext, &VisitGlobalContext);
958 
959  table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
960 
961  table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
962  table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
963  table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
964  table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
965 
966  table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
967 
968  table_.Register(kVisitOddball,
971  void>::Visit);
972  table_.Register(kVisitMap,
975  void>::Visit);
976 
977  table_.Register(kVisitCode, &VisitCode);
978 
979  table_.Register(kVisitSharedFunctionInfo,
980  &VisitSharedFunctionInfoAndFlushCode);
981 
982  table_.Register(kVisitJSFunction,
983  &VisitJSFunctionAndFlushCode);
984 
985  table_.Register(kVisitJSRegExp,
986  &VisitRegExpAndFlushCode);
987 
988  table_.Register(kVisitPropertyCell,
991  void>::Visit);
992 
993  table_.RegisterSpecializations<DataObjectVisitor,
996 
997  table_.RegisterSpecializations<JSObjectVisitor,
1000 
1001  table_.RegisterSpecializations<StructObjectVisitor,
1002  kVisitStruct,
1004  }
1005 
1006  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1007  MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1008  }
1009 
1010  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1011  // Mark all objects pointed to in [start, end).
1012  const int kMinRangeForMarkingRecursion = 64;
1013  if (end - start >= kMinRangeForMarkingRecursion) {
1014  if (VisitUnmarkedObjects(heap, start, end)) return;
1015  // We are close to a stack overflow, so just mark the objects.
1016  }
1017  MarkCompactCollector* collector = heap->mark_compact_collector();
1018  for (Object** p = start; p < end; p++) {
1019  MarkObjectByPointer(collector, start, p);
1020  }
1021  }
1022 
1023  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
1024  ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
1025  JSGlobalPropertyCell* cell =
1026  JSGlobalPropertyCell::cast(rinfo->target_cell());
1027  MarkBit mark = Marking::MarkBitFrom(cell);
1028  heap->mark_compact_collector()->MarkObject(cell, mark);
1029  }
1030 
1031  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
1032  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1033  // TODO(mstarzinger): We do not short-circuit cons strings here, verify
1034  // that there can be no such embedded pointers and add assertion here.
1035  HeapObject* object = HeapObject::cast(rinfo->target_object());
1036  heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
1037  MarkBit mark = Marking::MarkBitFrom(object);
1038  heap->mark_compact_collector()->MarkObject(object, mark);
1039  }
1040 
1041  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
1042  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1043  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1044  if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
1045  && (target->ic_state() == MEGAMORPHIC ||
1046  heap->mark_compact_collector()->flush_monomorphic_ics_ ||
1047  target->ic_age() != heap->global_ic_age())) {
1048  IC::Clear(rinfo->pc());
1049  target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1050  }
1051  MarkBit code_mark = Marking::MarkBitFrom(target);
1052  heap->mark_compact_collector()->MarkObject(target, code_mark);
1053  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
1054  }
1055 
1056  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
1057  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1058  rinfo->IsPatchedReturnSequence()) ||
1059  (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1060  rinfo->IsPatchedDebugBreakSlotSequence()));
1061  Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1062  MarkBit code_mark = Marking::MarkBitFrom(target);
1063  heap->mark_compact_collector()->MarkObject(target, code_mark);
1064  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
1065  }
1066 
1067  // Mark object pointed to by p.
1068  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1069  Object** anchor_slot,
1070  Object** p)) {
1071  if (!(*p)->IsHeapObject()) return;
1072  HeapObject* object = ShortCircuitConsString(p);
1073  collector->RecordSlot(anchor_slot, p, object);
1074  MarkBit mark = Marking::MarkBitFrom(object);
1075  collector->MarkObject(object, mark);
1076  }
1077 
1078 
1079  // Visit an unmarked object.
1080  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1081  HeapObject* obj)) {
1082 #ifdef DEBUG
1083  ASSERT(Isolate::Current()->heap()->Contains(obj));
1084  ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
1085 #endif
1086  Map* map = obj->map();
1087  Heap* heap = obj->GetHeap();
1088  MarkBit mark = Marking::MarkBitFrom(obj);
1089  heap->mark_compact_collector()->SetMark(obj, mark);
1090  // Mark the map pointer and the body.
1091  MarkBit map_mark = Marking::MarkBitFrom(map);
1092  heap->mark_compact_collector()->MarkObject(map, map_mark);
1093  IterateBody(map, obj);
1094  }
1095 
1096  // Visit all unmarked objects pointed to by [start, end).
1097  // Returns false if the operation fails (lack of stack space).
1098  static inline bool VisitUnmarkedObjects(Heap* heap,
1099  Object** start,
1100  Object** end) {
1101  // Return false is we are close to the stack limit.
1102  StackLimitCheck check(heap->isolate());
1103  if (check.HasOverflowed()) return false;
1104 
1105  MarkCompactCollector* collector = heap->mark_compact_collector();
1106  // Visit the unmarked objects.
1107  for (Object** p = start; p < end; p++) {
1108  Object* o = *p;
1109  if (!o->IsHeapObject()) continue;
1110  collector->RecordSlot(start, p, o);
1111  HeapObject* obj = HeapObject::cast(o);
1112  MarkBit mark = Marking::MarkBitFrom(obj);
1113  if (mark.Get()) continue;
1114  VisitUnmarkedObject(collector, obj);
1115  }
1116  return true;
1117  }
1118 
1119  static inline void VisitExternalReference(Address* p) { }
1120  static inline void VisitExternalReference(RelocInfo* rinfo) { }
1121  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
1122 
1123  private:
1124  class DataObjectVisitor {
1125  public:
1126  template<int size>
1127  static void VisitSpecialized(Map* map, HeapObject* object) {
1128  }
1129 
1130  static void Visit(Map* map, HeapObject* object) {
1131  }
1132  };
1133 
1134  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
1135  JSObject::BodyDescriptor,
1136  void> JSObjectVisitor;
1137 
1138  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
1139  StructBodyDescriptor,
1140  void> StructObjectVisitor;
1141 
1142  static void VisitJSWeakMap(Map* map, HeapObject* object) {
1143  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1144  JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
1145 
1146  // Enqueue weak map in linked list of encountered weak maps.
1147  if (weak_map->next() == Smi::FromInt(0)) {
1148  weak_map->set_next(collector->encountered_weak_maps());
1149  collector->set_encountered_weak_maps(weak_map);
1150  }
1151 
1152  // Skip visiting the backing hash table containing the mappings.
1153  int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
1154  BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
1155  map->GetHeap(),
1156  object,
1159  BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
1160  map->GetHeap(),
1161  object,
1163  object_size);
1164 
1165  // Mark the backing hash table without pushing it on the marking stack.
1166  Object* table_object = weak_map->table();
1167  if (!table_object->IsHashTable()) return;
1168  ObjectHashTable* table = ObjectHashTable::cast(table_object);
1169  Object** table_slot =
1171  MarkBit table_mark = Marking::MarkBitFrom(table);
1172  collector->RecordSlot(table_slot, table_slot, table);
1173  if (!table_mark.Get()) collector->SetMark(table, table_mark);
1174  // Recording the map slot can be skipped, because maps are not compacted.
1175  collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1176  ASSERT(MarkCompactCollector::IsMarked(table->map()));
1177  }
1178 
1179  static void VisitCode(Map* map, HeapObject* object) {
1180  Heap* heap = map->GetHeap();
1181  Code* code = reinterpret_cast<Code*>(object);
1182  if (FLAG_cleanup_code_caches_at_gc) {
1183  code->ClearTypeFeedbackCells(heap);
1184  }
1185  code->CodeIterateBody<StaticMarkingVisitor>(heap);
1186  }
1187 
1188  // Code flushing support.
1189 
1190  // How many collections newly compiled code object will survive before being
1191  // flushed.
1192  static const int kCodeAgeThreshold = 5;
1193 
1194  static const int kRegExpCodeThreshold = 5;
1195 
1196  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
1197  Object* undefined = heap->undefined_value();
1198  return (info->script() != undefined) &&
1199  (reinterpret_cast<Script*>(info->script())->source() != undefined);
1200  }
1201 
1202 
1203  inline static bool IsCompiled(JSFunction* function) {
1204  return function->code() !=
1205  function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
1206  }
1207 
1208  inline static bool IsCompiled(SharedFunctionInfo* function) {
1209  return function->code() !=
1210  function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
1211  }
1212 
1213  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
1214  SharedFunctionInfo* shared_info = function->unchecked_shared();
1215 
1216  // Code is either on stack, in compilation cache or referenced
1217  // by optimized version of function.
1218  MarkBit code_mark = Marking::MarkBitFrom(function->code());
1219  if (code_mark.Get()) {
1220  if (!Marking::MarkBitFrom(shared_info).Get()) {
1221  shared_info->set_code_age(0);
1222  }
1223  return false;
1224  }
1225 
1226  // We do not flush code for optimized functions.
1227  if (function->code() != shared_info->code()) {
1228  return false;
1229  }
1230 
1231  return IsFlushable(heap, shared_info);
1232  }
1233 
1234  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
1235  // Code is either on stack, in compilation cache or referenced
1236  // by optimized version of function.
1237  MarkBit code_mark =
1238  Marking::MarkBitFrom(shared_info->code());
1239  if (code_mark.Get()) {
1240  return false;
1241  }
1242 
1243  // The function must be compiled and have the source code available,
1244  // to be able to recompile it in case we need the function again.
1245  if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
1246  return false;
1247  }
1248 
1249  // We never flush code for Api functions.
1250  Object* function_data = shared_info->function_data();
1251  if (function_data->IsFunctionTemplateInfo()) {
1252  return false;
1253  }
1254 
1255  // Only flush code for functions.
1256  if (shared_info->code()->kind() != Code::FUNCTION) {
1257  return false;
1258  }
1259 
1260  // Function must be lazy compilable.
1261  if (!shared_info->allows_lazy_compilation()) {
1262  return false;
1263  }
1264 
1265  // If this is a full script wrapped in a function we do no flush the code.
1266  if (shared_info->is_toplevel()) {
1267  return false;
1268  }
1269 
1270  // Age this shared function info.
1271  if (shared_info->code_age() < kCodeAgeThreshold) {
1272  shared_info->set_code_age(shared_info->code_age() + 1);
1273  return false;
1274  }
1275 
1276  return true;
1277  }
1278 
1279 
1280  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
1281  if (!IsFlushable(heap, function)) return false;
1282 
1283  // This function's code looks flushable. But we have to postpone the
1284  // decision until we see all functions that point to the same
1285  // SharedFunctionInfo because some of them might be optimized.
1286  // That would make the nonoptimized version of the code nonflushable,
1287  // because it is required for bailing out from optimized code.
1288  heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
1289  return true;
1290  }
1291 
1292  static inline bool IsValidNotBuiltinContext(Object* ctx) {
1293  return ctx->IsContext() &&
1294  !Context::cast(ctx)->global()->IsJSBuiltinsObject();
1295  }
1296 
1297 
1298  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
1299  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
1300 
1301  if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
1302 
1303  FixedBodyVisitor<StaticMarkingVisitor,
1305  void>::Visit(map, object);
1306  }
1307 
1308 
1309  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1310  JSRegExp* re,
1311  bool is_ascii) {
1312  // Make sure that the fixed array is in fact initialized on the RegExp.
1313  // We could potentially trigger a GC when initializing the RegExp.
1314  if (HeapObject::cast(re->data())->map()->instance_type() !=
1315  FIXED_ARRAY_TYPE) return;
1316 
1317  // Make sure this is a RegExp that actually contains code.
1318  if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
1319 
1320  Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
1321  if (!code->IsSmi() &&
1322  HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1323  // Save a copy that can be reinstated if we need the code again.
1324  re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
1325  code,
1326  heap);
1327 
1328  // Saving a copy might create a pointer into compaction candidate
1329  // that was not observed by marker. This might happen if JSRegExp data
1330  // was marked through the compilation cache before marker reached JSRegExp
1331  // object.
1332  FixedArray* data = FixedArray::cast(re->data());
1333  Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1334  heap->mark_compact_collector()->
1335  RecordSlot(slot, slot, code);
1336 
1337  // Set a number in the 0-255 range to guarantee no smi overflow.
1338  re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
1339  Smi::FromInt(heap->sweep_generation() & 0xff),
1340  heap);
1341  } else if (code->IsSmi()) {
1342  int value = Smi::cast(code)->value();
1343  // The regexp has not been compiled yet or there was a compilation error.
1344  if (value == JSRegExp::kUninitializedValue ||
1346  return;
1347  }
1348 
1349  // Check if we should flush now.
1350  if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1351  re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
1353  heap);
1354  re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
1356  heap);
1357  }
1358  }
1359  }
1360 
1361 
1362  // Works by setting the current sweep_generation (as a smi) in the
1363  // code object place in the data array of the RegExp and keeps a copy
1364  // around that can be reinstated if we reuse the RegExp before flushing.
1365  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1366  // we flush the code.
1367  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1368  Heap* heap = map->GetHeap();
1369  MarkCompactCollector* collector = heap->mark_compact_collector();
1370  if (!collector->is_code_flushing_enabled()) {
1371  VisitJSRegExpFields(map, object);
1372  return;
1373  }
1374  JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1375  // Flush code or set age on both ASCII and two byte code.
1376  UpdateRegExpCodeAgeAndFlush(heap, re, true);
1377  UpdateRegExpCodeAgeAndFlush(heap, re, false);
1378  // Visit the fields of the RegExp, including the updated FixedArray.
1379  VisitJSRegExpFields(map, object);
1380  }
1381 
1382 
1383  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
1384  HeapObject* object) {
1385  Heap* heap = map->GetHeap();
1386  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
1387  if (shared->ic_age() != heap->global_ic_age()) {
1388  shared->ResetForNewContext(heap->global_ic_age());
1389  }
1390 
1391  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1392  if (!collector->is_code_flushing_enabled()) {
1393  VisitSharedFunctionInfoGeneric(map, object);
1394  return;
1395  }
1396  VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
1397  }
1398 
1399 
1400  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
1401  Map* map, HeapObject* object, bool known_flush_code_candidate) {
1402  Heap* heap = map->GetHeap();
1403  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
1404 
1405  if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
1406 
1407  if (!known_flush_code_candidate) {
1408  known_flush_code_candidate = IsFlushable(heap, shared);
1409  if (known_flush_code_candidate) {
1410  heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
1411  }
1412  }
1413 
1414  VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
1415  }
1416 
1417 
1418  static void VisitCodeEntry(Heap* heap, Address entry_address) {
1419  Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1420  MarkBit mark = Marking::MarkBitFrom(code);
1421  heap->mark_compact_collector()->MarkObject(code, mark);
1422  heap->mark_compact_collector()->
1423  RecordCodeEntrySlot(entry_address, code);
1424  }
1425 
1426  static void VisitGlobalContext(Map* map, HeapObject* object) {
1427  FixedBodyVisitor<StaticMarkingVisitor,
1429  void>::Visit(map, object);
1430 
1431  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1432  for (int idx = Context::FIRST_WEAK_SLOT;
1434  ++idx) {
1435  Object** slot =
1437  collector->RecordSlot(slot, slot, *slot);
1438  }
1439  }
1440 
1441  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
1442  Heap* heap = map->GetHeap();
1443  MarkCompactCollector* collector = heap->mark_compact_collector();
1444  if (!collector->is_code_flushing_enabled()) {
1445  VisitJSFunction(map, object);
1446  return;
1447  }
1448 
1449  JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
1450  // The function must have a valid context and not be a builtin.
1451  bool flush_code_candidate = false;
1452  if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
1453  flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
1454  }
1455 
1456  if (!flush_code_candidate) {
1457  Code* code = jsfunction->shared()->code();
1458  MarkBit code_mark = Marking::MarkBitFrom(code);
1459  collector->MarkObject(code, code_mark);
1460 
1461  if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
1462  collector->MarkInlinedFunctionsCode(jsfunction->code());
1463  }
1464  }
1465 
1466  VisitJSFunctionFields(map,
1467  reinterpret_cast<JSFunction*>(object),
1468  flush_code_candidate);
1469  }
1470 
1471 
1472  static void VisitJSFunction(Map* map, HeapObject* object) {
1473  VisitJSFunctionFields(map,
1474  reinterpret_cast<JSFunction*>(object),
1475  false);
1476  }
1477 
1478 
1479 #define SLOT_ADDR(obj, offset) \
1480  reinterpret_cast<Object**>((obj)->address() + offset)
1481 
1482 
1483  static inline void VisitJSFunctionFields(Map* map,
1484  JSFunction* object,
1485  bool flush_code_candidate) {
1486  Heap* heap = map->GetHeap();
1487 
1488  VisitPointers(heap,
1491 
1492  if (!flush_code_candidate) {
1493  VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
1494  } else {
1495  // Don't visit code object.
1496 
1497  // Visit shared function info to avoid double checking of it's
1498  // flushability.
1499  SharedFunctionInfo* shared_info = object->unchecked_shared();
1500  MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
1501  if (!shared_info_mark.Get()) {
1502  Map* shared_info_map = shared_info->map();
1503  MarkBit shared_info_map_mark =
1504  Marking::MarkBitFrom(shared_info_map);
1505  heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
1506  heap->mark_compact_collector()->MarkObject(shared_info_map,
1507  shared_info_map_mark);
1508  VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
1509  shared_info,
1510  true);
1511  }
1512  }
1513 
1514  VisitPointers(
1515  heap,
1516  HeapObject::RawField(object,
1517  JSFunction::kCodeEntryOffset + kPointerSize),
1518  HeapObject::RawField(object,
1520  }
1521 
1522  static inline void VisitJSRegExpFields(Map* map,
1523  HeapObject* object) {
1524  int last_property_offset =
1525  JSRegExp::kSize + kPointerSize * map->inobject_properties();
1526  VisitPointers(map->GetHeap(),
1528  SLOT_ADDR(object, last_property_offset));
1529  }
1530 
1531 
1532  static void VisitSharedFunctionInfoFields(Heap* heap,
1533  HeapObject* object,
1534  bool flush_code_candidate) {
1535  VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
1536 
1537  if (!flush_code_candidate) {
1538  VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
1539  }
1540 
1541  VisitPointers(heap,
1544  }
1545 
1546  #undef SLOT_ADDR
1547 
1548  typedef void (*Callback)(Map* map, HeapObject* object);
1549 
1550  static VisitorDispatchTable<Callback> table_;
1551 };
1552 
1553 
1554 VisitorDispatchTable<StaticMarkingVisitor::Callback>
1555  StaticMarkingVisitor::table_;
1556 
1557 
1558 class MarkingVisitor : public ObjectVisitor {
1559  public:
1560  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
1561 
1562  void VisitPointer(Object** p) {
1563  StaticMarkingVisitor::VisitPointer(heap_, p);
1564  }
1565 
1566  void VisitPointers(Object** start, Object** end) {
1567  StaticMarkingVisitor::VisitPointers(heap_, start, end);
1568  }
1569 
1570  private:
1571  Heap* heap_;
1572 };
1573 
1574 
1576  public:
1578  : collector_(collector) {}
1579 
1580  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1581  collector_->PrepareThreadForCodeFlushing(isolate, top);
1582  }
1583 
1584  private:
1585  MarkCompactCollector* collector_;
1586 };
1587 
1588 
1589 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1590  public:
1592  : collector_(collector) {}
1593 
1594  void VisitPointers(Object** start, Object** end) {
1595  for (Object** p = start; p < end; p++) VisitPointer(p);
1596  }
1597 
1598  void VisitPointer(Object** slot) {
1599  Object* obj = *slot;
1600  if (obj->IsSharedFunctionInfo()) {
1601  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1602  MarkBit shared_mark = Marking::MarkBitFrom(shared);
1603  MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1604  collector_->MarkObject(shared->code(), code_mark);
1605  collector_->MarkObject(shared, shared_mark);
1606  }
1607  }
1608 
1609  private:
1610  MarkCompactCollector* collector_;
1611 };
1612 
1613 
1614 void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
1615  // For optimized functions we should retain both non-optimized version
1616  // of it's code and non-optimized version of all inlined functions.
1617  // This is required to support bailing out from inlined code.
1618  DeoptimizationInputData* data =
1619  DeoptimizationInputData::cast(code->deoptimization_data());
1620 
1621  FixedArray* literals = data->LiteralArray();
1622 
1623  for (int i = 0, count = data->InlinedFunctionCount()->value();
1624  i < count;
1625  i++) {
1626  JSFunction* inlined = JSFunction::cast(literals->get(i));
1627  Code* inlined_code = inlined->shared()->code();
1628  MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
1629  MarkObject(inlined_code, inlined_code_mark);
1630  }
1631 }
1632 
1633 
1634 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1635  ThreadLocalTop* top) {
1636  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1637  // Note: for the frame that has a pending lazy deoptimization
1638  // StackFrame::unchecked_code will return a non-optimized code object for
1639  // the outermost function and StackFrame::LookupCode will return
1640  // actual optimized code object.
1641  StackFrame* frame = it.frame();
1642  Code* code = frame->unchecked_code();
1643  MarkBit code_mark = Marking::MarkBitFrom(code);
1644  MarkObject(code, code_mark);
1645  if (frame->is_optimized()) {
1646  MarkInlinedFunctionsCode(frame->LookupCode());
1647  }
1648  }
1649 }
1650 
1651 
1652 void MarkCompactCollector::PrepareForCodeFlushing() {
1653  ASSERT(heap() == Isolate::Current()->heap());
1654 
1655  // TODO(1609) Currently incremental marker does not support code flushing.
1656  if (!FLAG_flush_code || was_marked_incrementally_) {
1657  EnableCodeFlushing(false);
1658  return;
1659  }
1660 
1661 #ifdef ENABLE_DEBUGGER_SUPPORT
1662  if (heap()->isolate()->debug()->IsLoaded() ||
1663  heap()->isolate()->debug()->has_break_points()) {
1664  EnableCodeFlushing(false);
1665  return;
1666  }
1667 #endif
1668 
1669  EnableCodeFlushing(true);
1670 
1671  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1672  // relies on it being marked before any other descriptor array.
1673  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1674  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1675  MarkObject(descriptor_array, descriptor_array_mark);
1676 
1677  // Make sure we are not referencing the code from the stack.
1678  ASSERT(this == heap()->mark_compact_collector());
1679  PrepareThreadForCodeFlushing(heap()->isolate(),
1680  heap()->isolate()->thread_local_top());
1681 
1682  // Iterate the archived stacks in all threads to check if
1683  // the code is referenced.
1684  CodeMarkingVisitor code_marking_visitor(this);
1686  &code_marking_visitor);
1687 
1688  SharedFunctionInfoMarkingVisitor visitor(this);
1689  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1690  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1691 
1692  ProcessMarkingDeque();
1693 }
1694 
1695 
1696 // Visitor class for marking heap roots.
1697 class RootMarkingVisitor : public ObjectVisitor {
1698  public:
1699  explicit RootMarkingVisitor(Heap* heap)
1700  : collector_(heap->mark_compact_collector()) { }
1701 
1702  void VisitPointer(Object** p) {
1703  MarkObjectByPointer(p);
1704  }
1705 
1706  void VisitPointers(Object** start, Object** end) {
1707  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1708  }
1709 
1710  private:
1711  void MarkObjectByPointer(Object** p) {
1712  if (!(*p)->IsHeapObject()) return;
1713 
1714  // Replace flat cons strings in place.
1715  HeapObject* object = ShortCircuitConsString(p);
1716  MarkBit mark_bit = Marking::MarkBitFrom(object);
1717  if (mark_bit.Get()) return;
1718 
1719  Map* map = object->map();
1720  // Mark the object.
1721  collector_->SetMark(object, mark_bit);
1722 
1723  // Mark the map pointer and body, and push them on the marking stack.
1724  MarkBit map_mark = Marking::MarkBitFrom(map);
1725  collector_->MarkObject(map, map_mark);
1726  StaticMarkingVisitor::IterateBody(map, object);
1727 
1728  // Mark all the objects reachable from the map and body. May leave
1729  // overflowed objects in the heap.
1730  collector_->EmptyMarkingDeque();
1731  }
1732 
1733  MarkCompactCollector* collector_;
1734 };
1735 
1736 
1737 // Helper class for pruning the symbol table.
1738 class SymbolTableCleaner : public ObjectVisitor {
1739  public:
1740  explicit SymbolTableCleaner(Heap* heap)
1741  : heap_(heap), pointers_removed_(0) { }
1742 
1743  virtual void VisitPointers(Object** start, Object** end) {
1744  // Visit all HeapObject pointers in [start, end).
1745  for (Object** p = start; p < end; p++) {
1746  Object* o = *p;
1747  if (o->IsHeapObject() &&
1749  // Check if the symbol being pruned is an external symbol. We need to
1750  // delete the associated external data as this symbol is going away.
1751 
1752  // Since no objects have yet been moved we can safely access the map of
1753  // the object.
1754  if (o->IsExternalString()) {
1756  }
1757  // Set the entry to the_hole_value (as deleted).
1758  *p = heap_->the_hole_value();
1759  pointers_removed_++;
1760  }
1761  }
1762  }
1763 
1765  return pointers_removed_;
1766  }
1767 
1768  private:
1769  Heap* heap_;
1770  int pointers_removed_;
1771 };
1772 
1773 
1774 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1775 // are retained.
1777  public:
1778  virtual Object* RetainAs(Object* object) {
1779  if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1780  return object;
1781  } else {
1782  return NULL;
1783  }
1784  }
1785 };
1786 
1787 
1788 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
1789  ASSERT(IsMarked(object));
1790  ASSERT(HEAP->Contains(object));
1791  if (object->IsMap()) {
1792  Map* map = Map::cast(object);
1793  heap_->ClearCacheOnMap(map);
1794 
1795  // When map collection is enabled we have to mark through map's transitions
1796  // in a special way to make transition links weak. Only maps for subclasses
1797  // of JSReceiver can have transitions.
1799  if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1800  marker_.MarkMapContents(map);
1801  } else {
1802  marking_deque_.PushBlack(map);
1803  }
1804  } else {
1805  marking_deque_.PushBlack(object);
1806  }
1807 }
1808 
1809 
1810 // Force instantiation of template instances.
1811 template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
1812 template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
1813 
1814 
1815 template <class T>
1817  // Mark prototype transitions array but don't push it into marking stack.
1818  // This will make references from it weak. We will clean dead prototype
1819  // transitions in ClearNonLiveTransitions.
1820  Object** proto_trans_slot =
1822  HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
1823  if (prototype_transitions->IsFixedArray()) {
1824  mark_compact_collector()->RecordSlot(proto_trans_slot,
1825  proto_trans_slot,
1826  prototype_transitions);
1827  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
1828  if (!mark.Get()) {
1829  mark.Set();
1830  MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
1831  prototype_transitions->Size());
1832  }
1833  }
1834 
1835  // Make sure that the back pointer stored either in the map itself or inside
1836  // its prototype transitions array is marked. Treat pointers in the descriptor
1837  // array as weak and also mark that array to prevent visiting it later.
1838  base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
1839 
1840  Object** descriptor_array_slot =
1842  Object* descriptor_array = *descriptor_array_slot;
1843  if (!descriptor_array->IsSmi()) {
1844  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
1845  }
1846 
1847  // Mark the Object* fields of the Map. Since the descriptor array has been
1848  // marked already, it is fine that one of these fields contains a pointer
1849  // to it. But make sure to skip back pointer and prototype transitions.
1852  Object** start_slot = HeapObject::RawField(
1854  Object** end_slot = HeapObject::RawField(
1856  for (Object** slot = start_slot; slot < end_slot; slot++) {
1857  Object* obj = *slot;
1858  if (!obj->NonFailureIsHeapObject()) continue;
1859  mark_compact_collector()->RecordSlot(start_slot, slot, obj);
1860  base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
1861  }
1862 }
1863 
1864 
1865 template <class T>
1867  // Empty descriptor array is marked as a root before any maps are marked.
1868  ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
1869 
1870  if (!base_marker()->MarkObjectWithoutPush(descriptors)) return;
1871  Object** descriptor_start = descriptors->data_start();
1872 
1873  // Since the descriptor array itself is not pushed for scanning, all fields
1874  // that point to objects manually have to be pushed, marked, and their slots
1875  // recorded.
1876  if (descriptors->HasEnumCache()) {
1877  Object** enum_cache_slot = descriptors->GetEnumCacheSlot();
1878  Object* enum_cache = *enum_cache_slot;
1879  base_marker()->MarkObjectAndPush(
1880  reinterpret_cast<HeapObject*>(enum_cache));
1881  mark_compact_collector()->RecordSlot(descriptor_start,
1882  enum_cache_slot,
1883  enum_cache);
1884  }
1885 
1886  // TODO(verwaest) Make sure we free unused transitions.
1887  if (descriptors->elements_transition_map() != NULL) {
1888  Object** transitions_slot = descriptors->GetTransitionsSlot();
1889  Object* transitions = *transitions_slot;
1890  base_marker()->MarkObjectAndPush(
1891  reinterpret_cast<HeapObject*>(transitions));
1892  mark_compact_collector()->RecordSlot(descriptor_start,
1893  transitions_slot,
1894  transitions);
1895  }
1896 
1897  // If the descriptor contains a transition (value is a Map), we don't mark the
1898  // value as live. It might be set to the NULL_DESCRIPTOR in
1899  // ClearNonLiveTransitions later.
1900  for (int i = 0; i < descriptors->number_of_descriptors(); ++i) {
1901  Object** key_slot = descriptors->GetKeySlot(i);
1902  Object* key = *key_slot;
1903  if (key->IsHeapObject()) {
1904  base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(key));
1905  mark_compact_collector()->RecordSlot(descriptor_start, key_slot, key);
1906  }
1907 
1908  Object** value_slot = descriptors->GetValueSlot(i);
1909  if (!(*value_slot)->IsHeapObject()) continue;
1910  HeapObject* value = HeapObject::cast(*value_slot);
1911 
1912  mark_compact_collector()->RecordSlot(descriptor_start,
1913  value_slot,
1914  value);
1915 
1916  PropertyDetails details(descriptors->GetDetails(i));
1917 
1918  switch (details.type()) {
1919  case NORMAL:
1920  case FIELD:
1921  case CONSTANT_FUNCTION:
1922  case HANDLER:
1923  case INTERCEPTOR:
1924  base_marker()->MarkObjectAndPush(value);
1925  break;
1926  case CALLBACKS:
1927  if (!value->IsAccessorPair()) {
1928  base_marker()->MarkObjectAndPush(value);
1929  } else if (base_marker()->MarkObjectWithoutPush(value)) {
1930  AccessorPair* accessors = AccessorPair::cast(value);
1931  MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
1932  MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
1933  }
1934  break;
1935  case MAP_TRANSITION:
1936  case CONSTANT_TRANSITION:
1937  case NULL_DESCRIPTOR:
1938  break;
1939  }
1940  }
1941 }
1942 
1943 
1944 template <class T>
1945 void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
1946  Object** slot = HeapObject::RawField(accessors, offset);
1947  HeapObject* accessor = HeapObject::cast(*slot);
1948  if (accessor->IsMap()) return;
1949  mark_compact_collector()->RecordSlot(slot, slot, accessor);
1950  base_marker()->MarkObjectAndPush(accessor);
1951 }
1952 
1953 
1954 // Fill the marking stack with overflowed objects returned by the given
1955 // iterator. Stop when the marking stack is filled or the end of the space
1956 // is reached, whichever comes first.
1957 template<class T>
1958 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1959  MarkingDeque* marking_deque,
1960  T* it) {
1961  // The caller should ensure that the marking stack is initially not full,
1962  // so that we don't waste effort pointlessly scanning for objects.
1963  ASSERT(!marking_deque->IsFull());
1964 
1965  Map* filler_map = heap->one_pointer_filler_map();
1966  for (HeapObject* object = it->Next();
1967  object != NULL;
1968  object = it->Next()) {
1969  MarkBit markbit = Marking::MarkBitFrom(object);
1970  if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1971  Marking::GreyToBlack(markbit);
1972  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1973  marking_deque->PushBlack(object);
1974  if (marking_deque->IsFull()) return;
1975  }
1976  }
1977 }
1978 
1979 
1980 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1981 
1982 
1983 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
1984  ASSERT(!marking_deque->IsFull());
1985  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1986  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1987  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1988  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1989 
1990  MarkBit::CellType* cells = p->markbits()->cells();
1991 
1992  int last_cell_index =
1993  Bitmap::IndexToCell(
1994  Bitmap::CellAlignIndex(
1995  p->AddressToMarkbitIndex(p->area_end())));
1996 
1997  Address cell_base = p->area_start();
1998  int cell_index = Bitmap::IndexToCell(
1999  Bitmap::CellAlignIndex(
2000  p->AddressToMarkbitIndex(cell_base)));
2001 
2002 
2003  for (;
2004  cell_index < last_cell_index;
2005  cell_index++, cell_base += 32 * kPointerSize) {
2006  ASSERT((unsigned)cell_index ==
2007  Bitmap::IndexToCell(
2008  Bitmap::CellAlignIndex(
2009  p->AddressToMarkbitIndex(cell_base))));
2010 
2011  const MarkBit::CellType current_cell = cells[cell_index];
2012  if (current_cell == 0) continue;
2013 
2014  const MarkBit::CellType next_cell = cells[cell_index + 1];
2015  MarkBit::CellType grey_objects = current_cell &
2016  ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
2017 
2018  int offset = 0;
2019  while (grey_objects != 0) {
2020  int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
2021  grey_objects >>= trailing_zeros;
2022  offset += trailing_zeros;
2023  MarkBit markbit(&cells[cell_index], 1 << offset, false);
2024  ASSERT(Marking::IsGrey(markbit));
2025  Marking::GreyToBlack(markbit);
2026  Address addr = cell_base + offset * kPointerSize;
2027  HeapObject* object = HeapObject::FromAddress(addr);
2028  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
2029  marking_deque->PushBlack(object);
2030  if (marking_deque->IsFull()) return;
2031  offset += 2;
2032  grey_objects >>= 2;
2033  }
2034 
2035  grey_objects >>= (Bitmap::kBitsPerCell - 1);
2036  }
2037 }
2038 
2039 
2040 static void DiscoverGreyObjectsInSpace(Heap* heap,
2041  MarkingDeque* marking_deque,
2042  PagedSpace* space) {
2043  if (!space->was_swept_conservatively()) {
2044  HeapObjectIterator it(space);
2045  DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2046  } else {
2047  PageIterator it(space);
2048  while (it.has_next()) {
2049  Page* p = it.next();
2050  DiscoverGreyObjectsOnPage(marking_deque, p);
2051  if (marking_deque->IsFull()) return;
2052  }
2053  }
2054 }
2055 
2056 
2057 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2058  Object* o = *p;
2059  if (!o->IsHeapObject()) return false;
2060  HeapObject* heap_object = HeapObject::cast(o);
2061  MarkBit mark = Marking::MarkBitFrom(heap_object);
2062  return !mark.Get();
2063 }
2064 
2065 
2066 void MarkCompactCollector::MarkSymbolTable() {
2067  SymbolTable* symbol_table = heap()->symbol_table();
2068  // Mark the symbol table itself.
2069  MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
2070  SetMark(symbol_table, symbol_table_mark);
2071  // Explicitly mark the prefix.
2072  MarkingVisitor marker(heap());
2073  symbol_table->IteratePrefix(&marker);
2074  ProcessMarkingDeque();
2075 }
2076 
2077 
2078 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2079  // Mark the heap roots including global variables, stack variables,
2080  // etc., and all objects reachable from them.
2082 
2083  // Handle the symbol table specially.
2084  MarkSymbolTable();
2085 
2086  // There may be overflowed objects in the heap. Visit them now.
2087  while (marking_deque_.overflowed()) {
2088  RefillMarkingDeque();
2089  EmptyMarkingDeque();
2090  }
2091 }
2092 
2093 
2094 void MarkCompactCollector::MarkObjectGroups() {
2095  List<ObjectGroup*>* object_groups =
2097 
2098  int last = 0;
2099  for (int i = 0; i < object_groups->length(); i++) {
2100  ObjectGroup* entry = object_groups->at(i);
2101  ASSERT(entry != NULL);
2102 
2103  Object*** objects = entry->objects_;
2104  bool group_marked = false;
2105  for (size_t j = 0; j < entry->length_; j++) {
2106  Object* object = *objects[j];
2107  if (object->IsHeapObject()) {
2108  HeapObject* heap_object = HeapObject::cast(object);
2109  MarkBit mark = Marking::MarkBitFrom(heap_object);
2110  if (mark.Get()) {
2111  group_marked = true;
2112  break;
2113  }
2114  }
2115  }
2116 
2117  if (!group_marked) {
2118  (*object_groups)[last++] = entry;
2119  continue;
2120  }
2121 
2122  // An object in the group is marked, so mark as grey all white heap
2123  // objects in the group.
2124  for (size_t j = 0; j < entry->length_; ++j) {
2125  Object* object = *objects[j];
2126  if (object->IsHeapObject()) {
2127  HeapObject* heap_object = HeapObject::cast(object);
2128  MarkBit mark = Marking::MarkBitFrom(heap_object);
2129  MarkObject(heap_object, mark);
2130  }
2131  }
2132 
2133  // Once the entire group has been colored grey, set the object group
2134  // to NULL so it won't be processed again.
2135  entry->Dispose();
2136  object_groups->at(i) = NULL;
2137  }
2138  object_groups->Rewind(last);
2139 }
2140 
2141 
2142 void MarkCompactCollector::MarkImplicitRefGroups() {
2143  List<ImplicitRefGroup*>* ref_groups =
2145 
2146  int last = 0;
2147  for (int i = 0; i < ref_groups->length(); i++) {
2148  ImplicitRefGroup* entry = ref_groups->at(i);
2149  ASSERT(entry != NULL);
2150 
2151  if (!IsMarked(*entry->parent_)) {
2152  (*ref_groups)[last++] = entry;
2153  continue;
2154  }
2155 
2156  Object*** children = entry->children_;
2157  // A parent object is marked, so mark all child heap objects.
2158  for (size_t j = 0; j < entry->length_; ++j) {
2159  if ((*children[j])->IsHeapObject()) {
2160  HeapObject* child = HeapObject::cast(*children[j]);
2161  MarkBit mark = Marking::MarkBitFrom(child);
2162  MarkObject(child, mark);
2163  }
2164  }
2165 
2166  // Once the entire group has been marked, dispose it because it's
2167  // not needed anymore.
2168  entry->Dispose();
2169  }
2170  ref_groups->Rewind(last);
2171 }
2172 
2173 
2174 // Mark all objects reachable from the objects on the marking stack.
2175 // Before: the marking stack contains zero or more heap object pointers.
2176 // After: the marking stack is empty, and all objects reachable from the
2177 // marking stack have been marked, or are overflowed in the heap.
2178 void MarkCompactCollector::EmptyMarkingDeque() {
2179  while (!marking_deque_.IsEmpty()) {
2180  while (!marking_deque_.IsEmpty()) {
2181  HeapObject* object = marking_deque_.Pop();
2182  ASSERT(object->IsHeapObject());
2183  ASSERT(heap()->Contains(object));
2185 
2186  Map* map = object->map();
2187  MarkBit map_mark = Marking::MarkBitFrom(map);
2188  MarkObject(map, map_mark);
2189 
2190  StaticMarkingVisitor::IterateBody(map, object);
2191  }
2192 
2193  // Process encountered weak maps, mark objects only reachable by those
2194  // weak maps and repeat until fix-point is reached.
2195  ProcessWeakMaps();
2196  }
2197 }
2198 
2199 
2200 // Sweep the heap for overflowed objects, clear their overflow bits, and
2201 // push them on the marking stack. Stop early if the marking stack fills
2202 // before sweeping completes. If sweeping completes, there are no remaining
2203 // overflowed objects in the heap so the overflow flag on the markings stack
2204 // is cleared.
2205 void MarkCompactCollector::RefillMarkingDeque() {
2206  ASSERT(marking_deque_.overflowed());
2207 
2208  SemiSpaceIterator new_it(heap()->new_space());
2209  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
2210  if (marking_deque_.IsFull()) return;
2211 
2212  DiscoverGreyObjectsInSpace(heap(),
2213  &marking_deque_,
2214  heap()->old_pointer_space());
2215  if (marking_deque_.IsFull()) return;
2216 
2217  DiscoverGreyObjectsInSpace(heap(),
2218  &marking_deque_,
2219  heap()->old_data_space());
2220  if (marking_deque_.IsFull()) return;
2221 
2222  DiscoverGreyObjectsInSpace(heap(),
2223  &marking_deque_,
2224  heap()->code_space());
2225  if (marking_deque_.IsFull()) return;
2226 
2227  DiscoverGreyObjectsInSpace(heap(),
2228  &marking_deque_,
2229  heap()->map_space());
2230  if (marking_deque_.IsFull()) return;
2231 
2232  DiscoverGreyObjectsInSpace(heap(),
2233  &marking_deque_,
2234  heap()->cell_space());
2235  if (marking_deque_.IsFull()) return;
2236 
2237  LargeObjectIterator lo_it(heap()->lo_space());
2238  DiscoverGreyObjectsWithIterator(heap(),
2239  &marking_deque_,
2240  &lo_it);
2241  if (marking_deque_.IsFull()) return;
2242 
2243  marking_deque_.ClearOverflowed();
2244 }
2245 
2246 
2247 // Mark all objects reachable (transitively) from objects on the marking
2248 // stack. Before: the marking stack contains zero or more heap object
2249 // pointers. After: the marking stack is empty and there are no overflowed
2250 // objects in the heap.
2251 void MarkCompactCollector::ProcessMarkingDeque() {
2252  EmptyMarkingDeque();
2253  while (marking_deque_.overflowed()) {
2254  RefillMarkingDeque();
2255  EmptyMarkingDeque();
2256  }
2257 }
2258 
2259 
2260 void MarkCompactCollector::ProcessExternalMarking() {
2261  bool work_to_do = true;
2262  ASSERT(marking_deque_.IsEmpty());
2263  while (work_to_do) {
2264  MarkObjectGroups();
2265  MarkImplicitRefGroups();
2266  work_to_do = !marking_deque_.IsEmpty();
2267  ProcessMarkingDeque();
2268  }
2269 }
2270 
2271 
2272 void MarkCompactCollector::MarkLiveObjects() {
2273  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2274  // The recursive GC marker detects when it is nearing stack overflow,
2275  // and switches to a different marking system. JS interrupts interfere
2276  // with the C stack limit check.
2277  PostponeInterruptsScope postpone(heap()->isolate());
2278 
2279  bool incremental_marking_overflowed = false;
2280  IncrementalMarking* incremental_marking = heap_->incremental_marking();
2281  if (was_marked_incrementally_) {
2282  // Finalize the incremental marking and check whether we had an overflow.
2283  // Both markers use grey color to mark overflowed objects so
2284  // non-incremental marker can deal with them as if overflow
2285  // occured during normal marking.
2286  // But incremental marker uses a separate marking deque
2287  // so we have to explicitly copy it's overflow state.
2288  incremental_marking->Finalize();
2289  incremental_marking_overflowed =
2290  incremental_marking->marking_deque()->overflowed();
2291  incremental_marking->marking_deque()->ClearOverflowed();
2292  } else {
2293  // Abort any pending incremental activities e.g. incremental sweeping.
2294  incremental_marking->Abort();
2295  }
2296 
2297 #ifdef DEBUG
2298  ASSERT(state_ == PREPARE_GC);
2299  state_ = MARK_LIVE_OBJECTS;
2300 #endif
2301  // The to space contains live objects, a page in from space is used as a
2302  // marking stack.
2303  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2304  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2305  if (FLAG_force_marking_deque_overflows) {
2306  marking_deque_end = marking_deque_start + 64 * kPointerSize;
2307  }
2308  marking_deque_.Initialize(marking_deque_start,
2309  marking_deque_end);
2310  ASSERT(!marking_deque_.overflowed());
2311 
2312  if (incremental_marking_overflowed) {
2313  // There are overflowed objects left in the heap after incremental marking.
2314  marking_deque_.SetOverflowed();
2315  }
2316 
2317  PrepareForCodeFlushing();
2318 
2319  if (was_marked_incrementally_) {
2320  // There is no write barrier on cells so we have to scan them now at the end
2321  // of the incremental marking.
2322  {
2323  HeapObjectIterator cell_iterator(heap()->cell_space());
2324  HeapObject* cell;
2325  while ((cell = cell_iterator.Next()) != NULL) {
2326  ASSERT(cell->IsJSGlobalPropertyCell());
2327  if (IsMarked(cell)) {
2329  StaticMarkingVisitor::VisitPointer(
2330  heap(),
2331  reinterpret_cast<Object**>(cell->address() + offset));
2332  }
2333  }
2334  }
2335  }
2336 
2337  RootMarkingVisitor root_visitor(heap());
2338  MarkRoots(&root_visitor);
2339 
2340  // The objects reachable from the roots are marked, yet unreachable
2341  // objects are unmarked. Mark objects reachable due to host
2342  // application specific logic.
2343  ProcessExternalMarking();
2344 
2345  // The objects reachable from the roots or object groups are marked,
2346  // yet unreachable objects are unmarked. Mark objects reachable
2347  // only from weak global handles.
2348  //
2349  // First we identify nonlive weak handles and mark them as pending
2350  // destruction.
2352  &IsUnmarkedHeapObject);
2353  // Then we mark the objects and process the transitive closure.
2354  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2355  while (marking_deque_.overflowed()) {
2356  RefillMarkingDeque();
2357  EmptyMarkingDeque();
2358  }
2359 
2360  // Repeat host application specific marking to mark unmarked objects
2361  // reachable from the weak roots.
2362  ProcessExternalMarking();
2363 
2364  AfterMarking();
2365 }
2366 
2367 
2368 void MarkCompactCollector::AfterMarking() {
2369  // Object literal map caches reference symbols (cache keys) and maps
2370  // (cache values). At this point still useful maps have already been
2371  // marked. Mark the keys for the alive values before we process the
2372  // symbol table.
2373  ProcessMapCaches();
2374 
2375  // Prune the symbol table removing all symbols only pointed to by the
2376  // symbol table. Cannot use symbol_table() here because the symbol
2377  // table is marked.
2378  SymbolTable* symbol_table = heap()->symbol_table();
2379  SymbolTableCleaner v(heap());
2380  symbol_table->IterateElements(&v);
2381  symbol_table->ElementsRemoved(v.PointersRemoved());
2382  heap()->external_string_table_.Iterate(&v);
2383  heap()->external_string_table_.CleanUp();
2384 
2385  // Process the weak references.
2386  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2387  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2388 
2389  // Remove object groups after marking phase.
2392 
2393  // Flush code from collected candidates.
2394  if (is_code_flushing_enabled()) {
2395  code_flusher_->ProcessCandidates();
2396  }
2397 
2398  if (!FLAG_watch_ic_patching) {
2399  // Clean up dead objects from the runtime profiler.
2401  }
2402 }
2403 
2404 
2405 void MarkCompactCollector::ProcessMapCaches() {
2406  Object* raw_context = heap()->global_contexts_list_;
2407  while (raw_context != heap()->undefined_value()) {
2408  Context* context = reinterpret_cast<Context*>(raw_context);
2409  if (IsMarked(context)) {
2410  HeapObject* raw_map_cache =
2412  // A map cache may be reachable from the stack. In this case
2413  // it's already transitively marked and it's too late to clean
2414  // up its parts.
2415  if (!IsMarked(raw_map_cache) &&
2416  raw_map_cache != heap()->undefined_value()) {
2417  MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2418  int existing_elements = map_cache->NumberOfElements();
2419  int used_elements = 0;
2420  for (int i = MapCache::kElementsStartIndex;
2421  i < map_cache->length();
2422  i += MapCache::kEntrySize) {
2423  Object* raw_key = map_cache->get(i);
2424  if (raw_key == heap()->undefined_value() ||
2425  raw_key == heap()->the_hole_value()) continue;
2427  Object* raw_map = map_cache->get(i + 1);
2428  if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2429  ++used_elements;
2430  } else {
2431  // Delete useless entries with unmarked maps.
2432  ASSERT(raw_map->IsMap());
2433  map_cache->set_the_hole(i);
2434  map_cache->set_the_hole(i + 1);
2435  }
2436  }
2437  if (used_elements == 0) {
2438  context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2439  } else {
2440  // Note: we don't actually shrink the cache here to avoid
2441  // extra complexity during GC. We rely on subsequent cache
2442  // usages (EnsureCapacity) to do this.
2443  map_cache->ElementsRemoved(existing_elements - used_elements);
2444  MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2445  MarkObject(map_cache, map_cache_markbit);
2446  }
2447  }
2448  }
2449  // Move to next element in the list.
2450  raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2451  }
2452  ProcessMarkingDeque();
2453 }
2454 
2455 
2456 void MarkCompactCollector::ReattachInitialMaps() {
2457  HeapObjectIterator map_iterator(heap()->map_space());
2458  for (HeapObject* obj = map_iterator.Next();
2459  obj != NULL;
2460  obj = map_iterator.Next()) {
2461  if (obj->IsFreeSpace()) continue;
2462  Map* map = Map::cast(obj);
2463 
2465  if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2466 
2467  if (map->attached_to_shared_function_info()) {
2468  JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2469  }
2470  }
2471 }
2472 
2473 
2474 void MarkCompactCollector::ClearNonLiveTransitions() {
2475  HeapObjectIterator map_iterator(heap()->map_space());
2476  // Iterate over the map space, setting map transitions that go from
2477  // a marked map to an unmarked map to null transitions. This action
2478  // is carried out only on maps of JSObjects and related subtypes.
2479  for (HeapObject* obj = map_iterator.Next();
2480  obj != NULL; obj = map_iterator.Next()) {
2481  Map* map = reinterpret_cast<Map*>(obj);
2482  MarkBit map_mark = Marking::MarkBitFrom(map);
2483  if (map->IsFreeSpace()) continue;
2484 
2485  ASSERT(map->IsMap());
2486  // Only JSObject and subtypes have map transitions and back pointers.
2488  if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
2489 
2490  if (map_mark.Get() &&
2491  map->attached_to_shared_function_info()) {
2492  // This map is used for inobject slack tracking and has been detached
2493  // from SharedFunctionInfo during the mark phase.
2494  // Since it survived the GC, reattach it now.
2495  map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
2496  }
2497 
2498  ClearNonLivePrototypeTransitions(map);
2499  ClearNonLiveMapTransitions(map, map_mark);
2500  }
2501 }
2502 
2503 
2504 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2505  int number_of_transitions = map->NumberOfProtoTransitions();
2506  FixedArray* prototype_transitions = map->prototype_transitions();
2507 
2508  int new_number_of_transitions = 0;
2509  const int header = Map::kProtoTransitionHeaderSize;
2510  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2511  const int map_offset = header + Map::kProtoTransitionMapOffset;
2512  const int step = Map::kProtoTransitionElementsPerEntry;
2513  for (int i = 0; i < number_of_transitions; i++) {
2514  Object* prototype = prototype_transitions->get(proto_offset + i * step);
2515  Object* cached_map = prototype_transitions->get(map_offset + i * step);
2516  if (IsMarked(prototype) && IsMarked(cached_map)) {
2517  int proto_index = proto_offset + new_number_of_transitions * step;
2518  int map_index = map_offset + new_number_of_transitions * step;
2519  if (new_number_of_transitions != i) {
2520  prototype_transitions->set_unchecked(
2521  heap_,
2522  proto_index,
2523  prototype,
2525  prototype_transitions->set_unchecked(
2526  heap_,
2527  map_index,
2528  cached_map,
2530  }
2531  Object** slot =
2532  HeapObject::RawField(prototype_transitions,
2533  FixedArray::OffsetOfElementAt(proto_index));
2534  RecordSlot(slot, slot, prototype);
2535  new_number_of_transitions++;
2536  }
2537  }
2538 
2539  if (new_number_of_transitions != number_of_transitions) {
2540  map->SetNumberOfProtoTransitions(new_number_of_transitions);
2541  }
2542 
2543  // Fill slots that became free with undefined value.
2544  for (int i = new_number_of_transitions * step;
2545  i < number_of_transitions * step;
2546  i++) {
2547  prototype_transitions->set_undefined(heap_, header + i);
2548  }
2549 }
2550 
2551 
2552 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2553  MarkBit map_mark) {
2554  Object* potential_parent = map->GetBackPointer();
2555  if (!potential_parent->IsMap()) return;
2556  Map* parent = Map::cast(potential_parent);
2557 
2558  // Follow back pointer, check whether we are dealing with a map transition
2559  // from a live map to a dead path and in case clear transitions of parent.
2560  bool current_is_alive = map_mark.Get();
2561  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2562  if (!current_is_alive && parent_is_alive) {
2563  parent->ClearNonLiveTransitions(heap());
2564  }
2565 }
2566 
2567 
2568 void MarkCompactCollector::ProcessWeakMaps() {
2569  Object* weak_map_obj = encountered_weak_maps();
2570  while (weak_map_obj != Smi::FromInt(0)) {
2572  JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2573  ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2574  Object** anchor = reinterpret_cast<Object**>(table->address());
2575  for (int i = 0; i < table->Capacity(); i++) {
2576  if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2577  Object** key_slot =
2580  RecordSlot(anchor, key_slot, *key_slot);
2581  Object** value_slot =
2583  ObjectHashTable::EntryToValueIndex(i)));
2584  StaticMarkingVisitor::MarkObjectByPointer(this, anchor, value_slot);
2585  }
2586  }
2587  weak_map_obj = weak_map->next();
2588  }
2589 }
2590 
2591 
2592 void MarkCompactCollector::ClearWeakMaps() {
2593  Object* weak_map_obj = encountered_weak_maps();
2594  while (weak_map_obj != Smi::FromInt(0)) {
2596  JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2597  ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2598  for (int i = 0; i < table->Capacity(); i++) {
2599  if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2600  table->RemoveEntry(i);
2601  }
2602  }
2603  weak_map_obj = weak_map->next();
2604  weak_map->set_next(Smi::FromInt(0));
2605  }
2607 }
2608 
2609 
2610 // We scavange new space simultaneously with sweeping. This is done in two
2611 // passes.
2612 //
2613 // The first pass migrates all alive objects from one semispace to another or
2614 // promotes them to old space. Forwarding address is written directly into
2615 // first word of object without any encoding. If object is dead we write
2616 // NULL as a forwarding address.
2617 //
2618 // The second pass updates pointers to new space in all spaces. It is possible
2619 // to encounter pointers to dead new space objects during traversal of pointers
2620 // to new space. We should clear them to avoid encountering them during next
2621 // pointer iteration. This is an issue if the store buffer overflows and we
2622 // have to scan the entire old space, including dead objects, looking for
2623 // pointers to new space.
2625  Address src,
2626  int size,
2627  AllocationSpace dest) {
2628  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2629  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2630  Address src_slot = src;
2631  Address dst_slot = dst;
2632  ASSERT(IsAligned(size, kPointerSize));
2633 
2634  for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2635  Object* value = Memory::Object_at(src_slot);
2636 
2637  Memory::Object_at(dst_slot) = value;
2638 
2639  if (heap_->InNewSpace(value)) {
2640  heap_->store_buffer()->Mark(dst_slot);
2641  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2642  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2643  &migration_slots_buffer_,
2644  reinterpret_cast<Object**>(dst_slot),
2646  }
2647 
2648  src_slot += kPointerSize;
2649  dst_slot += kPointerSize;
2650  }
2651 
2652  if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2653  Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2654  Address code_entry = Memory::Address_at(code_entry_slot);
2655 
2656  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2657  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2658  &migration_slots_buffer_,
2660  code_entry_slot,
2662  }
2663  }
2664  } else if (dest == CODE_SPACE) {
2665  PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2666  heap()->MoveBlock(dst, src, size);
2667  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2668  &migration_slots_buffer_,
2670  dst,
2672  Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2673  } else {
2674  ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2675  heap()->MoveBlock(dst, src, size);
2676  }
2677  Memory::Address_at(src) = dst;
2678 }
2679 
2680 
2681 // Visitor for updating pointers from live objects in old spaces to new space.
2682 // It does not expect to encounter pointers to dead objects.
2683 class PointersUpdatingVisitor: public ObjectVisitor {
2684  public:
2685  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2686 
2687  void VisitPointer(Object** p) {
2688  UpdatePointer(p);
2689  }
2690 
2691  void VisitPointers(Object** start, Object** end) {
2692  for (Object** p = start; p < end; p++) UpdatePointer(p);
2693  }
2694 
2695  void VisitEmbeddedPointer(RelocInfo* rinfo) {
2696  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2697  Object* target = rinfo->target_object();
2698  VisitPointer(&target);
2699  rinfo->set_target_object(target);
2700  }
2701 
2702  void VisitCodeTarget(RelocInfo* rinfo) {
2703  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2704  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2705  VisitPointer(&target);
2706  rinfo->set_target_address(Code::cast(target)->instruction_start());
2707  }
2708 
2709  void VisitDebugTarget(RelocInfo* rinfo) {
2710  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2711  rinfo->IsPatchedReturnSequence()) ||
2712  (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2713  rinfo->IsPatchedDebugBreakSlotSequence()));
2714  Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2715  VisitPointer(&target);
2716  rinfo->set_call_address(Code::cast(target)->instruction_start());
2717  }
2718 
2719  static inline void UpdateSlot(Heap* heap, Object** slot) {
2720  Object* obj = *slot;
2721 
2722  if (!obj->IsHeapObject()) return;
2723 
2724  HeapObject* heap_obj = HeapObject::cast(obj);
2725 
2726  MapWord map_word = heap_obj->map_word();
2727  if (map_word.IsForwardingAddress()) {
2728  ASSERT(heap->InFromSpace(heap_obj) ||
2729  MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2730  HeapObject* target = map_word.ToForwardingAddress();
2731  *slot = target;
2732  ASSERT(!heap->InFromSpace(target) &&
2733  !MarkCompactCollector::IsOnEvacuationCandidate(target));
2734  }
2735  }
2736 
2737  private:
2738  inline void UpdatePointer(Object** p) {
2739  UpdateSlot(heap_, p);
2740  }
2741 
2742  Heap* heap_;
2743 };
2744 
2745 
2746 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2747  ASSERT(*p == object);
2748 
2749  Address old_addr = object->address();
2750 
2751  Address new_addr = Memory::Address_at(old_addr);
2752 
2753  // The new space sweep will overwrite the map word of dead objects
2754  // with NULL. In this case we do not need to transfer this entry to
2755  // the store buffer which we are rebuilding.
2756  if (new_addr != NULL) {
2757  *p = HeapObject::FromAddress(new_addr);
2758  } else {
2759  // We have to zap this pointer, because the store buffer may overflow later,
2760  // and then we have to scan the entire heap and we don't want to find
2761  // spurious newspace pointers in the old space.
2762  // TODO(mstarzinger): This was changed to a sentinel value to track down
2763  // rare crashes, change it back to Smi::FromInt(0) later.
2764  *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
2765  }
2766 }
2767 
2768 
2769 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2770  Object** p) {
2771  MapWord map_word = HeapObject::cast(*p)->map_word();
2772 
2773  if (map_word.IsForwardingAddress()) {
2774  return String::cast(map_word.ToForwardingAddress());
2775  }
2776 
2777  return String::cast(*p);
2778 }
2779 
2780 
2782  int object_size) {
2783  Object* result;
2784 
2785  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
2786  MaybeObject* maybe_result =
2787  heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
2788  if (maybe_result->ToObject(&result)) {
2789  HeapObject* target = HeapObject::cast(result);
2790  MigrateObject(target->address(),
2791  object->address(),
2792  object_size,
2793  LO_SPACE);
2795  increment_promoted_objects_size(object_size);
2796  return true;
2797  }
2798  } else {
2799  OldSpace* target_space = heap()->TargetSpace(object);
2800 
2801  ASSERT(target_space == heap()->old_pointer_space() ||
2802  target_space == heap()->old_data_space());
2803  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2804  if (maybe_result->ToObject(&result)) {
2805  HeapObject* target = HeapObject::cast(result);
2806  MigrateObject(target->address(),
2807  object->address(),
2808  object_size,
2809  target_space->identity());
2811  increment_promoted_objects_size(object_size);
2812  return true;
2813  }
2814  }
2815 
2816  return false;
2817 }
2818 
2819 
2820 void MarkCompactCollector::EvacuateNewSpace() {
2821  // There are soft limits in the allocation code, designed trigger a mark
2822  // sweep collection by failing allocations. But since we are already in
2823  // a mark-sweep allocation, there is no sense in trying to trigger one.
2824  AlwaysAllocateScope scope;
2826 
2827  NewSpace* new_space = heap()->new_space();
2828 
2829  // Store allocation range before flipping semispaces.
2830  Address from_bottom = new_space->bottom();
2831  Address from_top = new_space->top();
2832 
2833  // Flip the semispaces. After flipping, to space is empty, from space has
2834  // live objects.
2835  new_space->Flip();
2836  new_space->ResetAllocationInfo();
2837 
2838  int survivors_size = 0;
2839 
2840  // First pass: traverse all objects in inactive semispace, remove marks,
2841  // migrate live objects and write forwarding addresses. This stage puts
2842  // new entries in the store buffer and may cause some pages to be marked
2843  // scan-on-scavenge.
2844  SemiSpaceIterator from_it(from_bottom, from_top);
2845  for (HeapObject* object = from_it.Next();
2846  object != NULL;
2847  object = from_it.Next()) {
2848  MarkBit mark_bit = Marking::MarkBitFrom(object);
2849  if (mark_bit.Get()) {
2850  mark_bit.Clear();
2851  // Don't bother decrementing live bytes count. We'll discard the
2852  // entire page at the end.
2853  int size = object->Size();
2854  survivors_size += size;
2855 
2856  // Aggressively promote young survivors to the old space.
2857  if (TryPromoteObject(object, size)) {
2858  continue;
2859  }
2860 
2861  // Promotion failed. Just migrate object to another semispace.
2862  MaybeObject* allocation = new_space->AllocateRaw(size);
2863  if (allocation->IsFailure()) {
2864  if (!new_space->AddFreshPage()) {
2865  // Shouldn't happen. We are sweeping linearly, and to-space
2866  // has the same number of pages as from-space, so there is
2867  // always room.
2868  UNREACHABLE();
2869  }
2870  allocation = new_space->AllocateRaw(size);
2871  ASSERT(!allocation->IsFailure());
2872  }
2873  Object* target = allocation->ToObjectUnchecked();
2874 
2875  MigrateObject(HeapObject::cast(target)->address(),
2876  object->address(),
2877  size,
2878  NEW_SPACE);
2879  } else {
2880  // Process the dead object before we write a NULL into its header.
2882 
2883  // Mark dead objects in the new space with null in their map field.
2884  Memory::Address_at(object->address()) = NULL;
2885  }
2886  }
2887 
2888  heap_->IncrementYoungSurvivorsCounter(survivors_size);
2889  new_space->set_age_mark(new_space->top());
2890 }
2891 
2892 
2893 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2894  AlwaysAllocateScope always_allocate;
2895  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2896  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2897  MarkBit::CellType* cells = p->markbits()->cells();
2898  p->MarkSweptPrecisely();
2899 
2900  int last_cell_index =
2901  Bitmap::IndexToCell(
2902  Bitmap::CellAlignIndex(
2903  p->AddressToMarkbitIndex(p->area_end())));
2904 
2905  Address cell_base = p->area_start();
2906  int cell_index = Bitmap::IndexToCell(
2907  Bitmap::CellAlignIndex(
2908  p->AddressToMarkbitIndex(cell_base)));
2909 
2910  int offsets[16];
2911 
2912  for (;
2913  cell_index < last_cell_index;
2914  cell_index++, cell_base += 32 * kPointerSize) {
2915  ASSERT((unsigned)cell_index ==
2916  Bitmap::IndexToCell(
2917  Bitmap::CellAlignIndex(
2918  p->AddressToMarkbitIndex(cell_base))));
2919  if (cells[cell_index] == 0) continue;
2920 
2921  int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2922  for (int i = 0; i < live_objects; i++) {
2923  Address object_addr = cell_base + offsets[i] * kPointerSize;
2924  HeapObject* object = HeapObject::FromAddress(object_addr);
2926 
2927  int size = object->Size();
2928 
2929  MaybeObject* target = space->AllocateRaw(size);
2930  if (target->IsFailure()) {
2931  // OS refused to give us memory.
2932  V8::FatalProcessOutOfMemory("Evacuation");
2933  return;
2934  }
2935 
2936  Object* target_object = target->ToObjectUnchecked();
2937 
2938  MigrateObject(HeapObject::cast(target_object)->address(),
2939  object_addr,
2940  size,
2941  space->identity());
2942  ASSERT(object->map_word().IsForwardingAddress());
2943  }
2944 
2945  // Clear marking bits for current cell.
2946  cells[cell_index] = 0;
2947  }
2948  p->ResetLiveBytes();
2949 }
2950 
2951 
2952 void MarkCompactCollector::EvacuatePages() {
2953  int npages = evacuation_candidates_.length();
2954  for (int i = 0; i < npages; i++) {
2955  Page* p = evacuation_candidates_[i];
2956  ASSERT(p->IsEvacuationCandidate() ||
2957  p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2958  if (p->IsEvacuationCandidate()) {
2959  // During compaction we might have to request a new page.
2960  // Check that space still have room for that.
2961  if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
2962  EvacuateLiveObjectsFromPage(p);
2963  } else {
2964  // Without room for expansion evacuation is not guaranteed to succeed.
2965  // Pessimistically abandon unevacuated pages.
2966  for (int j = i; j < npages; j++) {
2967  Page* page = evacuation_candidates_[j];
2968  slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
2969  page->ClearEvacuationCandidate();
2970  page->SetFlag(Page::RESCAN_ON_EVACUATION);
2971  }
2972  return;
2973  }
2974  }
2975  }
2976 }
2977 
2978 
2980  public:
2981  virtual Object* RetainAs(Object* object) {
2982  if (object->IsHeapObject()) {
2983  HeapObject* heap_object = HeapObject::cast(object);
2984  MapWord map_word = heap_object->map_word();
2985  if (map_word.IsForwardingAddress()) {
2986  return map_word.ToForwardingAddress();
2987  }
2988  }
2989  return object;
2990  }
2991 };
2992 
2993 
2994 static inline void UpdateSlot(ObjectVisitor* v,
2995  SlotsBuffer::SlotType slot_type,
2996  Address addr) {
2997  switch (slot_type) {
2999  RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3000  rinfo.Visit(v);
3001  break;
3002  }
3004  v->VisitCodeEntry(addr);
3005  break;
3006  }
3008  HeapObject* obj = HeapObject::FromAddress(addr);
3009  Code::cast(obj)->CodeIterateBody(v);
3010  break;
3011  }
3013  RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3014  if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
3015  break;
3016  }
3018  RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3019  if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
3020  break;
3021  }
3023  RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3024  rinfo.Visit(v);
3025  break;
3026  }
3027  default:
3028  UNREACHABLE();
3029  break;
3030  }
3031 }
3032 
3033 
3037 };
3038 
3039 
3043 };
3044 
3045 
3046 // Sweep a space precisely. After this has been done the space can
3047 // be iterated precisely, hitting only the live objects. Code space
3048 // is always swept precisely because we want to be able to iterate
3049 // over it. Map space is swept precisely, because it is not compacted.
3050 // Slots in live objects pointing into evacuation candidates are updated
3051 // if requested.
3052 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3053 static void SweepPrecisely(PagedSpace* space,
3054  Page* p,
3055  ObjectVisitor* v) {
3056  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3057  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3058  space->identity() == CODE_SPACE);
3059  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3060 
3061  MarkBit::CellType* cells = p->markbits()->cells();
3062  p->MarkSweptPrecisely();
3063 
3064  int last_cell_index =
3065  Bitmap::IndexToCell(
3066  Bitmap::CellAlignIndex(
3067  p->AddressToMarkbitIndex(p->area_end())));
3068 
3069  Address free_start = p->area_start();
3070  int cell_index =
3071  Bitmap::IndexToCell(
3072  Bitmap::CellAlignIndex(
3073  p->AddressToMarkbitIndex(free_start)));
3074 
3075  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3076  Address object_address = free_start;
3077  int offsets[16];
3078 
3079  SkipList* skip_list = p->skip_list();
3080  int curr_region = -1;
3081  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3082  skip_list->Clear();
3083  }
3084 
3085  for (;
3086  cell_index < last_cell_index;
3087  cell_index++, object_address += 32 * kPointerSize) {
3088  ASSERT((unsigned)cell_index ==
3089  Bitmap::IndexToCell(
3090  Bitmap::CellAlignIndex(
3091  p->AddressToMarkbitIndex(object_address))));
3092  int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
3093  int live_index = 0;
3094  for ( ; live_objects != 0; live_objects--) {
3095  Address free_end = object_address + offsets[live_index++] * kPointerSize;
3096  if (free_end != free_start) {
3097  space->Free(free_start, static_cast<int>(free_end - free_start));
3098  }
3099  HeapObject* live_object = HeapObject::FromAddress(free_end);
3101  Map* map = live_object->map();
3102  int size = live_object->SizeFromMap(map);
3103  if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3104  live_object->IterateBody(map->instance_type(), size, v);
3105  }
3106  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3107  int new_region_start =
3108  SkipList::RegionNumber(free_end);
3109  int new_region_end =
3110  SkipList::RegionNumber(free_end + size - kPointerSize);
3111  if (new_region_start != curr_region ||
3112  new_region_end != curr_region) {
3113  skip_list->AddObject(free_end, size);
3114  curr_region = new_region_end;
3115  }
3116  }
3117  free_start = free_end + size;
3118  }
3119  // Clear marking bits for current cell.
3120  cells[cell_index] = 0;
3121  }
3122  if (free_start != p->area_end()) {
3123  space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3124  }
3125  p->ResetLiveBytes();
3126 }
3127 
3128 
3129 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3130  Page* p = Page::FromAddress(code->address());
3131 
3132  if (p->IsEvacuationCandidate() ||
3133  p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3134  return false;
3135  }
3136 
3137  Address code_start = code->address();
3138  Address code_end = code_start + code->Size();
3139 
3140  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3141  uint32_t end_index =
3142  MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3143 
3144  Bitmap* b = p->markbits();
3145 
3146  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3147  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3148 
3149  MarkBit::CellType* start_cell = start_mark_bit.cell();
3150  MarkBit::CellType* end_cell = end_mark_bit.cell();
3151 
3152  if (value) {
3153  MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3154  MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3155 
3156  if (start_cell == end_cell) {
3157  *start_cell |= start_mask & end_mask;
3158  } else {
3159  *start_cell |= start_mask;
3160  for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3161  *cell = ~0;
3162  }
3163  *end_cell |= end_mask;
3164  }
3165  } else {
3166  for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3167  *cell = 0;
3168  }
3169  }
3170 
3171  return true;
3172 }
3173 
3174 
3175 static bool IsOnInvalidatedCodeObject(Address addr) {
3176  // We did not record any slots in large objects thus
3177  // we can safely go to the page from the slot address.
3178  Page* p = Page::FromAddress(addr);
3179 
3180  // First check owner's identity because old pointer and old data spaces
3181  // are swept lazily and might still have non-zero mark-bits on some
3182  // pages.
3183  if (p->owner()->identity() != CODE_SPACE) return false;
3184 
3185  // In code space only bits on evacuation candidates (but we don't record
3186  // any slots on them) and under invalidated code objects are non-zero.
3187  MarkBit mark_bit =
3188  p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3189 
3190  return mark_bit.Get();
3191 }
3192 
3193 
3195  if (heap_->incremental_marking()->IsCompacting() &&
3196  !ShouldSkipEvacuationSlotRecording(code)) {
3197  ASSERT(compacting_);
3198 
3199  // If the object is white than no slots were recorded on it yet.
3200  MarkBit mark_bit = Marking::MarkBitFrom(code);
3201  if (Marking::IsWhite(mark_bit)) return;
3202 
3203  invalidated_code_.Add(code);
3204  }
3205 }
3206 
3207 
3208 bool MarkCompactCollector::MarkInvalidatedCode() {
3209  bool code_marked = false;
3210 
3211  int length = invalidated_code_.length();
3212  for (int i = 0; i < length; i++) {
3213  Code* code = invalidated_code_[i];
3214 
3215  if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3216  code_marked = true;
3217  }
3218  }
3219 
3220  return code_marked;
3221 }
3222 
3223 
3224 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3225  int length = invalidated_code_.length();
3226  for (int i = 0; i < length; i++) {
3227  if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3228  }
3229 }
3230 
3231 
3232 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3233  int length = invalidated_code_.length();
3234  for (int i = 0; i < length; i++) {
3235  Code* code = invalidated_code_[i];
3236  if (code != NULL) {
3237  code->Iterate(visitor);
3238  SetMarkBitsUnderInvalidatedCode(code, false);
3239  }
3240  }
3241  invalidated_code_.Rewind(0);
3242 }
3243 
3244 
3245 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3246  bool code_slots_filtering_required;
3247  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3248  code_slots_filtering_required = MarkInvalidatedCode();
3249 
3250  EvacuateNewSpace();
3251  }
3252 
3253 
3254  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3255  EvacuatePages();
3256  }
3257 
3258  // Second pass: find pointers to new space and update them.
3259  PointersUpdatingVisitor updating_visitor(heap());
3260 
3261  { GCTracer::Scope gc_scope(tracer_,
3262  GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3263  // Update pointers in to space.
3264  SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3265  heap()->new_space()->top());
3266  for (HeapObject* object = to_it.Next();
3267  object != NULL;
3268  object = to_it.Next()) {
3269  Map* map = object->map();
3270  object->IterateBody(map->instance_type(),
3271  object->SizeFromMap(map),
3272  &updating_visitor);
3273  }
3274  }
3275 
3276  { GCTracer::Scope gc_scope(tracer_,
3277  GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3278  // Update roots.
3279  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3280  LiveObjectList::IterateElements(&updating_visitor);
3281  }
3282 
3283  { GCTracer::Scope gc_scope(tracer_,
3284  GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3285  StoreBufferRebuildScope scope(heap_,
3286  heap_->store_buffer(),
3287  &Heap::ScavengeStoreBufferCallback);
3288  heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3289  }
3290 
3291  { GCTracer::Scope gc_scope(tracer_,
3292  GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3294  migration_slots_buffer_,
3295  code_slots_filtering_required);
3296  if (FLAG_trace_fragmentation) {
3297  PrintF(" migration slots buffer: %d\n",
3298  SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3299  }
3300 
3301  if (compacting_ && was_marked_incrementally_) {
3302  // It's difficult to filter out slots recorded for large objects.
3303  LargeObjectIterator it(heap_->lo_space());
3304  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3305  // LargeObjectSpace is not swept yet thus we have to skip
3306  // dead objects explicitly.
3307  if (!IsMarked(obj)) continue;
3308 
3309  Page* p = Page::FromAddress(obj->address());
3310  if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3311  obj->Iterate(&updating_visitor);
3313  }
3314  }
3315  }
3316  }
3317 
3318  int npages = evacuation_candidates_.length();
3319  { GCTracer::Scope gc_scope(
3320  tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3321  for (int i = 0; i < npages; i++) {
3322  Page* p = evacuation_candidates_[i];
3323  ASSERT(p->IsEvacuationCandidate() ||
3324  p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3325 
3326  if (p->IsEvacuationCandidate()) {
3328  p->slots_buffer(),
3329  code_slots_filtering_required);
3330  if (FLAG_trace_fragmentation) {
3331  PrintF(" page %p slots buffer: %d\n",
3332  reinterpret_cast<void*>(p),
3333  SlotsBuffer::SizeOfChain(p->slots_buffer()));
3334  }
3335 
3336  // Important: skip list should be cleared only after roots were updated
3337  // because root iteration traverses the stack and might have to find
3338  // code objects from non-updated pc pointing into evacuation candidate.
3339  SkipList* list = p->skip_list();
3340  if (list != NULL) list->Clear();
3341  } else {
3342  if (FLAG_gc_verbose) {
3343  PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3344  reinterpret_cast<intptr_t>(p));
3345  }
3346  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3347  p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3348 
3349  switch (space->identity()) {
3350  case OLD_DATA_SPACE:
3351  SweepConservatively(space, p);
3352  break;
3353  case OLD_POINTER_SPACE:
3354  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3355  space, p, &updating_visitor);
3356  break;
3357  case CODE_SPACE:
3358  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3359  space, p, &updating_visitor);
3360  break;
3361  default:
3362  UNREACHABLE();
3363  break;
3364  }
3365  }
3366  }
3367  }
3368 
3369  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3370 
3371  // Update pointers from cells.
3372  HeapObjectIterator cell_iterator(heap_->cell_space());
3373  for (HeapObject* cell = cell_iterator.Next();
3374  cell != NULL;
3375  cell = cell_iterator.Next()) {
3376  if (cell->IsJSGlobalPropertyCell()) {
3377  Address value_address =
3378  reinterpret_cast<Address>(cell) +
3380  updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
3381  }
3382  }
3383 
3384  // Update pointer from the global contexts list.
3385  updating_visitor.VisitPointer(heap_->global_contexts_list_address());
3386 
3387  heap_->symbol_table()->Iterate(&updating_visitor);
3388 
3389  // Update pointers from external string table.
3391  &UpdateReferenceInExternalStringTableEntry);
3392 
3393  if (!FLAG_watch_ic_patching) {
3394  // Update JSFunction pointers from the runtime profiler.
3396  &updating_visitor);
3397  }
3398 
3399  EvacuationWeakObjectRetainer evacuation_object_retainer;
3400  heap()->ProcessWeakReferences(&evacuation_object_retainer);
3401 
3402  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3403  // under it.
3404  ProcessInvalidatedCode(&updating_visitor);
3405 
3406  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3407 
3408 #ifdef DEBUG
3409  if (FLAG_verify_heap) {
3410  VerifyEvacuation(heap_);
3411  }
3412 #endif
3413 
3414  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3415  ASSERT(migration_slots_buffer_ == NULL);
3416  for (int i = 0; i < npages; i++) {
3417  Page* p = evacuation_candidates_[i];
3418  if (!p->IsEvacuationCandidate()) continue;
3419  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3420  space->Free(p->area_start(), p->area_size());
3421  p->set_scan_on_scavenge(false);
3422  slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3423  p->ResetLiveBytes();
3424  space->ReleasePage(p);
3425  }
3426  evacuation_candidates_.Rewind(0);
3427  compacting_ = false;
3428 }
3429 
3430 
3431 static const int kStartTableEntriesPerLine = 5;
3432 static const int kStartTableLines = 171;
3433 static const int kStartTableInvalidLine = 127;
3434 static const int kStartTableUnusedEntry = 126;
3435 
3436 #define _ kStartTableUnusedEntry
3437 #define X kStartTableInvalidLine
3438 // Mark-bit to object start offset table.
3439 //
3440 // The line is indexed by the mark bits in a byte. The first number on
3441 // the line describes the number of live object starts for the line and the
3442 // other numbers on the line describe the offsets (in words) of the object
3443 // starts.
3444 //
3445 // Since objects are at least 2 words large we don't have entries for two
3446 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
3447 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3448  0, _, _, _, _, // 0
3449  1, 0, _, _, _, // 1
3450  1, 1, _, _, _, // 2
3451  X, _, _, _, _, // 3
3452  1, 2, _, _, _, // 4
3453  2, 0, 2, _, _, // 5
3454  X, _, _, _, _, // 6
3455  X, _, _, _, _, // 7
3456  1, 3, _, _, _, // 8
3457  2, 0, 3, _, _, // 9
3458  2, 1, 3, _, _, // 10
3459  X, _, _, _, _, // 11
3460  X, _, _, _, _, // 12
3461  X, _, _, _, _, // 13
3462  X, _, _, _, _, // 14
3463  X, _, _, _, _, // 15
3464  1, 4, _, _, _, // 16
3465  2, 0, 4, _, _, // 17
3466  2, 1, 4, _, _, // 18
3467  X, _, _, _, _, // 19
3468  2, 2, 4, _, _, // 20
3469  3, 0, 2, 4, _, // 21
3470  X, _, _, _, _, // 22
3471  X, _, _, _, _, // 23
3472  X, _, _, _, _, // 24
3473  X, _, _, _, _, // 25
3474  X, _, _, _, _, // 26
3475  X, _, _, _, _, // 27
3476  X, _, _, _, _, // 28
3477  X, _, _, _, _, // 29
3478  X, _, _, _, _, // 30
3479  X, _, _, _, _, // 31
3480  1, 5, _, _, _, // 32
3481  2, 0, 5, _, _, // 33
3482  2, 1, 5, _, _, // 34
3483  X, _, _, _, _, // 35
3484  2, 2, 5, _, _, // 36
3485  3, 0, 2, 5, _, // 37
3486  X, _, _, _, _, // 38
3487  X, _, _, _, _, // 39
3488  2, 3, 5, _, _, // 40
3489  3, 0, 3, 5, _, // 41
3490  3, 1, 3, 5, _, // 42
3491  X, _, _, _, _, // 43
3492  X, _, _, _, _, // 44
3493  X, _, _, _, _, // 45
3494  X, _, _, _, _, // 46
3495  X, _, _, _, _, // 47
3496  X, _, _, _, _, // 48
3497  X, _, _, _, _, // 49
3498  X, _, _, _, _, // 50
3499  X, _, _, _, _, // 51
3500  X, _, _, _, _, // 52
3501  X, _, _, _, _, // 53
3502  X, _, _, _, _, // 54
3503  X, _, _, _, _, // 55
3504  X, _, _, _, _, // 56
3505  X, _, _, _, _, // 57
3506  X, _, _, _, _, // 58
3507  X, _, _, _, _, // 59
3508  X, _, _, _, _, // 60
3509  X, _, _, _, _, // 61
3510  X, _, _, _, _, // 62
3511  X, _, _, _, _, // 63
3512  1, 6, _, _, _, // 64
3513  2, 0, 6, _, _, // 65
3514  2, 1, 6, _, _, // 66
3515  X, _, _, _, _, // 67
3516  2, 2, 6, _, _, // 68
3517  3, 0, 2, 6, _, // 69
3518  X, _, _, _, _, // 70
3519  X, _, _, _, _, // 71
3520  2, 3, 6, _, _, // 72
3521  3, 0, 3, 6, _, // 73
3522  3, 1, 3, 6, _, // 74
3523  X, _, _, _, _, // 75
3524  X, _, _, _, _, // 76
3525  X, _, _, _, _, // 77
3526  X, _, _, _, _, // 78
3527  X, _, _, _, _, // 79
3528  2, 4, 6, _, _, // 80
3529  3, 0, 4, 6, _, // 81
3530  3, 1, 4, 6, _, // 82
3531  X, _, _, _, _, // 83
3532  3, 2, 4, 6, _, // 84
3533  4, 0, 2, 4, 6, // 85
3534  X, _, _, _, _, // 86
3535  X, _, _, _, _, // 87
3536  X, _, _, _, _, // 88
3537  X, _, _, _, _, // 89
3538  X, _, _, _, _, // 90
3539  X, _, _, _, _, // 91
3540  X, _, _, _, _, // 92
3541  X, _, _, _, _, // 93
3542  X, _, _, _, _, // 94
3543  X, _, _, _, _, // 95
3544  X, _, _, _, _, // 96
3545  X, _, _, _, _, // 97
3546  X, _, _, _, _, // 98
3547  X, _, _, _, _, // 99
3548  X, _, _, _, _, // 100
3549  X, _, _, _, _, // 101
3550  X, _, _, _, _, // 102
3551  X, _, _, _, _, // 103
3552  X, _, _, _, _, // 104
3553  X, _, _, _, _, // 105
3554  X, _, _, _, _, // 106
3555  X, _, _, _, _, // 107
3556  X, _, _, _, _, // 108
3557  X, _, _, _, _, // 109
3558  X, _, _, _, _, // 110
3559  X, _, _, _, _, // 111
3560  X, _, _, _, _, // 112
3561  X, _, _, _, _, // 113
3562  X, _, _, _, _, // 114
3563  X, _, _, _, _, // 115
3564  X, _, _, _, _, // 116
3565  X, _, _, _, _, // 117
3566  X, _, _, _, _, // 118
3567  X, _, _, _, _, // 119
3568  X, _, _, _, _, // 120
3569  X, _, _, _, _, // 121
3570  X, _, _, _, _, // 122
3571  X, _, _, _, _, // 123
3572  X, _, _, _, _, // 124
3573  X, _, _, _, _, // 125
3574  X, _, _, _, _, // 126
3575  X, _, _, _, _, // 127
3576  1, 7, _, _, _, // 128
3577  2, 0, 7, _, _, // 129
3578  2, 1, 7, _, _, // 130
3579  X, _, _, _, _, // 131
3580  2, 2, 7, _, _, // 132
3581  3, 0, 2, 7, _, // 133
3582  X, _, _, _, _, // 134
3583  X, _, _, _, _, // 135
3584  2, 3, 7, _, _, // 136
3585  3, 0, 3, 7, _, // 137
3586  3, 1, 3, 7, _, // 138
3587  X, _, _, _, _, // 139
3588  X, _, _, _, _, // 140
3589  X, _, _, _, _, // 141
3590  X, _, _, _, _, // 142
3591  X, _, _, _, _, // 143
3592  2, 4, 7, _, _, // 144
3593  3, 0, 4, 7, _, // 145
3594  3, 1, 4, 7, _, // 146
3595  X, _, _, _, _, // 147
3596  3, 2, 4, 7, _, // 148
3597  4, 0, 2, 4, 7, // 149
3598  X, _, _, _, _, // 150
3599  X, _, _, _, _, // 151
3600  X, _, _, _, _, // 152
3601  X, _, _, _, _, // 153
3602  X, _, _, _, _, // 154
3603  X, _, _, _, _, // 155
3604  X, _, _, _, _, // 156
3605  X, _, _, _, _, // 157
3606  X, _, _, _, _, // 158
3607  X, _, _, _, _, // 159
3608  2, 5, 7, _, _, // 160
3609  3, 0, 5, 7, _, // 161
3610  3, 1, 5, 7, _, // 162
3611  X, _, _, _, _, // 163
3612  3, 2, 5, 7, _, // 164
3613  4, 0, 2, 5, 7, // 165
3614  X, _, _, _, _, // 166
3615  X, _, _, _, _, // 167
3616  3, 3, 5, 7, _, // 168
3617  4, 0, 3, 5, 7, // 169
3618  4, 1, 3, 5, 7 // 170
3619 };
3620 #undef _
3621 #undef X
3622 
3623 
3624 // Takes a word of mark bits. Returns the number of objects that start in the
3625 // range. Puts the offsets of the words in the supplied array.
3626 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3627  int objects = 0;
3628  int offset = 0;
3629 
3630  // No consecutive 1 bits.
3631  ASSERT((mark_bits & 0x180) != 0x180);
3632  ASSERT((mark_bits & 0x18000) != 0x18000);
3633  ASSERT((mark_bits & 0x1800000) != 0x1800000);
3634 
3635  while (mark_bits != 0) {
3636  int byte = (mark_bits & 0xff);
3637  mark_bits >>= 8;
3638  if (byte != 0) {
3639  ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3640  char* table = kStartTable + byte * kStartTableEntriesPerLine;
3641  int objects_in_these_8_words = table[0];
3642  ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3643  ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3644  for (int i = 0; i < objects_in_these_8_words; i++) {
3645  starts[objects++] = offset + table[1 + i];
3646  }
3647  }
3648  offset += 8;
3649  }
3650  return objects;
3651 }
3652 
3653 
3654 static inline Address DigestFreeStart(Address approximate_free_start,
3655  uint32_t free_start_cell) {
3656  ASSERT(free_start_cell != 0);
3657 
3658  // No consecutive 1 bits.
3659  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3660 
3661  int offsets[16];
3662  uint32_t cell = free_start_cell;
3663  int offset_of_last_live;
3664  if ((cell & 0x80000000u) != 0) {
3665  // This case would overflow below.
3666  offset_of_last_live = 31;
3667  } else {
3668  // Remove all but one bit, the most significant. This is an optimization
3669  // that may or may not be worthwhile.
3670  cell |= cell >> 16;
3671  cell |= cell >> 8;
3672  cell |= cell >> 4;
3673  cell |= cell >> 2;
3674  cell |= cell >> 1;
3675  cell = (cell + 1) >> 1;
3676  int live_objects = MarkWordToObjectStarts(cell, offsets);
3677  ASSERT(live_objects == 1);
3678  offset_of_last_live = offsets[live_objects - 1];
3679  }
3680  Address last_live_start =
3681  approximate_free_start + offset_of_last_live * kPointerSize;
3682  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3683  Address free_start = last_live_start + last_live->Size();
3684  return free_start;
3685 }
3686 
3687 
3688 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3689  ASSERT(cell != 0);
3690 
3691  // No consecutive 1 bits.
3692  ASSERT((cell & (cell << 1)) == 0);
3693 
3694  int offsets[16];
3695  if (cell == 0x80000000u) { // Avoid overflow below.
3696  return block_address + 31 * kPointerSize;
3697  }
3698  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3699  ASSERT((first_set_bit & cell) == first_set_bit);
3700  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3701  ASSERT(live_objects == 1);
3702  USE(live_objects);
3703  return block_address + offsets[0] * kPointerSize;
3704 }
3705 
3706 
3707 // Sweeps a space conservatively. After this has been done the larger free
3708 // spaces have been put on the free list and the smaller ones have been
3709 // ignored and left untouched. A free space is always either ignored or put
3710 // on the free list, never split up into two parts. This is important
3711 // because it means that any FreeSpace maps left actually describe a region of
3712 // memory that can be ignored when scanning. Dead objects other than free
3713 // spaces will not contain the free space map.
3715  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3716  MarkBit::CellType* cells = p->markbits()->cells();
3718 
3719  int last_cell_index =
3720  Bitmap::IndexToCell(
3721  Bitmap::CellAlignIndex(
3722  p->AddressToMarkbitIndex(p->area_end())));
3723 
3724  int cell_index =
3725  Bitmap::IndexToCell(
3726  Bitmap::CellAlignIndex(
3727  p->AddressToMarkbitIndex(p->area_start())));
3728 
3729  intptr_t freed_bytes = 0;
3730 
3731  // This is the start of the 32 word block that we are currently looking at.
3732  Address block_address = p->area_start();
3733 
3734  // Skip over all the dead objects at the start of the page and mark them free.
3735  for (;
3736  cell_index < last_cell_index;
3737  cell_index++, block_address += 32 * kPointerSize) {
3738  if (cells[cell_index] != 0) break;
3739  }
3740  size_t size = block_address - p->area_start();
3741  if (cell_index == last_cell_index) {
3742  freed_bytes += static_cast<int>(space->Free(p->area_start(),
3743  static_cast<int>(size)));
3744  ASSERT_EQ(0, p->LiveBytes());
3745  return freed_bytes;
3746  }
3747  // Grow the size of the start-of-page free space a little to get up to the
3748  // first live object.
3749  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
3750  // Free the first free space.
3751  size = free_end - p->area_start();
3752  freed_bytes += space->Free(p->area_start(),
3753  static_cast<int>(size));
3754  // The start of the current free area is represented in undigested form by
3755  // the address of the last 32-word section that contained a live object and
3756  // the marking bitmap for that cell, which describes where the live object
3757  // started. Unless we find a large free space in the bitmap we will not
3758  // digest this pair into a real address. We start the iteration here at the
3759  // first word in the marking bit map that indicates a live object.
3760  Address free_start = block_address;
3761  uint32_t free_start_cell = cells[cell_index];
3762 
3763  for ( ;
3764  cell_index < last_cell_index;
3765  cell_index++, block_address += 32 * kPointerSize) {
3766  ASSERT((unsigned)cell_index ==
3767  Bitmap::IndexToCell(
3768  Bitmap::CellAlignIndex(
3769  p->AddressToMarkbitIndex(block_address))));
3770  uint32_t cell = cells[cell_index];
3771  if (cell != 0) {
3772  // We have a live object. Check approximately whether it is more than 32
3773  // words since the last live object.
3774  if (block_address - free_start > 32 * kPointerSize) {
3775  free_start = DigestFreeStart(free_start, free_start_cell);
3776  if (block_address - free_start > 32 * kPointerSize) {
3777  // Now that we know the exact start of the free space it still looks
3778  // like we have a large enough free space to be worth bothering with.
3779  // so now we need to find the start of the first live object at the
3780  // end of the free space.
3781  free_end = StartOfLiveObject(block_address, cell);
3782  freed_bytes += space->Free(free_start,
3783  static_cast<int>(free_end - free_start));
3784  }
3785  }
3786  // Update our undigested record of where the current free area started.
3787  free_start = block_address;
3788  free_start_cell = cell;
3789  // Clear marking bits for current cell.
3790  cells[cell_index] = 0;
3791  }
3792  }
3793 
3794  // Handle the free space at the end of the page.
3795  if (block_address - free_start > 32 * kPointerSize) {
3796  free_start = DigestFreeStart(free_start, free_start_cell);
3797  freed_bytes += space->Free(free_start,
3798  static_cast<int>(block_address - free_start));
3799  }
3800 
3801  p->ResetLiveBytes();
3802  return freed_bytes;
3803 }
3804 
3805 
3806 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3807  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3808  sweeper == LAZY_CONSERVATIVE);
3809 
3810  space->ClearStats();
3811 
3812  PageIterator it(space);
3813 
3814  intptr_t freed_bytes = 0;
3815  int pages_swept = 0;
3816  intptr_t newspace_size = space->heap()->new_space()->Size();
3817  bool lazy_sweeping_active = false;
3818  bool unused_page_present = false;
3819 
3820  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
3821  intptr_t space_left =
3822  Min(heap()->OldGenPromotionLimit(old_space_size),
3823  heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
3824 
3825  while (it.has_next()) {
3826  Page* p = it.next();
3827 
3828  // Clear sweeping flags indicating that marking bits are still intact.
3829  p->ClearSweptPrecisely();
3831 
3832  if (p->IsEvacuationCandidate()) {
3833  ASSERT(evacuation_candidates_.length() > 0);
3834  continue;
3835  }
3836 
3838  // Will be processed in EvacuateNewSpaceAndCandidates.
3839  continue;
3840  }
3841 
3842  // One unused page is kept, all further are released before sweeping them.
3843  if (p->LiveBytes() == 0) {
3844  if (unused_page_present) {
3845  if (FLAG_gc_verbose) {
3846  PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3847  reinterpret_cast<intptr_t>(p));
3848  }
3849  // Adjust unswept free bytes because releasing a page expects said
3850  // counter to be accurate for unswept pages.
3851  space->IncreaseUnsweptFreeBytes(p);
3852  space->ReleasePage(p);
3853  continue;
3854  }
3855  unused_page_present = true;
3856  }
3857 
3858  if (lazy_sweeping_active) {
3859  if (FLAG_gc_verbose) {
3860  PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
3861  reinterpret_cast<intptr_t>(p));
3862  }
3863  space->IncreaseUnsweptFreeBytes(p);
3864  continue;
3865  }
3866 
3867  switch (sweeper) {
3868  case CONSERVATIVE: {
3869  if (FLAG_gc_verbose) {
3870  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
3871  reinterpret_cast<intptr_t>(p));
3872  }
3873  SweepConservatively(space, p);
3874  pages_swept++;
3875  break;
3876  }
3877  case LAZY_CONSERVATIVE: {
3878  if (FLAG_gc_verbose) {
3879  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
3880  reinterpret_cast<intptr_t>(p));
3881  }
3882  freed_bytes += SweepConservatively(space, p);
3883  pages_swept++;
3884  if (space_left + freed_bytes > newspace_size) {
3885  space->SetPagesToSweep(p->next_page());
3886  lazy_sweeping_active = true;
3887  } else {
3888  if (FLAG_gc_verbose) {
3889  PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
3890  freed_bytes);
3891  }
3892  }
3893  break;
3894  }
3895  case PRECISE: {
3896  if (FLAG_gc_verbose) {
3897  PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
3898  reinterpret_cast<intptr_t>(p));
3899  }
3900  if (space->identity() == CODE_SPACE) {
3901  SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
3902  } else {
3903  SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
3904  }
3905  pages_swept++;
3906  break;
3907  }
3908  default: {
3909  UNREACHABLE();
3910  }
3911  }
3912  }
3913 
3914  if (FLAG_gc_verbose) {
3915  PrintF("SweepSpace: %s (%d pages swept)\n",
3916  AllocationSpaceName(space->identity()),
3917  pages_swept);
3918  }
3919 
3920  // Give pages that are queued to be freed back to the OS.
3921  heap()->FreeQueuedChunks();
3922 }
3923 
3924 
3925 void MarkCompactCollector::SweepSpaces() {
3926  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
3927 #ifdef DEBUG
3928  state_ = SWEEP_SPACES;
3929 #endif
3930  SweeperType how_to_sweep =
3931  FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3932  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
3933  if (sweep_precisely_) how_to_sweep = PRECISE;
3934  // Noncompacting collections simply sweep the spaces to clear the mark
3935  // bits and free the nonlive blocks (for old and map spaces). We sweep
3936  // the map space last because freeing non-live maps overwrites them and
3937  // the other spaces rely on possibly non-live maps to get the sizes for
3938  // non-live objects.
3939  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3940  SweepSpace(heap()->old_data_space(), how_to_sweep);
3941 
3942  RemoveDeadInvalidatedCode();
3943  SweepSpace(heap()->code_space(), PRECISE);
3944 
3945  SweepSpace(heap()->cell_space(), PRECISE);
3946 
3947  EvacuateNewSpaceAndCandidates();
3948 
3949  // ClearNonLiveTransitions depends on precise sweeping of map space to
3950  // detect whether unmarked map became dead in this collection or in one
3951  // of the previous ones.
3952  SweepSpace(heap()->map_space(), PRECISE);
3953 
3954  // Deallocate unmarked objects and clear marked bits for marked objects.
3955  heap_->lo_space()->FreeUnmarkedObjects();
3956 }
3957 
3958 
3960  if (enable) {
3961  if (code_flusher_ != NULL) return;
3962  code_flusher_ = new CodeFlusher(heap()->isolate());
3963  } else {
3964  if (code_flusher_ == NULL) return;
3965  delete code_flusher_;
3966  code_flusher_ = NULL;
3967  }
3968 }
3969 
3970 
3971 // TODO(1466) ReportDeleteIfNeeded is not called currently.
3972 // Our profiling tools do not expect intersections between
3973 // code objects. We should either reenable it or change our tools.
3975  Isolate* isolate) {
3976 #ifdef ENABLE_GDB_JIT_INTERFACE
3977  if (obj->IsCode()) {
3978  GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3979  }
3980 #endif
3981  if (obj->IsCode()) {
3982  PROFILE(isolate, CodeDeleteEvent(obj->address()));
3983  }
3984 }
3985 
3986 
3989 }
3990 
3991 
3993  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
3994 }
3995 
3996 
3998  SlotsBuffer** buffer_address,
3999  SlotType type,
4000  Address addr,
4001  AdditionMode mode) {
4002  SlotsBuffer* buffer = *buffer_address;
4003  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4004  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4005  allocator->DeallocateChain(buffer_address);
4006  return false;
4007  }
4008  buffer = allocator->AllocateBuffer(buffer);
4009  *buffer_address = buffer;
4010  }
4011  ASSERT(buffer->HasSpaceForTypedSlot());
4012  buffer->Add(reinterpret_cast<ObjectSlot>(type));
4013  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4014  return true;
4015 }
4016 
4017 
4018 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4019  if (RelocInfo::IsCodeTarget(rmode)) {
4021  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4023  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4025  } else if (RelocInfo::IsJSReturn(rmode)) {
4027  }
4028  UNREACHABLE();
4030 }
4031 
4032 
4033 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4034  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4035  if (target_page->IsEvacuationCandidate() &&
4036  (rinfo->host() == NULL ||
4037  !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4038  if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4039  target_page->slots_buffer_address(),
4040  SlotTypeForRMode(rinfo->rmode()),
4041  rinfo->pc(),
4043  EvictEvacuationCandidate(target_page);
4044  }
4045  }
4046 }
4047 
4048 
4050  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4051  if (target_page->IsEvacuationCandidate() &&
4052  !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4053  if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4054  target_page->slots_buffer_address(),
4056  slot,
4058  EvictEvacuationCandidate(target_page);
4059  }
4060  }
4061 }
4062 
4063 
4064 static inline SlotsBuffer::SlotType DecodeSlotType(
4065  SlotsBuffer::ObjectSlot slot) {
4066  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4067 }
4068 
4069 
4071  PointersUpdatingVisitor v(heap);
4072 
4073  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4074  ObjectSlot slot = slots_[slot_idx];
4075  if (!IsTypedSlot(slot)) {
4077  } else {
4078  ++slot_idx;
4079  ASSERT(slot_idx < idx_);
4080  UpdateSlot(&v,
4081  DecodeSlotType(slot),
4082  reinterpret_cast<Address>(slots_[slot_idx]));
4083  }
4084  }
4085 }
4086 
4087 
4089  PointersUpdatingVisitor v(heap);
4090 
4091  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4092  ObjectSlot slot = slots_[slot_idx];
4093  if (!IsTypedSlot(slot)) {
4094  if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4096  }
4097  } else {
4098  ++slot_idx;
4099  ASSERT(slot_idx < idx_);
4100  Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4101  if (!IsOnInvalidatedCodeObject(pc)) {
4102  UpdateSlot(&v,
4103  DecodeSlotType(slot),
4104  reinterpret_cast<Address>(slots_[slot_idx]));
4105  }
4106  }
4107  }
4108 }
4109 
4110 
4112  return new SlotsBuffer(next_buffer);
4113 }
4114 
4115 
4117  delete buffer;
4118 }
4119 
4120 
4122  SlotsBuffer* buffer = *buffer_address;
4123  while (buffer != NULL) {
4124  SlotsBuffer* next_buffer = buffer->next();
4125  DeallocateBuffer(buffer);
4126  buffer = next_buffer;
4127  }
4128  *buffer_address = NULL;
4129 }
4130 
4131 
4132 } } // namespace v8::internal
static bool IsBlack(MarkBit mark_bit)
Definition: mark-compact.h:70
byte * Address
Definition: globals.h:172
static int SizeOf(Map *map, HeapObject *object)
Definition: objects-inl.h:5092
const uint32_t kShortcutTypeTag
Definition: objects.h:496
static void VisitGlobalPropertyCell(Heap *heap, RelocInfo *rinfo)
void ClearEvacuationCandidate()
Definition: spaces.h:593
static const int kPointerFieldsEndOffset
Definition: objects.h:4977
static const char * kGreyBitPattern
Definition: mark-compact.h:81
static uint32_t FastAddressToMarkbitIndex(Address addr)
Definition: spaces.h:548
List< ImplicitRefGroup * > * implicit_ref_groups()
Code * builtin(Name name)
Definition: builtins.h:312
static bool IsTypedSlot(ObjectSlot slot)
static const int kCodeOffset
Definition: objects.h:5606
void VisitEmbeddedPointer(RelocInfo *rinfo)
#define CHECK_EQ(expected, value)
Definition: checks.h:219
static Object *& Object_at(Address addr)
Definition: v8memory.h:75
INLINE(static void VisitPointer(Heap *heap, Object **p))
static const int kCodeEntryOffset
Definition: objects.h:5981
static const int kGetterOffset
Definition: objects.h:8254
FixedBodyDescriptor< kNameOffset, kThisPropertyAssignmentsOffset+kPointerSize, kSize > BodyDescriptor
Definition: objects.h:5708
#define V8PRIxPTR
Definition: globals.h:204
Address FromSpacePageHigh()
Definition: spaces.h:2207
static void ReportDeleteIfNeeded(HeapObject *obj, Isolate *isolate)
CompilationCache * compilation_cache()
Definition: isolate.h:812
SharedFunctionInfoMarkingVisitor(MarkCompactCollector *collector)
void PrintF(const char *format,...)
Definition: v8utils.cc:40
void AddCandidate(JSFunction *function)
bool InNewSpace(Object *object)
Definition: heap-inl.h:292
INLINE(static void VisitPointers(Heap *heap, Object **start, Object **end))
static String * cast(Object *obj)
GlobalObject * global()
Definition: contexts.h:319
HandleScopeImplementer * handle_scope_implementer()
Definition: isolate.h:849
static const char * kWhiteBitPattern
Definition: mark-compact.h:75
MUST_USE_RESULT MaybeObject * AllocateRaw(int size_in_bytes)
Definition: spaces-inl.h:263
Isolate * isolate()
Definition: heap-inl.h:494
void Prepare(GCTracer *tracer)
static Smi * FromInt(int value)
Definition: objects-inl.h:973
static Object * GetObjectFromEntryAddress(Address location_of_address)
Definition: objects-inl.h:3391
void FinalizeExternalString(String *string)
Definition: heap-inl.h:247
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:304
static void VisitCodeTarget(Heap *heap, RelocInfo *rinfo)
static HeapObject * cast(Object *obj)
static const int kProtoTransitionElementsPerEntry
Definition: objects.h:4802
static AccessorPair * cast(Object *obj)
static Map * cast(Object *obj)
void ResetAllocationInfo()
Definition: spaces.cc:1189
static void IterateElements(ObjectVisitor *v)
static const int kInstanceDescriptorsOrBitField3Offset
Definition: objects.h:4964
Builtins * builtins()
Definition: isolate.h:909
static const int kSetterOffset
Definition: objects.h:8255
bool InFromSpace(Object *object)
Definition: heap-inl.h:306
static void Clear(MemoryChunk *chunk)
Definition: spaces-inl.h:42
void Relocate(intptr_t delta)
Definition: objects.cc:8112
static bool enabled()
Definition: serialize.h:480
virtual void VisitPointers(Object **start, Object **end)
static void MoveBlock(Address dst, Address src, int byte_size)
Definition: heap-inl.h:390
void IterateWeakRoots(ObjectVisitor *v)
static const int kSize
Definition: objects.h:6433
const char * AllocationSpaceName(AllocationSpace space)
void UpdateSlots(Heap *heap)
void IterateStrongRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:5648
#define ASSERT(condition)
Definition: checks.h:270
static const uint32_t kBitsPerCell
Definition: spaces.h:169
static void IncrementLiveBytesFromGC(Address address, int by)
Definition: spaces.h:471
void ClearFlag(int flag)
Definition: spaces.h:421
bool StartCompaction(CompactionMode mode)
#define PROFILE(isolate, Call)
Definition: cpu-profiler.h:190
void VisitCodeTarget(RelocInfo *rinfo)
static void VisitExternalReference(RelocInfo *rinfo)
void UpdateSlotsWithFilter(Heap *heap)
OldSpace * TargetSpace(HeapObject *object)
Definition: heap-inl.h:349
void VisitPointers(Object **start, Object **end)
static bool IsGrey(MarkBit mark_bit)
Definition: mark-compact.h:82
static Context * cast(Object *context)
Definition: contexts.h:207
static const char * kBlackBitPattern
Definition: mark-compact.h:69
static bool IsWhite(MarkBit mark_bit)
Definition: mark-compact.h:76
ThreadManager * thread_manager()
Definition: isolate.h:867
void AddCandidate(SharedFunctionInfo *shared_info)
bool NonFailureIsHeapObject()
Definition: objects-inl.h:165
int global_ic_age()
Definition: heap.h:1595
#define CHECK(condition)
Definition: checks.h:56
Object ** GetKeySlot(int descriptor_number)
Definition: objects-inl.h:1962
static const int kTableOffset
Definition: objects.h:8011
static const int kPrototypeTransitionsOrBackPointerOffset
Definition: objects.h:4968
int ms_count()
Definition: heap.h:1157
uint32_t CellType
Definition: spaces.h:126
static Code * cast(Object *obj)
void ClearSweptConservatively()
Definition: spaces.h:723
static Object ** RawField(HeapObject *obj, int offset)
Definition: objects-inl.h:963
StoreBuffer * store_buffer()
Definition: heap.h:1516
static Smi * cast(Object *object)
static int SizeOfChain(SlotsBuffer *buffer)
Definition: mark-compact.h:313
void IterateArchivedThreads(ThreadVisitor *v)
Definition: v8threads.cc:383
void IncreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1595
MarkBit Next()
Definition: spaces.h:146
static MarkBit MarkBitFrom(Address addr)
bool TryPromoteObject(HeapObject *object, int object_size)
uint8_t byte
Definition: globals.h:171
Object ** GetTransitionsSlot()
Definition: objects.h:2475
static bool IsMarked(Object *obj)
HANDLE HANDLE LPSTACKFRAME64 StackFrame
SlotsBuffer * AllocateBuffer(SlotsBuffer *next_buffer)
void ClearSweptPrecisely()
Definition: spaces.h:722
#define UNREACHABLE()
Definition: checks.h:50
void Mark(Address addr)
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
void VisitPointers(Object **start, Object **end)
static const int kGCMetadataOffset
Definition: objects.h:4501
CodeMarkingVisitor(MarkCompactCollector *collector)
void ClearCacheOnMap(Map *map)
Definition: heap.h:1487
void IteratePointersToNewSpace(ObjectSlotCallback callback)
#define HEAP_PROFILE(heap, call)
Definition: heap-profiler.h:39
static const int kProtoTransitionMapOffset
Definition: objects.h:4804
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:811
static const int kScopeInfoOffset
Definition: objects.h:5607
void PushBlack(HeapObject *object)
Definition: mark-compact.h:197
static NewSpacePage * FromAddress(Address address_in_page)
Definition: spaces.h:1774
virtual intptr_t SizeOfObjects()
Definition: spaces.h:1495
bool context_exit_happened()
Definition: isolate.h:1027
static void VisitEmbeddedPointer(Heap *heap, RelocInfo *rinfo)
v8::Handle< v8::Object > bottom
Definition: test-api.cc:1625
void Initialize(Address low, Address high)
Definition: mark-compact.h:175
void CollectEvacuationCandidates(PagedSpace *space)
void EvictEvacuationCandidatesFromFreeLists()
Definition: spaces.cc:2304
const int kPointerSize
Definition: globals.h:234
virtual intptr_t Size()
Definition: spaces.h:2108
static bool VisitUnmarkedObjects(Heap *heap, Object **start, Object **end)
bool IsFlagSet(int flag)
Definition: spaces.h:433
static Address & Address_at(Address addr)
Definition: v8memory.h:71
void CodeIterateBody(ObjectVisitor *v)
void MarkEvacuationCandidate()
Definition: spaces.h:588
INLINE(static void MarkObjectByPointer(MarkCompactCollector *collector, Object **anchor_slot, Object **p))
const int kHeapObjectTag
Definition: v8.h:3848
bool IsAligned(T value, U alignment)
Definition: utils.h:206
void CountFreeListItems(Page *p, FreeList::SizeStats *sizes)
Definition: spaces.h:1614
GlobalHandles * global_handles()
Definition: isolate.h:865
void IncrementYoungSurvivorsCounter(int survived)
Definition: heap.h:1437
char kStartTable[kStartTableLines *kStartTableEntriesPerLine]
virtual Object * RetainAs(Object *object)
static void VisitRuntimeEntry(RelocInfo *rinfo)
const uint32_t kShortcutTypeMask
Definition: objects.h:492
static const int kNameOffset
Definition: objects.h:5605
const Register pc
static void IterateBody(Map *map, HeapObject *obj)
INLINE(static void VisitUnmarkedObject(MarkCompactCollector *collector, HeapObject *obj))
OldSpace * old_pointer_space()
Definition: heap.h:500
static const int kPropertiesOffset
Definition: objects.h:2113
bool TransferMark(Address old_start, Address new_start)
static void MarkBlack(MarkBit mark_bit)
Definition: mark-compact.h:86
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:3380
static void VisitDebugTarget(Heap *heap, RelocInfo *rinfo)
bool is_inline_cache_stub()
Definition: objects-inl.h:3306
void set_age_mark(Address mark)
Definition: spaces.h:2164
static void GreyToBlack(MarkBit markbit)
Definition: mark-compact.h:100
void VisitThread(Isolate *isolate, ThreadLocalTop *top)
static const int kMaxNonCodeHeapObjectSize
Definition: spaces.h:701
OldSpace * code_space()
Definition: heap.h:502
Object ** global_contexts_list_address()
Definition: heap.h:1229
void DeallocateBuffer(SlotsBuffer *buffer)
void Iterate(ObjectVisitor *v)
Definition: heap-inl.h:581
void UpdateSamplesAfterCompact(ObjectVisitor *visitor)
void MarkDescriptorArray(DescriptorArray *descriptors)
PropertyDetails GetDetails(int descriptor_number)
Definition: objects-inl.h:1996
bool WasSwept()
Definition: spaces.h:717
InlineCacheState ic_state()
Definition: objects-inl.h:3024
LargeObjectSpace * lo_space()
Definition: heap.h:505
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1519
void ReleasePage(Page *page)
Definition: spaces.cc:917
void MarkSweptConservatively()
Definition: spaces.h:720
static const int kNonWeakFieldsEndOffset
Definition: objects.h:5988
void FreeQueuedChunks()
Definition: heap.cc:7099
CellSpace * cell_space()
Definition: heap.h:504
static int OffsetOfElementAt(int index)
Definition: objects.h:2291
static intptr_t SweepConservatively(PagedSpace *space, Page *p)
virtual Object * RetainAs(Object *object)
StubCache * stub_cache()
Definition: isolate.h:822
#define T(name, string, precedence)
Definition: token.cc:48
void RecordRelocSlot(RelocInfo *rinfo, Object *target)
static const int kProtoTransitionHeaderSize
Definition: objects.h:4799
void DeallocateChain(SlotsBuffer **buffer_address)
#define V8PRIdPTR
Definition: globals.h:205
void UpdateReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1332
void MigrateObject(Address dst, Address src, int size, AllocationSpace to_old_space)
List< ObjectGroup * > * object_groups()
void ProcessWeakReferences(WeakObjectRetainer *retainer)
Definition: heap.cc:1395
void Add(ObjectSlot slot)
Definition: mark-compact.h:292
InnerPointerToCodeCache * inner_pointer_to_code_cache()
Definition: isolate.h:859
void VisitPointers(Object **start, Object **end)
void WhiteToGreyAndPush(HeapObject *obj, MarkBit mark_bit)
void IterateRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:5630
static ObjectHashTable * cast(Object *obj)
Definition: objects.h:3311
void VisitPointers(Object **start, Object **end)
void set_encountered_weak_maps(Object *weak_map)
Definition: mark-compact.h:567
void CheckNewSpaceExpansionCriteria()
Definition: heap.cc:1027
void IdentifyWeakHandles(WeakSlotCallback f)
#define SLOT_ADDR(obj, offset)
static JSGlobalPropertyCell * cast(Object *obj)
Heap * heap() const
Definition: spaces.h:766
IncrementalMarking * incremental_marking()
Definition: heap.h:1524
Object * GetBackPointer()
Definition: objects-inl.h:3492
static bool ChainLengthThresholdReached(SlotsBuffer *buffer)
Definition: mark-compact.h:345
SlotsBuffer ** slots_buffer_address()
Definition: spaces.h:584
void Iterate(v8::internal::ObjectVisitor *v)
static void UpdateSlot(Heap *heap, Object **slot)
static int code_index(bool is_ascii)
Definition: objects.h:6409
static const int kProtoTransitionPrototypeOffset
Definition: objects.h:4803
#define HEAP
Definition: isolate.h:1408
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
void IterateFunctions(ObjectVisitor *v)
InstanceType instance_type()
Definition: objects-inl.h:2864
bool IsEvacuationCandidate()
Definition: spaces.h:566
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1163
void USE(T)
Definition: globals.h:303
static FixedArray * cast(Object *obj)
void set_was_swept_conservatively(bool b)
Definition: spaces.h:1575
#define X
MapSpace * map_space()
Definition: heap.h:503
Page * next_page()
Definition: spaces-inl.h:224
intptr_t PromotedSpaceSizeOfObjects()
Definition: heap.cc:5836
static const int kCompilationErrorValue
Definition: objects.h:6494
void EvictEvacuationCandidate(Page *page)
Definition: mark-compact.h:533
void SetPagesToSweep(Page *first)
Definition: spaces.h:1585
#define _
const int kPageSizeBits
Definition: v8globals.h:100
static const char * kImpossibleBitPattern
Definition: mark-compact.h:63
CodeFlusher(Isolate *isolate)
void MarkAccessorPairSlot(AccessorPair *accessors, int offset)
void MarkMapContents(Map *map)
T Min(T a, T b)
Definition: utils.h:229
void set_code(Code *code)
Definition: objects-inl.h:4079
static const int kUninitializedValue
Definition: objects.h:6490
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2613
void VisitDebugTarget(RelocInfo *rinfo)
void check(i::Vector< const char > string)
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
NewSpace * new_space()
Definition: heap.h:499
Object ** GetValueSlot(int descriptor_number)
Definition: objects-inl.h:1976
static void UpdateSlotsRecordedIn(Heap *heap, SlotsBuffer *buffer, bool code_slots_filtering_required)
Definition: mark-compact.h:327
static int saved_code_index(bool is_ascii)
Definition: objects.h:6417
uint32_t AddressToMarkbitIndex(Address addr)
Definition: spaces.h:544
static void ProcessNonLive(HeapObject *obj)
static bool AddTo(SlotsBufferAllocator *allocator, SlotsBuffer **buffer_address, ObjectSlot slot, AdditionMode mode)
Definition: mark-compact.h:349
static void VisitExternalReference(Address *p)
FlagType type() const
Definition: flags.cc:1358
OldSpace * old_data_space()
Definition: heap.h:501
static void Clear(Address address)
Definition: ic.cc:340
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1512
static int RegionNumber(Address addr)
Definition: spaces.h:912
FixedBodyDescriptor< kHeaderSize, kHeaderSize+FIRST_WEAK_SLOT *kPointerSize, kSize > MarkCompactBodyDescriptor
Definition: contexts.h:421
AllocationSpace identity()
Definition: spaces.h:772
void RecordCodeEntrySlot(Address slot, Code *target)
static const int kPointerFieldsBeginOffset
Definition: objects.h:4976
static DeoptimizationInputData * cast(Object *obj)
Address FromSpacePageLow()
Definition: spaces.h:2206
const int MB
Definition: globals.h:222
static JSFunction * cast(Object *obj)