v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
mark-compact.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "code-stubs.h"
31 #include "compilation-cache.h"
32 #include "deoptimizer.h"
33 #include "execution.h"
34 #include "gdb-jit.h"
35 #include "global-handles.h"
36 #include "heap-profiler.h"
37 #include "ic-inl.h"
38 #include "incremental-marking.h"
39 #include "liveobjectlist-inl.h"
40 #include "mark-compact.h"
41 #include "objects-visiting.h"
42 #include "objects-visiting-inl.h"
43 #include "stub-cache.h"
44 
45 namespace v8 {
46 namespace internal {
47 
48 
49 const char* Marking::kWhiteBitPattern = "00";
50 const char* Marking::kBlackBitPattern = "10";
51 const char* Marking::kGreyBitPattern = "11";
52 const char* Marking::kImpossibleBitPattern = "01";
53 
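The two-character patterns above describe an object's color as a pair of consecutive mark bits: the object's own mark bit followed by the next bit in the bitmap, so "00" is white (unmarked), "10" is black (marked and scanned), "11" is grey (marked, children still pending) and "01" cannot occur. A minimal standalone sketch of decoding such a pair, using hypothetical names rather than V8's MarkBit API:

// Illustrative only: decode a (mark bit, next bit) pair the way the
// patterns above suggest. These helpers are hypothetical, not V8 API.
#include <cassert>

enum Color { WHITE, GREY, BLACK, IMPOSSIBLE };

static Color DecodeColor(bool mark_bit, bool next_bit) {
  if (mark_bit) return next_bit ? GREY : BLACK;   // "11" grey, "10" black
  return next_bit ? IMPOSSIBLE : WHITE;           // "01" impossible, "00" white
}

int main() {
  assert(DecodeColor(false, false) == WHITE);
  assert(DecodeColor(true, false) == BLACK);
  assert(DecodeColor(true, true) == GREY);
  return 0;
}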
54 
55 // -------------------------------------------------------------------------
56 // MarkCompactCollector
57 
58 MarkCompactCollector::MarkCompactCollector() : // NOLINT
59 #ifdef DEBUG
60  state_(IDLE),
61 #endif
62  sweep_precisely_(false),
63  reduce_memory_footprint_(false),
64  abort_incremental_marking_(false),
65  compacting_(false),
66  was_marked_incrementally_(false),
67  tracer_(NULL),
68  migration_slots_buffer_(NULL),
69  heap_(NULL),
70  code_flusher_(NULL),
71  encountered_weak_maps_(NULL) { }
72 
73 
74 #ifdef VERIFY_HEAP
75 class VerifyMarkingVisitor: public ObjectVisitor {
76  public:
77  void VisitPointers(Object** start, Object** end) {
78  for (Object** current = start; current < end; current++) {
79  if ((*current)->IsHeapObject()) {
80  HeapObject* object = HeapObject::cast(*current);
81  CHECK(HEAP->mark_compact_collector()->IsMarked(object));
82  }
83  }
84  }
85 };
86 
87 
88 static void VerifyMarking(Address bottom, Address top) {
89  VerifyMarkingVisitor visitor;
90  HeapObject* object;
91  Address next_object_must_be_here_or_later = bottom;
92 
93  for (Address current = bottom;
94  current < top;
95  current += kPointerSize) {
96  object = HeapObject::FromAddress(current);
97  if (MarkCompactCollector::IsMarked(object)) {
98  CHECK(current >= next_object_must_be_here_or_later);
99  object->Iterate(&visitor);
100  next_object_must_be_here_or_later = current + object->Size();
101  }
102  }
103 }
104 
105 
106 static void VerifyMarking(NewSpace* space) {
107  Address end = space->top();
108  NewSpacePageIterator it(space->bottom(), end);
109  // The bottom position is at the start of its page. Allows us to use
110  // page->area_start() as start of range on all pages.
111  CHECK_EQ(space->bottom(),
112  NewSpacePage::FromAddress(space->bottom())->area_start());
113  while (it.has_next()) {
114  NewSpacePage* page = it.next();
115  Address limit = it.has_next() ? page->area_end() : end;
116  CHECK(limit == end || !page->Contains(end));
117  VerifyMarking(page->area_start(), limit);
118  }
119 }
120 
121 
122 static void VerifyMarking(PagedSpace* space) {
123  PageIterator it(space);
124 
125  while (it.has_next()) {
126  Page* p = it.next();
127  VerifyMarking(p->area_start(), p->area_end());
128  }
129 }
130 
131 
132 static void VerifyMarking(Heap* heap) {
133  VerifyMarking(heap->old_pointer_space());
134  VerifyMarking(heap->old_data_space());
135  VerifyMarking(heap->code_space());
136  VerifyMarking(heap->cell_space());
137  VerifyMarking(heap->map_space());
138  VerifyMarking(heap->new_space());
139 
140  VerifyMarkingVisitor visitor;
141 
142  LargeObjectIterator it(heap->lo_space());
143  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
144  if (MarkCompactCollector::IsMarked(obj)) {
145  obj->Iterate(&visitor);
146  }
147  }
148 
149  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
150 }
151 
152 
153 class VerifyEvacuationVisitor: public ObjectVisitor {
154  public:
155  void VisitPointers(Object** start, Object** end) {
156  for (Object** current = start; current < end; current++) {
157  if ((*current)->IsHeapObject()) {
158  HeapObject* object = HeapObject::cast(*current);
159  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
160  }
161  }
162  }
163 };
164 
165 
166 static void VerifyEvacuation(Address bottom, Address top) {
167  VerifyEvacuationVisitor visitor;
168  HeapObject* object;
169  Address next_object_must_be_here_or_later = bottom;
170 
171  for (Address current = bottom;
172  current < top;
173  current += kPointerSize) {
174  object = HeapObject::FromAddress(current);
175  if (MarkCompactCollector::IsMarked(object)) {
176  CHECK(current >= next_object_must_be_here_or_later);
177  object->Iterate(&visitor);
178  next_object_must_be_here_or_later = current + object->Size();
179  }
180  }
181 }
182 
183 
184 static void VerifyEvacuation(NewSpace* space) {
185  NewSpacePageIterator it(space->bottom(), space->top());
186  VerifyEvacuationVisitor visitor;
187 
188  while (it.has_next()) {
189  NewSpacePage* page = it.next();
190  Address current = page->area_start();
191  Address limit = it.has_next() ? page->area_end() : space->top();
192  CHECK(limit == space->top() || !page->Contains(space->top()));
193  while (current < limit) {
194  HeapObject* object = HeapObject::FromAddress(current);
195  object->Iterate(&visitor);
196  current += object->Size();
197  }
198  }
199 }
200 
201 
202 static void VerifyEvacuation(PagedSpace* space) {
203  PageIterator it(space);
204 
205  while (it.has_next()) {
206  Page* p = it.next();
207  if (p->IsEvacuationCandidate()) continue;
208  VerifyEvacuation(p->area_start(), p->area_end());
209  }
210 }
211 
212 
213 static void VerifyEvacuation(Heap* heap) {
214  VerifyEvacuation(heap->old_pointer_space());
215  VerifyEvacuation(heap->old_data_space());
216  VerifyEvacuation(heap->code_space());
217  VerifyEvacuation(heap->cell_space());
218  VerifyEvacuation(heap->map_space());
219  VerifyEvacuation(heap->new_space());
220 
221  VerifyEvacuationVisitor visitor;
222  heap->IterateStrongRoots(&visitor, VISIT_ALL);
223 }
224 #endif // VERIFY_HEAP
225 
226 
227 #ifdef DEBUG
228 class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
229  public:
230  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
231 
232  void VisitPointers(Object** start, Object** end) {
233  for (Object** current = start; current < end; current++) {
234  if ((*current)->IsHeapObject()) {
235  HeapObject* object = HeapObject::cast(*current);
236  if (object->IsString()) continue;
237  switch (object->map()->instance_type()) {
238  case JS_FUNCTION_TYPE:
239  CheckContext(JSFunction::cast(object)->context());
240  break;
241  case JS_GLOBAL_PROXY_TYPE:
242  CheckContext(JSGlobalProxy::cast(object)->native_context());
243  break;
244  case JS_GLOBAL_OBJECT_TYPE:
245  case JS_BUILTINS_OBJECT_TYPE:
246  CheckContext(GlobalObject::cast(object)->native_context());
247  break;
248  case JS_ARRAY_TYPE:
249  case JS_DATE_TYPE:
250  case JS_OBJECT_TYPE:
251  case JS_REGEXP_TYPE:
252  VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
253  break;
254  case MAP_TYPE:
255  VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
256  VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
257  break;
258  case FIXED_ARRAY_TYPE:
259  if (object->IsContext()) {
260  CheckContext(object);
261  } else {
262  FixedArray* array = FixedArray::cast(object);
263  int length = array->length();
264  // Set array length to zero to prevent cycles while iterating
265  // over array bodies, this is easier than intrusive marking.
266  array->set_length(0);
267  array->IterateBody(
268  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
269  array->set_length(length);
270  }
271  break;
273  case JS_PROXY_TYPE:
274  case JS_VALUE_TYPE:
276  object->Iterate(this);
277  break;
278  case ACCESSOR_INFO_TYPE:
279  case BYTE_ARRAY_TYPE:
281  case CODE_TYPE:
283  case HEAP_NUMBER_TYPE:
285  case ODDBALL_TYPE:
286  case SCRIPT_TYPE:
288  break;
289  default:
290  UNREACHABLE();
291  }
292  }
293  }
294  }
295 
296  private:
297  void CheckContext(Object* context) {
298  if (!context->IsContext()) return;
299  Context* native_context = Context::cast(context)->native_context();
300  if (current_native_context_ == NULL) {
301  current_native_context_ = native_context;
302  } else {
303  CHECK_EQ(current_native_context_, native_context);
304  }
305  }
306 
307  Context* current_native_context_;
308 };
309 
310 
311 static void VerifyNativeContextSeparation(Heap* heap) {
312  HeapObjectIterator it(heap->code_space());
313 
314  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
315  VerifyNativeContextSeparationVisitor visitor;
316  Code::cast(object)->CodeIterateBody(&visitor);
317  }
318 }
319 #endif
320 
321 
322 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
323  p->MarkEvacuationCandidate();
324  evacuation_candidates_.Add(p);
325 }
326 
327 
328 static void TraceFragmentation(PagedSpace* space) {
329  int number_of_pages = space->CountTotalPages();
330  intptr_t reserved = (number_of_pages * space->AreaSize());
331  intptr_t free = reserved - space->SizeOfObjects();
332  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
333  AllocationSpaceName(space->identity()),
334  number_of_pages,
335  static_cast<int>(free),
336  static_cast<double>(free) * 100 / reserved);
337 }
338 
339 
340 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
341  if (!compacting_) {
342  ASSERT(evacuation_candidates_.length() == 0);
343 
344 #ifdef ENABLE_GDB_JIT_INTERFACE
345  // If GDBJIT interface is active disable compaction.
346  if (FLAG_gdbjit) return false;
347 #endif
348 
349  CollectEvacuationCandidates(heap()->old_pointer_space());
350  CollectEvacuationCandidates(heap()->old_data_space());
351 
352  if (FLAG_compact_code_space &&
353  (mode == NON_INCREMENTAL_COMPACTION ||
354  FLAG_incremental_code_compaction)) {
355  CollectEvacuationCandidates(heap()->code_space());
356  } else if (FLAG_trace_fragmentation) {
357  TraceFragmentation(heap()->code_space());
358  }
359 
360  if (FLAG_trace_fragmentation) {
361  TraceFragmentation(heap()->map_space());
362  TraceFragmentation(heap()->cell_space());
363  }
364 
368 
369  compacting_ = evacuation_candidates_.length() > 0;
370  }
371 
372  return compacting_;
373 }
374 
375 
376 void MarkCompactCollector::CollectGarbage() {
377  // Make sure that Prepare() has been called. The individual steps below will
378  // update the state as they proceed.
379  ASSERT(state_ == PREPARE_GC);
380  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
381 
382  MarkLiveObjects();
383  ASSERT(heap_->incremental_marking()->IsStopped());
384 
385  if (FLAG_collect_maps) ClearNonLiveTransitions();
386 
387  ClearWeakMaps();
388 
389 #ifdef VERIFY_HEAP
390  if (FLAG_verify_heap) {
391  VerifyMarking(heap_);
392  }
393 #endif
394 
395  SweepSpaces();
396 
397  if (!FLAG_collect_maps) ReattachInitialMaps();
398 
399 #ifdef DEBUG
400  if (FLAG_verify_native_context_separation) {
401  VerifyNativeContextSeparation(heap_);
402  }
403 #endif
404 
405  Finish();
406 
407  tracer_ = NULL;
408 }
409 
410 
411 #ifdef VERIFY_HEAP
412 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
413  PageIterator it(space);
414 
415  while (it.has_next()) {
416  Page* p = it.next();
417  CHECK(p->markbits()->IsClean());
418  CHECK_EQ(0, p->LiveBytes());
419  }
420 }
421 
422 
423 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
424  NewSpacePageIterator it(space->bottom(), space->top());
425 
426  while (it.has_next()) {
427  NewSpacePage* p = it.next();
428  CHECK(p->markbits()->IsClean());
429  CHECK_EQ(0, p->LiveBytes());
430  }
431 }
432 
433 
434 void MarkCompactCollector::VerifyMarkbitsAreClean() {
435  VerifyMarkbitsAreClean(heap_->old_pointer_space());
436  VerifyMarkbitsAreClean(heap_->old_data_space());
437  VerifyMarkbitsAreClean(heap_->code_space());
438  VerifyMarkbitsAreClean(heap_->cell_space());
439  VerifyMarkbitsAreClean(heap_->map_space());
440  VerifyMarkbitsAreClean(heap_->new_space());
441 
442  LargeObjectIterator it(heap_->lo_space());
443  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
444  MarkBit mark_bit = Marking::MarkBitFrom(obj);
445  CHECK(Marking::IsWhite(mark_bit));
446  CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
447  }
448 }
449 #endif // VERIFY_HEAP
450 
451 
452 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
453  PageIterator it(space);
454 
455  while (it.has_next()) {
456  Bitmap::Clear(it.next());
457  }
458 }
459 
460 
461 static void ClearMarkbitsInNewSpace(NewSpace* space) {
462  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
463 
464  while (it.has_next()) {
465  Bitmap::Clear(it.next());
466  }
467 }
468 
469 
470 void MarkCompactCollector::ClearMarkbits() {
471  ClearMarkbitsInPagedSpace(heap_->code_space());
472  ClearMarkbitsInPagedSpace(heap_->map_space());
473  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
474  ClearMarkbitsInPagedSpace(heap_->old_data_space());
475  ClearMarkbitsInPagedSpace(heap_->cell_space());
476  ClearMarkbitsInNewSpace(heap_->new_space());
477 
478  LargeObjectIterator it(heap_->lo_space());
479  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
480  MarkBit mark_bit = Marking::MarkBitFrom(obj);
481  mark_bit.Clear();
482  mark_bit.Next().Clear();
483  Page::FromAddress(obj->address())->ResetLiveBytes();
484  }
485 }
486 
487 
488 bool Marking::TransferMark(Address old_start, Address new_start) {
489  // This is only used when resizing an object.
490  ASSERT(MemoryChunk::FromAddress(old_start) ==
491  MemoryChunk::FromAddress(new_start));
492 
493  // If the mark doesn't move, we don't check the color of the object.
494  // It doesn't matter whether the object is black, since it hasn't changed
495  // size, so the adjustment to the live data count will be zero anyway.
496  if (old_start == new_start) return false;
497 
498  MarkBit new_mark_bit = MarkBitFrom(new_start);
499  MarkBit old_mark_bit = MarkBitFrom(old_start);
500 
501 #ifdef DEBUG
502  ObjectColor old_color = Color(old_mark_bit);
503 #endif
504 
505  if (Marking::IsBlack(old_mark_bit)) {
506  old_mark_bit.Clear();
507  ASSERT(IsWhite(old_mark_bit));
508  Marking::MarkBlack(new_mark_bit);
509  return true;
510  } else if (Marking::IsGrey(old_mark_bit)) {
511  ASSERT(heap_->incremental_marking()->IsMarking());
512  old_mark_bit.Clear();
513  old_mark_bit.Next().Clear();
514  ASSERT(IsWhite(old_mark_bit));
515  heap_->incremental_marking()->WhiteToGreyAndPush(
516  HeapObject::FromAddress(new_start), new_mark_bit);
517  heap_->incremental_marking()->RestartIfNotMarking();
518  }
519 
520 #ifdef DEBUG
521  ObjectColor new_color = Color(new_mark_bit);
522  ASSERT(new_color == old_color);
523 #endif
524 
525  return false;
526 }
527 
528 
529 const char* AllocationSpaceName(AllocationSpace space) {
530  switch (space) {
531  case NEW_SPACE: return "NEW_SPACE";
532  case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
533  case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
534  case CODE_SPACE: return "CODE_SPACE";
535  case MAP_SPACE: return "MAP_SPACE";
536  case CELL_SPACE: return "CELL_SPACE";
537  case LO_SPACE: return "LO_SPACE";
538  default:
539  UNREACHABLE();
540  }
541 
542  return NULL;
543 }
544 
545 
546 // Returns zero for pages that have so little fragmentation that it is not
547 // worth defragmenting them. Otherwise a positive integer that gives an
548 // estimate of fragmentation on an arbitrary scale.
549 static int FreeListFragmentation(PagedSpace* space, Page* p) {
550  // If page was not swept then there are no free list items on it.
551  if (!p->WasSwept()) {
552  if (FLAG_trace_fragmentation) {
553  PrintF("%p [%s]: %d bytes live (unswept)\n",
554  reinterpret_cast<void*>(p),
555  AllocationSpaceName(space->identity()),
556  p->LiveBytes());
557  }
558  return 0;
559  }
560 
561  FreeList::SizeStats sizes;
562  space->CountFreeListItems(p, &sizes);
563 
564  intptr_t ratio;
565  intptr_t ratio_threshold;
566  intptr_t area_size = space->AreaSize();
567  if (space->identity() == CODE_SPACE) {
568  ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
569  area_size;
570  ratio_threshold = 10;
571  } else {
572  ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
573  area_size;
574  ratio_threshold = 15;
575  }
576 
577  if (FLAG_trace_fragmentation) {
578  PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
579  reinterpret_cast<void*>(p),
580  AllocationSpaceName(space->identity()),
581  static_cast<int>(sizes.small_size_),
582  static_cast<double>(sizes.small_size_ * 100) /
583  area_size,
584  static_cast<int>(sizes.medium_size_),
585  static_cast<double>(sizes.medium_size_ * 100) /
586  area_size,
587  static_cast<int>(sizes.large_size_),
588  static_cast<double>(sizes.large_size_ * 100) /
589  area_size,
590  static_cast<int>(sizes.huge_size_),
591  static_cast<double>(sizes.huge_size_ * 100) /
592  area_size,
593  (ratio > ratio_threshold) ? "[fragmented]" : "");
594  }
595 
596  if (FLAG_always_compact && sizes.Total() != area_size) {
597  return 1;
598  }
599 
600  if (ratio <= ratio_threshold) return 0; // Not fragmented.
601 
602  return static_cast<int>(ratio - ratio_threshold);
603 }
604 
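To make the heuristic above concrete, here is a small worked example of the non-code-space branch, ratio = (small*5 + medium)*100/area against a threshold of 15. The byte counts are invented for illustration; this is a standalone sketch, not part of this file:

// Illustrative arithmetic only; the sizes below are made up.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t area_size = 1000 * 1024;     // usable area of the page
  const int64_t small_size = 40 * 1024;      // bytes in small free-list items
  const int64_t medium_size = 80 * 1024;     // bytes in medium free-list items
  const int64_t ratio = (small_size * 5 + medium_size) * 100 / area_size;
  const int64_t ratio_threshold = 15;
  // ratio == 28 here, so the page counts as fragmented with an
  // estimate of ratio - ratio_threshold == 13.
  std::printf("ratio=%lld fragmented=%d\n",
              static_cast<long long>(ratio),
              static_cast<int>(ratio > ratio_threshold));
  return 0;
}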
605 
606 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
607  ASSERT(space->identity() == OLD_POINTER_SPACE ||
608  space->identity() == OLD_DATA_SPACE ||
609  space->identity() == CODE_SPACE);
610 
611  static const int kMaxMaxEvacuationCandidates = 1000;
612  int number_of_pages = space->CountTotalPages();
613  int max_evacuation_candidates =
614  static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
615 
616  if (FLAG_stress_compaction || FLAG_always_compact) {
617  max_evacuation_candidates = kMaxMaxEvacuationCandidates;
618  }
619 
620  class Candidate {
621  public:
622  Candidate() : fragmentation_(0), page_(NULL) { }
623  Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
624 
625  int fragmentation() { return fragmentation_; }
626  Page* page() { return page_; }
627 
628  private:
629  int fragmentation_;
630  Page* page_;
631  };
632 
633  enum CompactionMode {
634  COMPACT_FREE_LISTS,
635  REDUCE_MEMORY_FOOTPRINT
636  };
637 
638  CompactionMode mode = COMPACT_FREE_LISTS;
639 
640  intptr_t reserved = number_of_pages * space->AreaSize();
641  intptr_t over_reserved = reserved - space->SizeOfObjects();
642  static const intptr_t kFreenessThreshold = 50;
643 
644  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
645  // If reduction of memory footprint was requested, we are aggressive
646  // about choosing pages to free. We expect that half-empty pages
647  // are easier to compact so slightly bump the limit.
648  mode = REDUCE_MEMORY_FOOTPRINT;
649  max_evacuation_candidates += 2;
650  }
651 
652 
653  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
654  // If over-usage is very high (more than a third of the space), we
655  // try to free all mostly empty pages. We expect that almost empty
656  // pages are even easier to compact so bump the limit even more.
657  mode = REDUCE_MEMORY_FOOTPRINT;
658  max_evacuation_candidates *= 2;
659  }
660 
661  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
662  PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
663  static_cast<double>(over_reserved) / MB,
664  static_cast<double>(reserved) / MB,
665  static_cast<int>(kFreenessThreshold));
666  }
667 
668  intptr_t estimated_release = 0;
669 
670  Candidate candidates[kMaxMaxEvacuationCandidates];
671 
672  max_evacuation_candidates =
673  Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
674 
675  int count = 0;
676  int fragmentation = 0;
677  Candidate* least = NULL;
678 
679  PageIterator it(space);
680  if (it.has_next()) it.next(); // Never compact the first page.
681 
682  while (it.has_next()) {
683  Page* p = it.next();
685 
686  if (FLAG_stress_compaction) {
687  unsigned int counter = space->heap()->ms_count();
688  uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
689  if ((counter & 1) == (page_number & 1)) fragmentation = 1;
690  } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
691  // Don't try to release too many pages.
692  if (estimated_release >= ((over_reserved * 3) / 4)) {
693  continue;
694  }
695 
696  intptr_t free_bytes = 0;
697 
698  if (!p->WasSwept()) {
699  free_bytes = (p->area_size() - p->LiveBytes());
700  } else {
701  FreeList::SizeStats sizes;
702  space->CountFreeListItems(p, &sizes);
703  free_bytes = sizes.Total();
704  }
705 
706  int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
707 
708  if (free_pct >= kFreenessThreshold) {
709  estimated_release += 2 * p->area_size() - free_bytes;
710  fragmentation = free_pct;
711  } else {
712  fragmentation = 0;
713  }
714 
715  if (FLAG_trace_fragmentation) {
716  PrintF("%p [%s]: %d (%.2f%%) free %s\n",
717  reinterpret_cast<void*>(p),
718  AllocationSpaceName(space->identity()),
719  static_cast<int>(free_bytes),
720  static_cast<double>(free_bytes * 100) / p->area_size(),
721  (fragmentation > 0) ? "[fragmented]" : "");
722  }
723  } else {
724  fragmentation = FreeListFragmentation(space, p);
725  }
726 
727  if (fragmentation != 0) {
728  if (count < max_evacuation_candidates) {
729  candidates[count++] = Candidate(fragmentation, p);
730  } else {
731  if (least == NULL) {
732  for (int i = 0; i < max_evacuation_candidates; i++) {
733  if (least == NULL ||
734  candidates[i].fragmentation() < least->fragmentation()) {
735  least = candidates + i;
736  }
737  }
738  }
739  if (least->fragmentation() < fragmentation) {
740  *least = Candidate(fragmentation, p);
741  least = NULL;
742  }
743  }
744  }
745  }
746 
747  for (int i = 0; i < count; i++) {
748  AddEvacuationCandidate(candidates[i].page());
749  }
750 
751  if (count > 0 && FLAG_trace_fragmentation) {
752  PrintF("Collected %d evacuation candidates for space %s\n",
753  count,
754  AllocationSpaceName(space->identity()));
755  }
756 }
757 
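For a sense of scale of the candidate limit computed at the top of CollectEvacuationCandidates above (sqrt(number_of_pages / 2) + 1, capped at 1000), a tiny standalone calculation with made-up page counts, not part of this file:

// Illustrative only: the square-root-based evacuation candidate limit.
#include <cmath>
#include <cstdio>

int main() {
  const int page_counts[] = {8, 128, 2048};
  for (int number_of_pages : page_counts) {
    int max_evacuation_candidates =
        static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
    // Prints: 8 pages -> 3, 128 pages -> 9, 2048 pages -> 33.
    std::printf("%d pages -> at most %d candidates\n",
                number_of_pages, max_evacuation_candidates);
  }
  return 0;
}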
758 
759 void MarkCompactCollector::AbortCompaction() {
760  if (compacting_) {
761  int npages = evacuation_candidates_.length();
762  for (int i = 0; i < npages; i++) {
763  Page* p = evacuation_candidates_[i];
764  slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
767  }
768  compacting_ = false;
769  evacuation_candidates_.Rewind(0);
770  invalidated_code_.Rewind(0);
771  }
772  ASSERT_EQ(0, evacuation_candidates_.length());
773 }
774 
775 
776 void MarkCompactCollector::Prepare(GCTracer* tracer) {
777  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
778 
779  // Rather than passing the tracer around we stash it in a static member
780  // variable.
781  tracer_ = tracer;
782 
783 #ifdef DEBUG
784  ASSERT(state_ == IDLE);
785  state_ = PREPARE_GC;
786 #endif
787 
788  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
789 
790  // Clear marking bits if incremental marking is aborted.
791  if (was_marked_incrementally_ && abort_incremental_marking_) {
792  heap()->incremental_marking()->Abort();
793  ClearMarkbits();
794  AbortCompaction();
795  was_marked_incrementally_ = false;
796  }
797 
798  // Don't start compaction if we are in the middle of incremental
799  // marking cycle. We did not collect any slots.
800  if (!FLAG_never_compact && !was_marked_incrementally_) {
801  StartCompaction(NON_INCREMENTAL_COMPACTION);
802  }
803 
804  PagedSpaces spaces;
805  for (PagedSpace* space = spaces.next();
806  space != NULL;
807  space = spaces.next()) {
808  space->PrepareForMarkCompact();
809  }
810 
811 #ifdef VERIFY_HEAP
812  if (!was_marked_incrementally_ && FLAG_verify_heap) {
813  VerifyMarkbitsAreClean();
814  }
815 #endif
816 }
817 
818 
819 void MarkCompactCollector::Finish() {
820 #ifdef DEBUG
821  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
822  state_ = IDLE;
823 #endif
824  // The stub cache is not traversed during GC; clear the cache to
825  // force lazy re-initialization of it. This must be done after the
826  // GC, because it relies on the new address of certain old space
827  // objects (empty string, illegal builtin).
828  heap()->isolate()->stub_cache()->Clear();
829 
830  heap()->external_string_table_.CleanUp();
831 }
832 
833 
834 // -------------------------------------------------------------------------
835 // Phase 1: tracing and marking live objects.
836 // before: all objects are in normal state.
837 // after: a live object's map pointer is marked as '00'.
838 
839 // Marking all live objects in the heap as part of mark-sweep or mark-compact
840 // collection. Before marking, all objects are in their normal state. After
841 // marking, live objects' map pointers are marked indicating that the object
842 // has been found reachable.
843 //
844 // The marking algorithm is a (mostly) depth-first (because of possible stack
845 // overflow) traversal of the graph of objects reachable from the roots. It
846 // uses an explicit stack of pointers rather than recursion. The young
847 // generation's inactive ('from') space is used as a marking stack. The
848 // objects in the marking stack are the ones that have been reached and marked
849 // but their children have not yet been visited.
850 //
851 // The marking stack can overflow during traversal. In that case, we set an
852 // overflow flag. When the overflow flag is set, we continue marking objects
853 // reachable from the objects on the marking stack, but no longer push them on
854 // the marking stack. Instead, we mark them as both marked and overflowed.
855 // When the stack is in the overflowed state, objects marked as overflowed
856 // have been reached and marked but their children have not been visited yet.
857 // After emptying the marking stack, we clear the overflow flag and traverse
858 // the heap looking for objects marked as overflowed, push them on the stack,
859 // and continue with marking. This process repeats until all reachable
860 // objects have been marked.
861 
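The comment above is the key description of the marking phase. The following is a minimal, self-contained sketch of that scheme (an explicit marking stack plus an overflow bit, with a rescan pass when the stack spills), using hypothetical types rather than V8's MarkingDeque; it only illustrates the control flow described above:

// Illustrative sketch only; Obj and MarkingStack are hypothetical stand-ins
// for V8's heap objects and MarkingDeque.
#include <cstddef>
#include <vector>

struct Obj {
  bool marked = false;
  bool overflowed = false;
  std::vector<Obj*> children;
};

class MarkingStack {
 public:
  explicit MarkingStack(size_t capacity) : capacity_(capacity) {}
  bool overflowed() const { return overflowed_; }
  void ClearOverflowed() { overflowed_ = false; }
  bool IsEmpty() const { return stack_.empty(); }
  Obj* Pop() { Obj* o = stack_.back(); stack_.pop_back(); return o; }
  void Push(Obj* obj) {
    if (stack_.size() < capacity_) {
      stack_.push_back(obj);
    } else {
      // Stack full: remember the object via its overflow bit instead.
      obj->overflowed = true;
      overflowed_ = true;
    }
  }

 private:
  std::vector<Obj*> stack_;
  size_t capacity_;
  bool overflowed_ = false;
};

// Mark everything reachable from |roots|; whenever the stack overflowed,
// rescan |heap| for overflowed objects and push them again.
void Mark(const std::vector<Obj*>& roots, const std::vector<Obj*>& heap,
          MarkingStack* stack) {
  for (Obj* root : roots) {
    if (!root->marked) { root->marked = true; stack->Push(root); }
  }
  do {
    while (!stack->IsEmpty()) {
      Obj* obj = stack->Pop();
      for (Obj* child : obj->children) {
        if (!child->marked) { child->marked = true; stack->Push(child); }
      }
    }
    if (stack->overflowed()) {
      stack->ClearOverflowed();
      for (Obj* obj : heap) {
        if (obj->overflowed) { obj->overflowed = false; stack->Push(obj); }
      }
    }
  } while (!stack->IsEmpty() || stack->overflowed());
}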
862 void CodeFlusher::ProcessJSFunctionCandidates() {
863  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
864  Object* undefined = isolate_->heap()->undefined_value();
865 
866  JSFunction* candidate = jsfunction_candidates_head_;
867  JSFunction* next_candidate;
868  while (candidate != NULL) {
869  next_candidate = GetNextCandidate(candidate);
870  ClearNextCandidate(candidate, undefined);
871 
872  SharedFunctionInfo* shared = candidate->shared();
873 
874  Code* code = shared->code();
875  MarkBit code_mark = Marking::MarkBitFrom(code);
876  if (!code_mark.Get()) {
877  shared->set_code(lazy_compile);
878  candidate->set_code(lazy_compile);
879  } else if (code == lazy_compile) {
880  candidate->set_code(lazy_compile);
881  }
882 
883  // We are in the middle of a GC cycle so the write barrier in the code
884  // setter did not record the slot update and we have to do that manually.
885  Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
886  Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
887  isolate_->heap()->mark_compact_collector()->
888  RecordCodeEntrySlot(slot, target);
889 
890  Object** shared_code_slot =
891  HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
892  isolate_->heap()->mark_compact_collector()->
893  RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
894 
895  candidate = next_candidate;
896  }
897 
898  jsfunction_candidates_head_ = NULL;
899 }
900 
901 
902 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
903  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
904 
905  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
906  SharedFunctionInfo* next_candidate;
907  while (candidate != NULL) {
908  next_candidate = GetNextCandidate(candidate);
909  ClearNextCandidate(candidate);
910 
911  Code* code = candidate->code();
912  MarkBit code_mark = Marking::MarkBitFrom(code);
913  if (!code_mark.Get()) {
914  candidate->set_code(lazy_compile);
915  }
916 
917  Object** code_slot =
918  HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
919  isolate_->heap()->mark_compact_collector()->
920  RecordSlot(code_slot, code_slot, *code_slot);
921 
922  candidate = next_candidate;
923  }
924 
925  shared_function_info_candidates_head_ = NULL;
926 }
927 
928 
929 MarkCompactCollector::~MarkCompactCollector() {
930  if (code_flusher_ != NULL) {
931  delete code_flusher_;
932  code_flusher_ = NULL;
933  }
934 }
935 
936 
937 static inline HeapObject* ShortCircuitConsString(Object** p) {
938  // Optimization: If the heap object pointed to by p is a non-symbol
939  // cons string whose right substring is HEAP->empty_string, update
940  // it in place to its left substring. Return the updated value.
941  //
942  // Here we assume that if we change *p, we replace it with a heap object
943  // (i.e., the left substring of a cons string is always a heap object).
944  //
945  // The check performed is:
946  // object->IsConsString() && !object->IsSymbol() &&
947  // (ConsString::cast(object)->second() == HEAP->empty_string())
948  // except the maps for the object and its possible substrings might be
949  // marked.
950  HeapObject* object = HeapObject::cast(*p);
951  if (!FLAG_clever_optimizations) return object;
952  Map* map = object->map();
953  InstanceType type = map->instance_type();
954  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
955 
956  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
957  Heap* heap = map->GetHeap();
958  if (second != heap->empty_string()) {
959  return object;
960  }
961 
962  // Since we don't have the object's start, it is impossible to update the
963  // page dirty marks. Therefore, we only replace the string with its left
964  // substring when page dirty marks do not change.
965  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
966  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
967 
968  *p = first;
969  return HeapObject::cast(first);
970 }
971 
972 
973 class MarkCompactMarkingVisitor
974  : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
975  public:
976  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
977  Map* map, HeapObject* obj);
978 
979  static void ObjectStatsCountFixedArray(
980  FixedArrayBase* fixed_array,
981  FixedArraySubInstanceType fast_type,
982  FixedArraySubInstanceType dictionary_type);
983 
984  template<MarkCompactMarkingVisitor::VisitorId id>
985  class ObjectStatsTracker {
986  public:
987  static inline void Visit(Map* map, HeapObject* obj);
988  };
989 
990  static void Initialize();
991 
992  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
993  MarkObjectByPointer(heap->mark_compact_collector(), p, p);
994  }
995 
996  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
997  // Mark all objects pointed to in [start, end).
998  const int kMinRangeForMarkingRecursion = 64;
999  if (end - start >= kMinRangeForMarkingRecursion) {
1000  if (VisitUnmarkedObjects(heap, start, end)) return;
1001  // We are close to a stack overflow, so just mark the objects.
1002  }
1003  MarkCompactCollector* collector = heap->mark_compact_collector();
1004  for (Object** p = start; p < end; p++) {
1005  MarkObjectByPointer(collector, start, p);
1006  }
1007  }
1008 
1009  // Marks the object black and pushes it on the marking stack.
1010  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1011  MarkBit mark = Marking::MarkBitFrom(object);
1012  heap->mark_compact_collector()->MarkObject(object, mark);
1013  }
1014 
1015  // Marks the object black without pushing it on the marking stack.
1016  // Returns true if object needed marking and false otherwise.
1017  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1018  MarkBit mark_bit = Marking::MarkBitFrom(object);
1019  if (!mark_bit.Get()) {
1020  heap->mark_compact_collector()->SetMark(object, mark_bit);
1021  return true;
1022  }
1023  return false;
1024  }
1025 
1026  // Mark object pointed to by p.
1027  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1028  Object** anchor_slot,
1029  Object** p)) {
1030  if (!(*p)->IsHeapObject()) return;
1031  HeapObject* object = ShortCircuitConsString(p);
1032  collector->RecordSlot(anchor_slot, p, object);
1033  MarkBit mark = Marking::MarkBitFrom(object);
1034  collector->MarkObject(object, mark);
1035  }
1036 
1037 
1038  // Visit an unmarked object.
1039  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1040  HeapObject* obj)) {
1041 #ifdef DEBUG
1042  ASSERT(Isolate::Current()->heap()->Contains(obj));
1043  ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
1044 #endif
1045  Map* map = obj->map();
1046  Heap* heap = obj->GetHeap();
1047  MarkBit mark = Marking::MarkBitFrom(obj);
1048  heap->mark_compact_collector()->SetMark(obj, mark);
1049  // Mark the map pointer and the body.
1050  MarkBit map_mark = Marking::MarkBitFrom(map);
1051  heap->mark_compact_collector()->MarkObject(map, map_mark);
1052  IterateBody(map, obj);
1053  }
1054 
1055  // Visit all unmarked objects pointed to by [start, end).
1056  // Returns false if the operation fails (lack of stack space).
1057  static inline bool VisitUnmarkedObjects(Heap* heap,
1058  Object** start,
1059  Object** end) {
1060  // Return false if we are close to the stack limit.
1061  StackLimitCheck check(heap->isolate());
1062  if (check.HasOverflowed()) return false;
1063 
1064  MarkCompactCollector* collector = heap->mark_compact_collector();
1065  // Visit the unmarked objects.
1066  for (Object** p = start; p < end; p++) {
1067  Object* o = *p;
1068  if (!o->IsHeapObject()) continue;
1069  collector->RecordSlot(start, p, o);
1070  HeapObject* obj = HeapObject::cast(o);
1071  MarkBit mark = Marking::MarkBitFrom(obj);
1072  if (mark.Get()) continue;
1073  VisitUnmarkedObject(collector, obj);
1074  }
1075  return true;
1076  }
1077 
1078  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1079  SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1080  shared->BeforeVisitingPointers();
1081  }
1082 
1083  static void VisitJSWeakMap(Map* map, HeapObject* object) {
1084  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1085  JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
1086 
1087  // Enqueue weak map in linked list of encountered weak maps.
1088  if (weak_map->next() == Smi::FromInt(0)) {
1089  weak_map->set_next(collector->encountered_weak_maps());
1090  collector->set_encountered_weak_maps(weak_map);
1091  }
1092 
1093  // Skip visiting the backing hash table containing the mappings.
1094  int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
1096  map->GetHeap(),
1097  object,
1101  map->GetHeap(),
1102  object,
1104  object_size);
1105 
1106  // Mark the backing hash table without pushing it on the marking stack.
1107  Object* table_object = weak_map->table();
1108  if (!table_object->IsHashTable()) return;
1109  ObjectHashTable* table = ObjectHashTable::cast(table_object);
1110  Object** table_slot =
1111  HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
1112  MarkBit table_mark = Marking::MarkBitFrom(table);
1113  collector->RecordSlot(table_slot, table_slot, table);
1114  if (!table_mark.Get()) collector->SetMark(table, table_mark);
1115  // Recording the map slot can be skipped, because maps are not compacted.
1116  collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1118  }
1119 
1120  private:
1121  template<int id>
1122  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1123 
1124  // Code flushing support.
1125 
1126  static const int kRegExpCodeThreshold = 5;
1127 
1128  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1129  JSRegExp* re,
1130  bool is_ascii) {
1131  // Make sure that the fixed array is in fact initialized on the RegExp.
1132  // We could potentially trigger a GC when initializing the RegExp.
1133  if (HeapObject::cast(re->data())->map()->instance_type() !=
1134  FIXED_ARRAY_TYPE) return;
1135 
1136  // Make sure this is a RegExp that actually contains code.
1137  if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
1138 
1139  Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
1140  if (!code->IsSmi() &&
1141  HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1142  // Save a copy that can be reinstated if we need the code again.
1143  re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
1144  code,
1145  heap);
1146 
1147  // Saving a copy might create a pointer into compaction candidate
1148  // that was not observed by marker. This might happen if JSRegExp data
1149  // was marked through the compilation cache before marker reached JSRegExp
1150  // object.
1151  FixedArray* data = FixedArray::cast(re->data());
1152  Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1153  heap->mark_compact_collector()->
1154  RecordSlot(slot, slot, code);
1155 
1156  // Set a number in the 0-255 range to guarantee no smi overflow.
1157  re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
1158  Smi::FromInt(heap->sweep_generation() & 0xff),
1159  heap);
1160  } else if (code->IsSmi()) {
1161  int value = Smi::cast(code)->value();
1162  // The regexp has not been compiled yet or there was a compilation error.
1163  if (value == JSRegExp::kUninitializedValue ||
1164  value == JSRegExp::kCompilationErrorValue) {
1165  return;
1166  }
1167 
1168  // Check if we should flush now.
1169  if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1170  re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
1171  Smi::FromInt(JSRegExp::kUninitializedValue),
1172  heap);
1173  re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
1174  Smi::FromInt(JSRegExp::kUninitializedValue),
1175  heap);
1176  }
1177  }
1178  }
1179 
1180 
1181  // Works by setting the current sweep_generation (as a smi) in the
1182  // code object place in the data array of the RegExp and keeps a copy
1183  // around that can be reinstated if we reuse the RegExp before flushing.
1184  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1185  // we flush the code.
1186  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1187  Heap* heap = map->GetHeap();
1188  MarkCompactCollector* collector = heap->mark_compact_collector();
1189  if (!collector->is_code_flushing_enabled()) {
1190  VisitJSRegExp(map, object);
1191  return;
1192  }
1193  JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1194  // Flush code or set age on both ASCII and two byte code.
1195  UpdateRegExpCodeAgeAndFlush(heap, re, true);
1196  UpdateRegExpCodeAgeAndFlush(heap, re, false);
1197  // Visit the fields of the RegExp, including the updated FixedArray.
1198  VisitJSRegExp(map, object);
1199  }
1200 
1201  static VisitorDispatchTable<Callback> non_count_table_;
1202 };
1203 
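As a worked illustration of the aging arithmetic in UpdateRegExpCodeAgeAndFlush above: the code slot of an unused RegExp is stamped with the current sweep generation (masked to 0-255 so it stays a small smi), and the code is flushed once the current generation is exactly kRegExpCodeThreshold ahead of that stamp, modulo 256. The generation numbers below are invented; this is a standalone sketch, not part of this file:

// Illustrative only: the modulo-256 age check used above.
#include <cassert>

int main() {
  const int kRegExpCodeThreshold = 5;         // same constant as above
  const unsigned stamped_generation = 250;    // generation stored in the slot
  const unsigned current_generation = 255;    // five mark-sweep GCs later
  const int stored_value = stamped_generation & 0xff;
  const bool should_flush =
      stored_value ==
      static_cast<int>((current_generation - kRegExpCodeThreshold) & 0xff);
  assert(should_flush);
  return 0;
}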
1204 
1205 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1206  FixedArrayBase* fixed_array,
1207  FixedArraySubInstanceType fast_type,
1208  FixedArraySubInstanceType dictionary_type) {
1209  Heap* heap = fixed_array->map()->GetHeap();
1210  if (fixed_array->map() != heap->fixed_cow_array_map() &&
1211  fixed_array->map() != heap->fixed_double_array_map() &&
1212  fixed_array != heap->empty_fixed_array()) {
1213  if (fixed_array->IsDictionary()) {
1214  heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1215  dictionary_type,
1216  fixed_array->Size());
1217  } else {
1218  heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1219  fast_type,
1220  fixed_array->Size());
1221  }
1222  }
1223 }
1224 
1225 
1226 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1227  MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1228  Heap* heap = map->GetHeap();
1229  int object_size = obj->Size();
1230  heap->RecordObjectStats(map->instance_type(), -1, object_size);
1231  non_count_table_.GetVisitorById(id)(map, obj);
1232  if (obj->IsJSObject()) {
1233  JSObject* object = JSObject::cast(obj);
1234  ObjectStatsCountFixedArray(object->elements(),
1235  DICTIONARY_ELEMENTS_SUB_TYPE,
1236  FAST_ELEMENTS_SUB_TYPE);
1237  ObjectStatsCountFixedArray(object->properties(),
1238  DICTIONARY_PROPERTIES_SUB_TYPE,
1239  FAST_PROPERTIES_SUB_TYPE);
1240  }
1241 }
1242 
1243 
1244 template<MarkCompactMarkingVisitor::VisitorId id>
1245 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1246  Map* map, HeapObject* obj) {
1247  ObjectStatsVisitBase(id, map, obj);
1248 }
1249 
1250 
1251 template<>
1252 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1253  MarkCompactMarkingVisitor::kVisitMap> {
1254  public:
1255  static inline void Visit(Map* map, HeapObject* obj) {
1256  Heap* heap = map->GetHeap();
1257  Map* map_obj = Map::cast(obj);
1258  ASSERT(map->instance_type() == MAP_TYPE);
1259  DescriptorArray* array = map_obj->instance_descriptors();
1260  if (map_obj->owns_descriptors() &&
1261  array != heap->empty_descriptor_array()) {
1262  int fixed_array_size = array->Size();
1263  heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1264  DESCRIPTOR_ARRAY_SUB_TYPE,
1265  fixed_array_size);
1266  }
1267  if (map_obj->HasTransitionArray()) {
1268  int fixed_array_size = map_obj->transitions()->Size();
1269  heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1270  TRANSITION_ARRAY_SUB_TYPE,
1271  fixed_array_size);
1272  }
1273  if (map_obj->code_cache() != heap->empty_fixed_array()) {
1274  heap->RecordObjectStats(
1275  FIXED_ARRAY_TYPE,
1276  MAP_CODE_CACHE_SUB_TYPE,
1277  FixedArray::cast(map_obj->code_cache())->Size());
1278  }
1279  ObjectStatsVisitBase(kVisitMap, map, obj);
1280  }
1281 };
1282 
1283 
1284 template<>
1285 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1286  MarkCompactMarkingVisitor::kVisitCode> {
1287  public:
1288  static inline void Visit(Map* map, HeapObject* obj) {
1289  Heap* heap = map->GetHeap();
1290  int object_size = obj->Size();
1291  ASSERT(map->instance_type() == CODE_TYPE);
1292  heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
1293  ObjectStatsVisitBase(kVisitCode, map, obj);
1294  }
1295 };
1296 
1297 
1298 template<>
1299 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1300  MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1301  public:
1302  static inline void Visit(Map* map, HeapObject* obj) {
1303  Heap* heap = map->GetHeap();
1304  SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1305  if (sfi->scope_info() != heap->empty_fixed_array()) {
1306  heap->RecordObjectStats(
1307  FIXED_ARRAY_TYPE,
1308  SCOPE_INFO_SUB_TYPE,
1309  FixedArray::cast(sfi->scope_info())->Size());
1310  }
1311  ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1312  }
1313 };
1314 
1315 
1316 template<>
1317 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1318  MarkCompactMarkingVisitor::kVisitFixedArray> {
1319  public:
1320  static inline void Visit(Map* map, HeapObject* obj) {
1321  Heap* heap = map->GetHeap();
1322  FixedArray* fixed_array = FixedArray::cast(obj);
1323  if (fixed_array == heap->symbol_table()) {
1324  heap->RecordObjectStats(
1325  FIXED_ARRAY_TYPE,
1326  SYMBOL_TABLE_SUB_TYPE,
1327  fixed_array->Size());
1328  }
1329  ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1330  }
1331 };
1332 
1333 
1334 void MarkCompactMarkingVisitor::Initialize() {
1335  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1336 
1337  table_.Register(kVisitJSRegExp,
1338  &VisitRegExpAndFlushCode);
1339 
1340  if (FLAG_track_gc_object_stats) {
1341  // Copy the visitor table to make call-through possible.
1342  non_count_table_.CopyFrom(&table_);
1343 #define VISITOR_ID_COUNT_FUNCTION(id) \
1344  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1345  VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1346 #undef VISITOR_ID_COUNT_FUNCTION
1347  }
1348 }
1349 
1350 
1351 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1352  MarkCompactMarkingVisitor::non_count_table_;
1353 
1354 
1355 class MarkingVisitor : public ObjectVisitor {
1356  public:
1357  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
1358 
1359  void VisitPointer(Object** p) {
1360  MarkCompactMarkingVisitor::VisitPointer(heap_, p);
1361  }
1362 
1363  void VisitPointers(Object** start, Object** end) {
1364  MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
1365  }
1366 
1367  private:
1368  Heap* heap_;
1369 };
1370 
1371 
1372 class CodeMarkingVisitor : public ThreadVisitor {
1373  public:
1374  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1375  : collector_(collector) {}
1376 
1377  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1378  collector_->PrepareThreadForCodeFlushing(isolate, top);
1379  }
1380 
1381  private:
1382  MarkCompactCollector* collector_;
1383 };
1384 
1385 
1386 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1387  public:
1388  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1389  : collector_(collector) {}
1390 
1391  void VisitPointers(Object** start, Object** end) {
1392  for (Object** p = start; p < end; p++) VisitPointer(p);
1393  }
1394 
1395  void VisitPointer(Object** slot) {
1396  Object* obj = *slot;
1397  if (obj->IsSharedFunctionInfo()) {
1398  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1399  MarkBit shared_mark = Marking::MarkBitFrom(shared);
1400  MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1401  collector_->MarkObject(shared->code(), code_mark);
1402  collector_->MarkObject(shared, shared_mark);
1403  }
1404  }
1405 
1406  private:
1407  MarkCompactCollector* collector_;
1408 };
1409 
1410 
1411 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1412  ThreadLocalTop* top) {
1413  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1414  // Note: for the frame that has a pending lazy deoptimization
1415  // StackFrame::unchecked_code will return a non-optimized code object for
1416  // the outermost function and StackFrame::LookupCode will return
1417  // actual optimized code object.
1418  StackFrame* frame = it.frame();
1419  Code* code = frame->unchecked_code();
1420  MarkBit code_mark = Marking::MarkBitFrom(code);
1421  MarkObject(code, code_mark);
1422  if (frame->is_optimized()) {
1424  frame->LookupCode());
1425  }
1426  }
1427 }
1428 
1429 
1430 void MarkCompactCollector::PrepareForCodeFlushing() {
1431  ASSERT(heap() == Isolate::Current()->heap());
1432 
1433  // TODO(1609) Currently incremental marker does not support code flushing.
1434  if (!FLAG_flush_code || was_marked_incrementally_) {
1435  EnableCodeFlushing(false);
1436  return;
1437  }
1438 
1439 #ifdef ENABLE_DEBUGGER_SUPPORT
1440  if (heap()->isolate()->debug()->IsLoaded() ||
1441  heap()->isolate()->debug()->has_break_points()) {
1442  EnableCodeFlushing(false);
1443  return;
1444  }
1445 #endif
1446 
1447  EnableCodeFlushing(true);
1448 
1449  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1450  // relies on it being marked before any other descriptor array.
1451  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1452  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1453  MarkObject(descriptor_array, descriptor_array_mark);
1454 
1455  // Make sure we are not referencing the code from the stack.
1456  ASSERT(this == heap()->mark_compact_collector());
1457  PrepareThreadForCodeFlushing(heap()->isolate(),
1458  heap()->isolate()->thread_local_top());
1459 
1460  // Iterate the archived stacks in all threads to check if
1461  // the code is referenced.
1462  CodeMarkingVisitor code_marking_visitor(this);
1463  heap()->isolate()->thread_manager()->IterateArchivedThreads(
1464  &code_marking_visitor);
1465 
1466  SharedFunctionInfoMarkingVisitor visitor(this);
1467  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1468  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1469 
1470  ProcessMarkingDeque();
1471 }
1472 
1473 
1474 // Visitor class for marking heap roots.
1475 class RootMarkingVisitor : public ObjectVisitor {
1476  public:
1477  explicit RootMarkingVisitor(Heap* heap)
1478  : collector_(heap->mark_compact_collector()) { }
1479 
1480  void VisitPointer(Object** p) {
1481  MarkObjectByPointer(p);
1482  }
1483 
1484  void VisitPointers(Object** start, Object** end) {
1485  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1486  }
1487 
1488  private:
1489  void MarkObjectByPointer(Object** p) {
1490  if (!(*p)->IsHeapObject()) return;
1491 
1492  // Replace flat cons strings in place.
1493  HeapObject* object = ShortCircuitConsString(p);
1494  MarkBit mark_bit = Marking::MarkBitFrom(object);
1495  if (mark_bit.Get()) return;
1496 
1497  Map* map = object->map();
1498  // Mark the object.
1499  collector_->SetMark(object, mark_bit);
1500 
1501  // Mark the map pointer and body, and push them on the marking stack.
1502  MarkBit map_mark = Marking::MarkBitFrom(map);
1503  collector_->MarkObject(map, map_mark);
1504  MarkCompactMarkingVisitor::IterateBody(map, object);
1505 
1506  // Mark all the objects reachable from the map and body. May leave
1507  // overflowed objects in the heap.
1508  collector_->EmptyMarkingDeque();
1509  }
1510 
1511  MarkCompactCollector* collector_;
1512 };
1513 
1514 
1515 // Helper class for pruning the symbol table.
1516 class SymbolTableCleaner : public ObjectVisitor {
1517  public:
1518  explicit SymbolTableCleaner(Heap* heap)
1519  : heap_(heap), pointers_removed_(0) { }
1520 
1521  virtual void VisitPointers(Object** start, Object** end) {
1522  // Visit all HeapObject pointers in [start, end).
1523  for (Object** p = start; p < end; p++) {
1524  Object* o = *p;
1525  if (o->IsHeapObject() &&
1526  !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1527  // Check if the symbol being pruned is an external symbol. We need to
1528  // delete the associated external data as this symbol is going away.
1529 
1530  // Since no objects have yet been moved we can safely access the map of
1531  // the object.
1532  if (o->IsExternalString()) {
1533  heap_->FinalizeExternalString(String::cast(*p));
1534  }
1535  // Set the entry to the_hole_value (as deleted).
1536  *p = heap_->the_hole_value();
1537  pointers_removed_++;
1538  }
1539  }
1540  }
1541 
1542  int PointersRemoved() {
1543  return pointers_removed_;
1544  }
1545 
1546  private:
1547  Heap* heap_;
1548  int pointers_removed_;
1549 };
1550 
1551 
1552 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1553 // are retained.
1554 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1555  public:
1556  virtual Object* RetainAs(Object* object) {
1557  if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1558  return object;
1559  } else {
1560  return NULL;
1561  }
1562  }
1563 };
1564 
1565 
1566 // Fill the marking stack with overflowed objects returned by the given
1567 // iterator. Stop when the marking stack is filled or the end of the space
1568 // is reached, whichever comes first.
1569 template<class T>
1570 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1571  MarkingDeque* marking_deque,
1572  T* it) {
1573  // The caller should ensure that the marking stack is initially not full,
1574  // so that we don't waste effort pointlessly scanning for objects.
1575  ASSERT(!marking_deque->IsFull());
1576 
1577  Map* filler_map = heap->one_pointer_filler_map();
1578  for (HeapObject* object = it->Next();
1579  object != NULL;
1580  object = it->Next()) {
1581  MarkBit markbit = Marking::MarkBitFrom(object);
1582  if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1583  Marking::GreyToBlack(markbit);
1584  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1585  marking_deque->PushBlack(object);
1586  if (marking_deque->IsFull()) return;
1587  }
1588  }
1589 }
1590 
1591 
1592 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1593 
1594 
1595 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
1596  ASSERT(!marking_deque->IsFull());
1597  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1598  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1599  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1600  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1601 
1602  MarkBit::CellType* cells = p->markbits()->cells();
1603 
1604  int last_cell_index =
1605  Bitmap::IndexToCell(
1606  Bitmap::CellAlignIndex(
1607  p->AddressToMarkbitIndex(p->area_end())));
1608 
1609  Address cell_base = p->area_start();
1610  int cell_index = Bitmap::IndexToCell(
1611  Bitmap::CellAlignIndex(
1612  p->AddressToMarkbitIndex(cell_base)));
1613 
1614 
1615  for (;
1616  cell_index < last_cell_index;
1617  cell_index++, cell_base += 32 * kPointerSize) {
1618  ASSERT((unsigned)cell_index ==
1619  Bitmap::IndexToCell(
1620  Bitmap::CellAlignIndex(
1621  p->AddressToMarkbitIndex(cell_base))));
1622 
1623  const MarkBit::CellType current_cell = cells[cell_index];
1624  if (current_cell == 0) continue;
1625 
1626  const MarkBit::CellType next_cell = cells[cell_index + 1];
1627  MarkBit::CellType grey_objects = current_cell &
1628  ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1629 
1630  int offset = 0;
1631  while (grey_objects != 0) {
1632  int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1633  grey_objects >>= trailing_zeros;
1634  offset += trailing_zeros;
1635  MarkBit markbit(&cells[cell_index], 1 << offset, false);
1636  ASSERT(Marking::IsGrey(markbit));
1637  Marking::GreyToBlack(markbit);
1638  Address addr = cell_base + offset * kPointerSize;
1639  HeapObject* object = HeapObject::FromAddress(addr);
1640  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1641  marking_deque->PushBlack(object);
1642  if (marking_deque->IsFull()) return;
1643  offset += 2;
1644  grey_objects >>= 2;
1645  }
1646 
1647  grey_objects >>= (Bitmap::kBitsPerCell - 1);
1648  }
1649 }
1650 
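The expression in DiscoverGreyObjectsOnPage above pairs each mark bit with its successor (borrowing the first bit of the next cell for the last position), so that only grey objects ("11") keep their first bit set while black objects ("10") drop out. A small standalone check of that trick on a made-up mark-bit cell, not part of this file:

// Illustrative only: extracting grey ("11") pairs from a 32-bit mark cell.
#include <cassert>
#include <cstdint>

int main() {
  const int kBitsPerCell = 32;
  // Made-up bitmap: the object at bit 0 is black (bits 0-1 == 10), the
  // object at bit 4 is grey (bits 4-5 == 11), and the object at bit 31 is
  // grey with its second bit stored in the next cell.
  const uint32_t current_cell = (1u << 0) | (3u << 4) | (1u << 31);
  const uint32_t next_cell = 1u;
  const uint32_t grey_objects =
      current_cell &
      ((current_cell >> 1) | (next_cell << (kBitsPerCell - 1)));
  // Only the grey objects survive: bits 4 and 31.
  assert(grey_objects == ((1u << 4) | (1u << 31)));
  return 0;
}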
1651 
1652 static void DiscoverGreyObjectsInSpace(Heap* heap,
1653  MarkingDeque* marking_deque,
1654  PagedSpace* space) {
1655  if (!space->was_swept_conservatively()) {
1656  HeapObjectIterator it(space);
1657  DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
1658  } else {
1659  PageIterator it(space);
1660  while (it.has_next()) {
1661  Page* p = it.next();
1662  DiscoverGreyObjectsOnPage(marking_deque, p);
1663  if (marking_deque->IsFull()) return;
1664  }
1665  }
1666 }
1667 
1668 
1669 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1670  Object* o = *p;
1671  if (!o->IsHeapObject()) return false;
1672  HeapObject* heap_object = HeapObject::cast(o);
1673  MarkBit mark = Marking::MarkBitFrom(heap_object);
1674  return !mark.Get();
1675 }
1676 
1677 
1678 void MarkCompactCollector::MarkSymbolTable() {
1679  SymbolTable* symbol_table = heap()->symbol_table();
1680  // Mark the symbol table itself.
1681  MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
1682  SetMark(symbol_table, symbol_table_mark);
1683  // Explicitly mark the prefix.
1684  MarkingVisitor marker(heap());
1685  symbol_table->IteratePrefix(&marker);
1686  ProcessMarkingDeque();
1687 }
1688 
1689 
1690 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1691  // Mark the heap roots including global variables, stack variables,
1692  // etc., and all objects reachable from them.
1693  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1694 
1695  // Handle the symbol table specially.
1696  MarkSymbolTable();
1697 
1698  // There may be overflowed objects in the heap. Visit them now.
1699  while (marking_deque_.overflowed()) {
1700  RefillMarkingDeque();
1701  EmptyMarkingDeque();
1702  }
1703 }
1704 
1705 
1706 void MarkCompactCollector::MarkObjectGroups() {
1707  List<ObjectGroup*>* object_groups =
1708  heap()->isolate()->global_handles()->object_groups();
1709 
1710  int last = 0;
1711  for (int i = 0; i < object_groups->length(); i++) {
1712  ObjectGroup* entry = object_groups->at(i);
1713  ASSERT(entry != NULL);
1714 
1715  Object*** objects = entry->objects_;
1716  bool group_marked = false;
1717  for (size_t j = 0; j < entry->length_; j++) {
1718  Object* object = *objects[j];
1719  if (object->IsHeapObject()) {
1720  HeapObject* heap_object = HeapObject::cast(object);
1721  MarkBit mark = Marking::MarkBitFrom(heap_object);
1722  if (mark.Get()) {
1723  group_marked = true;
1724  break;
1725  }
1726  }
1727  }
1728 
1729  if (!group_marked) {
1730  (*object_groups)[last++] = entry;
1731  continue;
1732  }
1733 
1734  // An object in the group is marked, so mark as grey all white heap
1735  // objects in the group.
1736  for (size_t j = 0; j < entry->length_; ++j) {
1737  Object* object = *objects[j];
1738  if (object->IsHeapObject()) {
1739  HeapObject* heap_object = HeapObject::cast(object);
1740  MarkBit mark = Marking::MarkBitFrom(heap_object);
1741  MarkObject(heap_object, mark);
1742  }
1743  }
1744 
1745  // Once the entire group has been colored grey, set the object group
1746  // to NULL so it won't be processed again.
1747  entry->Dispose();
1748  object_groups->at(i) = NULL;
1749  }
1750  object_groups->Rewind(last);
1751 }
1752 
1753 
1754 void MarkCompactCollector::MarkImplicitRefGroups() {
1755  List<ImplicitRefGroup*>* ref_groups =
1756  heap()->isolate()->global_handles()->implicit_ref_groups();
1757 
1758  int last = 0;
1759  for (int i = 0; i < ref_groups->length(); i++) {
1760  ImplicitRefGroup* entry = ref_groups->at(i);
1761  ASSERT(entry != NULL);
1762 
1763  if (!IsMarked(*entry->parent_)) {
1764  (*ref_groups)[last++] = entry;
1765  continue;
1766  }
1767 
1768  Object*** children = entry->children_;
1769  // A parent object is marked, so mark all child heap objects.
1770  for (size_t j = 0; j < entry->length_; ++j) {
1771  if ((*children[j])->IsHeapObject()) {
1772  HeapObject* child = HeapObject::cast(*children[j]);
1773  MarkBit mark = Marking::MarkBitFrom(child);
1774  MarkObject(child, mark);
1775  }
1776  }
1777 
1778  // Once the entire group has been marked, dispose it because it's
1779  // not needed anymore.
1780  entry->Dispose();
1781  }
1782  ref_groups->Rewind(last);
1783 }
1784 
1785 
1786 // Mark all objects reachable from the objects on the marking stack.
1787 // Before: the marking stack contains zero or more heap object pointers.
1788 // After: the marking stack is empty, and all objects reachable from the
1789 // marking stack have been marked, or are overflowed in the heap.
1790 void MarkCompactCollector::EmptyMarkingDeque() {
1791  while (!marking_deque_.IsEmpty()) {
1792  while (!marking_deque_.IsEmpty()) {
1793  HeapObject* object = marking_deque_.Pop();
1794  ASSERT(object->IsHeapObject());
1795  ASSERT(heap()->Contains(object));
1796  ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
1797 
1798  Map* map = object->map();
1799  MarkBit map_mark = Marking::MarkBitFrom(map);
1800  MarkObject(map, map_mark);
1801 
1802  MarkCompactMarkingVisitor::IterateBody(map, object);
1803  }
1804 
1805  // Process encountered weak maps, mark objects only reachable by those
1806  // weak maps and repeat until fix-point is reached.
1807  ProcessWeakMaps();
1808  }
1809 }
1810 
1811 
1812 // Sweep the heap for overflowed objects, clear their overflow bits, and
1813 // push them on the marking stack. Stop early if the marking stack fills
1814 // before sweeping completes. If sweeping completes, there are no remaining
1815 // overflowed objects in the heap so the overflow flag on the marking stack
1816 // is cleared.
1817 void MarkCompactCollector::RefillMarkingDeque() {
1818  ASSERT(marking_deque_.overflowed());
1819 
1820  SemiSpaceIterator new_it(heap()->new_space());
1821  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
1822  if (marking_deque_.IsFull()) return;
1823 
1824  DiscoverGreyObjectsInSpace(heap(),
1825  &marking_deque_,
1826  heap()->old_pointer_space());
1827  if (marking_deque_.IsFull()) return;
1828 
1829  DiscoverGreyObjectsInSpace(heap(),
1830  &marking_deque_,
1831  heap()->old_data_space());
1832  if (marking_deque_.IsFull()) return;
1833 
1834  DiscoverGreyObjectsInSpace(heap(),
1835  &marking_deque_,
1836  heap()->code_space());
1837  if (marking_deque_.IsFull()) return;
1838 
1839  DiscoverGreyObjectsInSpace(heap(),
1840  &marking_deque_,
1841  heap()->map_space());
1842  if (marking_deque_.IsFull()) return;
1843 
1844  DiscoverGreyObjectsInSpace(heap(),
1845  &marking_deque_,
1846  heap()->cell_space());
1847  if (marking_deque_.IsFull()) return;
1848 
1849  LargeObjectIterator lo_it(heap()->lo_space());
1850  DiscoverGreyObjectsWithIterator(heap(),
1851  &marking_deque_,
1852  &lo_it);
1853  if (marking_deque_.IsFull()) return;
1854 
1855  marking_deque_.ClearOverflowed();
1856 }
1857 
1858 
1859 // Mark all objects reachable (transitively) from objects on the marking
1860 // stack. Before: the marking stack contains zero or more heap object
1861 // pointers. After: the marking stack is empty and there are no overflowed
1862 // objects in the heap.
1863 void MarkCompactCollector::ProcessMarkingDeque() {
1864  EmptyMarkingDeque();
1865  while (marking_deque_.overflowed()) {
1866  RefillMarkingDeque();
1867  EmptyMarkingDeque();
1868  }
1869 }
1870 
1871 
1872 void MarkCompactCollector::ProcessExternalMarking() {
1873  bool work_to_do = true;
1874  ASSERT(marking_deque_.IsEmpty());
1875  while (work_to_do) {
1876  MarkObjectGroups();
1877  MarkImplicitRefGroups();
1878  work_to_do = !marking_deque_.IsEmpty();
1879  ProcessMarkingDeque();
1880  }
1881 }
1882 
1883 
1884 void MarkCompactCollector::MarkLiveObjects() {
1885  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
1886  // The recursive GC marker detects when it is nearing stack overflow,
1887  // and switches to a different marking system. JS interrupts interfere
1888  // with the C stack limit check.
1889  PostponeInterruptsScope postpone(heap()->isolate());
1890 
1891  bool incremental_marking_overflowed = false;
1892  IncrementalMarking* incremental_marking = heap_->incremental_marking();
1893  if (was_marked_incrementally_) {
1894  // Finalize the incremental marking and check whether we had an overflow.
1895  // Both markers use grey color to mark overflowed objects so
1896  // the non-incremental marker can deal with them as if the overflow
1897  // occurred during normal marking.
1898  // But incremental marker uses a separate marking deque
1899  // so we have to explicitly copy its overflow state.
1900  incremental_marking->Finalize();
1901  incremental_marking_overflowed =
1902  incremental_marking->marking_deque()->overflowed();
1903  incremental_marking->marking_deque()->ClearOverflowed();
1904  } else {
1905  // Abort any pending incremental activities e.g. incremental sweeping.
1906  incremental_marking->Abort();
1907  }
1908 
1909 #ifdef DEBUG
1910  ASSERT(state_ == PREPARE_GC);
1911  state_ = MARK_LIVE_OBJECTS;
1912 #endif
1913  // The to space contains live objects; a page in from space is used as
1914  // the marking stack.
1915  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
1916  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
1917  if (FLAG_force_marking_deque_overflows) {
1918  marking_deque_end = marking_deque_start + 64 * kPointerSize;
1919  }
1920  marking_deque_.Initialize(marking_deque_start,
1921  marking_deque_end);
1922  ASSERT(!marking_deque_.overflowed());
1923 
1924  if (incremental_marking_overflowed) {
1925  // There are overflowed objects left in the heap after incremental marking.
1926  marking_deque_.SetOverflowed();
1927  }
1928 
1929  PrepareForCodeFlushing();
1930 
1931  if (was_marked_incrementally_) {
1932  // There is no write barrier on cells, so we have to scan them now, at
1933  // the end of incremental marking.
1934  {
1935  HeapObjectIterator cell_iterator(heap()->cell_space());
1936  HeapObject* cell;
1937  while ((cell = cell_iterator.Next()) != NULL) {
1938  ASSERT(cell->IsJSGlobalPropertyCell());
1939  if (IsMarked(cell)) {
1940  int offset = JSGlobalPropertyCell::kValueOffset;
1941  MarkCompactMarkingVisitor::VisitPointer(
1942  heap(),
1943  reinterpret_cast<Object**>(cell->address() + offset));
1944  }
1945  }
1946  }
1947  }
1948 
1949  RootMarkingVisitor root_visitor(heap());
1950  MarkRoots(&root_visitor);
1951 
1952  // The objects reachable from the roots are marked, yet unreachable
1953  // objects are unmarked. Mark objects reachable due to host
1954  // application specific logic.
1955  ProcessExternalMarking();
1956 
1957  // The objects reachable from the roots or object groups are marked,
1958  // yet unreachable objects are unmarked. Mark objects reachable
1959  // only from weak global handles.
1960  //
1961  // First we identify nonlive weak handles and mark them as pending
1962  // destruction.
1963  heap()->isolate()->global_handles()->IdentifyWeakHandles(
1964  &IsUnmarkedHeapObject);
1965  // Then we mark the objects and process the transitive closure.
1966  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
1967  while (marking_deque_.overflowed()) {
1968  RefillMarkingDeque();
1969  EmptyMarkingDeque();
1970  }
1971 
1972  // Repeat host application specific marking to mark unmarked objects
1973  // reachable from the weak roots.
1974  ProcessExternalMarking();
1975 
1976  AfterMarking();
1977 }
1978 
1979 
1980 void MarkCompactCollector::AfterMarking() {
1981  // Object literal map caches reference symbols (cache keys) and maps
1982  // (cache values). At this point still useful maps have already been
1983  // marked. Mark the keys for the alive values before we process the
1984  // symbol table.
1985  ProcessMapCaches();
1986 
1987  // Prune the symbol table removing all symbols only pointed to by the
1988  // symbol table. Cannot use symbol_table() here because the symbol
1989  // table is marked.
1990  SymbolTable* symbol_table = heap()->symbol_table();
1991  SymbolTableCleaner v(heap());
1992  symbol_table->IterateElements(&v);
1993  symbol_table->ElementsRemoved(v.PointersRemoved());
1994  heap()->external_string_table_.Iterate(&v);
1995  heap()->external_string_table_.CleanUp();
1996 
1997  // Process the weak references.
1998  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1999  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2000 
2001  // Remove object groups after marking phase.
2002  heap()->isolate()->global_handles()->RemoveObjectGroups();
2003  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2004 
2005  // Flush code from collected candidates.
2006  if (is_code_flushing_enabled()) {
2007  code_flusher_->ProcessCandidates();
2008  // TODO(1609) Currently incremental marker does not support code flushing,
2009  // we need to disable it before incremental marking steps for next cycle.
2010  EnableCodeFlushing(false);
2011  }
2012 
2013  if (!FLAG_watch_ic_patching) {
2014  // Clean up dead objects from the runtime profiler.
2015  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2016  }
2017 
2018  if (FLAG_track_gc_object_stats) {
2019  heap()->CheckpointObjectStats();
2020  }
2021 }
2022 
2023 
2024 void MarkCompactCollector::ProcessMapCaches() {
2025  Object* raw_context = heap()->native_contexts_list_;
2026  while (raw_context != heap()->undefined_value()) {
2027  Context* context = reinterpret_cast<Context*>(raw_context);
2028  if (IsMarked(context)) {
2029  HeapObject* raw_map_cache =
2030  HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2031  // A map cache may be reachable from the stack. In this case
2032  // it's already transitively marked and it's too late to clean
2033  // up its parts.
2034  if (!IsMarked(raw_map_cache) &&
2035  raw_map_cache != heap()->undefined_value()) {
2036  MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2037  int existing_elements = map_cache->NumberOfElements();
2038  int used_elements = 0;
2039  for (int i = MapCache::kElementsStartIndex;
2040  i < map_cache->length();
2041  i += MapCache::kEntrySize) {
2042  Object* raw_key = map_cache->get(i);
2043  if (raw_key == heap()->undefined_value() ||
2044  raw_key == heap()->the_hole_value()) continue;
2045  STATIC_ASSERT(MapCache::kEntrySize == 2);
2046  Object* raw_map = map_cache->get(i + 1);
2047  if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2048  ++used_elements;
2049  } else {
2050  // Delete useless entries with unmarked maps.
2051  ASSERT(raw_map->IsMap());
2052  map_cache->set_the_hole(i);
2053  map_cache->set_the_hole(i + 1);
2054  }
2055  }
2056  if (used_elements == 0) {
2057  context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2058  } else {
2059  // Note: we don't actually shrink the cache here to avoid
2060  // extra complexity during GC. We rely on subsequent cache
2061  // usages (EnsureCapacity) to do this.
2062  map_cache->ElementsRemoved(existing_elements - used_elements);
2063  MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2064  MarkObject(map_cache, map_cache_markbit);
2065  }
2066  }
2067  }
2068  // Move to next element in the list.
2069  raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2070  }
2071  ProcessMarkingDeque();
2072 }
2073 
2074 
2075 void MarkCompactCollector::ReattachInitialMaps() {
2076  HeapObjectIterator map_iterator(heap()->map_space());
2077  for (HeapObject* obj = map_iterator.Next();
2078  obj != NULL;
2079  obj = map_iterator.Next()) {
2080  if (obj->IsFreeSpace()) continue;
2081  Map* map = Map::cast(obj);
2082 
2083  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2084  if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2085 
2086  if (map->attached_to_shared_function_info()) {
2087  JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2088  }
2089  }
2090 }
2091 
2092 
2093 void MarkCompactCollector::ClearNonLiveTransitions() {
2094  HeapObjectIterator map_iterator(heap()->map_space());
2095  // Iterate over the map space, setting map transitions that go from
2096  // a marked map to an unmarked map to null transitions. This action
2097  // is carried out only on maps of JSObjects and related subtypes.
2098  for (HeapObject* obj = map_iterator.Next();
2099  obj != NULL; obj = map_iterator.Next()) {
2100  Map* map = reinterpret_cast<Map*>(obj);
2101  MarkBit map_mark = Marking::MarkBitFrom(map);
2102  if (map->IsFreeSpace()) continue;
2103 
2104  ASSERT(map->IsMap());
2105  // Only JSObject and subtypes have map transitions and back pointers.
2106  STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
2107  if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
2108 
2109  if (map_mark.Get() &&
2110  map->attached_to_shared_function_info()) {
2111  // This map is used for inobject slack tracking and has been detached
2112  // from SharedFunctionInfo during the mark phase.
2113  // Since it survived the GC, reattach it now.
2114  map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
2115  }
2116 
2117  ClearNonLivePrototypeTransitions(map);
2118  ClearNonLiveMapTransitions(map, map_mark);
2119  }
2120 }
2121 
2122 
2123 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2124  int number_of_transitions = map->NumberOfProtoTransitions();
2125  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2126 
2127  int new_number_of_transitions = 0;
2128  const int header = Map::kProtoTransitionHeaderSize;
2129  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2130  const int map_offset = header + Map::kProtoTransitionMapOffset;
2131  const int step = Map::kProtoTransitionElementsPerEntry;
2132  for (int i = 0; i < number_of_transitions; i++) {
2133  Object* prototype = prototype_transitions->get(proto_offset + i * step);
2134  Object* cached_map = prototype_transitions->get(map_offset + i * step);
2135  if (IsMarked(prototype) && IsMarked(cached_map)) {
2136  int proto_index = proto_offset + new_number_of_transitions * step;
2137  int map_index = map_offset + new_number_of_transitions * step;
2138  if (new_number_of_transitions != i) {
2139  prototype_transitions->set_unchecked(
2140  heap_,
2141  proto_index,
2142  prototype,
2143  UPDATE_WRITE_BARRIER);
2144  prototype_transitions->set_unchecked(
2145  heap_,
2146  map_index,
2147  cached_map,
2148  SKIP_WRITE_BARRIER);
2149  }
2150  Object** slot =
2151  HeapObject::RawField(prototype_transitions,
2152  FixedArray::OffsetOfElementAt(proto_index));
2153  RecordSlot(slot, slot, prototype);
2154  new_number_of_transitions++;
2155  }
2156  }
2157 
2158  if (new_number_of_transitions != number_of_transitions) {
2159  map->SetNumberOfProtoTransitions(new_number_of_transitions);
2160  }
2161 
2162  // Fill slots that became free with undefined value.
2163  for (int i = new_number_of_transitions * step;
2164  i < number_of_transitions * step;
2165  i++) {
2166  prototype_transitions->set_undefined(heap_, header + i);
2167  }
2168 }
2169 
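// Worked example of the compaction above (illustrative only): with step == 2
// and three recorded transitions, suppose entries 0 and 2 survive marking
// while entry 1 does not. Entry 2 is copied down into position 1, its
// prototype slot is re-recorded for compaction, the transition count drops
// from 3 to 2, and the now-unused tail slots are overwritten with undefined.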
2170 
2171 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2172  MarkBit map_mark) {
2173  Object* potential_parent = map->GetBackPointer();
2174  if (!potential_parent->IsMap()) return;
2175  Map* parent = Map::cast(potential_parent);
2176 
2177  // Follow the back pointer and check whether we are dealing with a map
2178  // transition from a live map to a dead path; if so, clear the parent's transitions.
2179  bool current_is_alive = map_mark.Get();
2180  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2181  if (!current_is_alive && parent_is_alive) {
2182  parent->ClearNonLiveTransitions(heap());
2183  }
2184 }
2185 
2186 
2187 void MarkCompactCollector::ProcessWeakMaps() {
2188  Object* weak_map_obj = encountered_weak_maps();
2189  while (weak_map_obj != Smi::FromInt(0)) {
2190  ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2191  JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2192  ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2193  Object** anchor = reinterpret_cast<Object**>(table->address());
2194  for (int i = 0; i < table->Capacity(); i++) {
2195  if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2196  Object** key_slot =
2197  HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2198  ObjectHashTable::EntryToIndex(i)));
2199  RecordSlot(anchor, key_slot, *key_slot);
2200  Object** value_slot =
2201  HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2202  ObjectHashTable::EntryToValueIndex(i)));
2203  MarkCompactMarkingVisitor::MarkObjectByPointer(
2204  this, anchor, value_slot);
2205  }
2206  }
2207  weak_map_obj = weak_map->next();
2208  }
2209 }
2210 
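// Sketch of the weak map (ephemeron) protocol used above and in ClearWeakMaps
// below: for each entry (key -> value) of an encountered JSWeakMap, the value
// is marked and its slot recorded only if the key is already marked.
// EmptyMarkingDeque() re-runs ProcessWeakMaps() until no new values get
// marked (a fixpoint); entries whose keys are still unmarked afterwards are
// removed by ClearWeakMaps().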
2211 
2212 void MarkCompactCollector::ClearWeakMaps() {
2213  Object* weak_map_obj = encountered_weak_maps();
2214  while (weak_map_obj != Smi::FromInt(0)) {
2215  ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2216  JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2217  ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2218  for (int i = 0; i < table->Capacity(); i++) {
2219  if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2220  table->RemoveEntry(i);
2221  }
2222  }
2223  weak_map_obj = weak_map->next();
2224  weak_map->set_next(Smi::FromInt(0));
2225  }
2226  set_encountered_weak_maps(Smi::FromInt(0));
2227 }
2228 
2229 
2230 // We scavenge new space simultaneously with sweeping. This is done in two
2231 // passes.
2232 //
2233 // The first pass migrates all alive objects from one semispace to another or
2234 // promotes them to old space. The forwarding address is written directly
2235 // into the first word of the object without any encoding. If the object is
2236 // dead we write NULL as the forwarding address.
2237 //
2238 // The second pass updates pointers to new space in all spaces. It is possible
2239 // to encounter pointers to dead new space objects during traversal of pointers
2240 // to new space. We should clear them to avoid encountering them during next
2241 // should clear them to avoid encountering them during the next pointer
2242 // have to scan the entire old space, including dead objects, looking for
2243 // pointers to new space.
2244 void MarkCompactCollector::MigrateObject(Address dst,
2245  Address src,
2246  int size,
2247  AllocationSpace dest) {
2248  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2249  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2250  Address src_slot = src;
2251  Address dst_slot = dst;
2252  ASSERT(IsAligned(size, kPointerSize));
2253 
2254  for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2255  Object* value = Memory::Object_at(src_slot);
2256 
2257  Memory::Object_at(dst_slot) = value;
2258 
2259  if (heap_->InNewSpace(value)) {
2260  heap_->store_buffer()->Mark(dst_slot);
2261  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2262  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2263  &migration_slots_buffer_,
2264  reinterpret_cast<Object**>(dst_slot),
2265  SlotsBuffer::IGNORE_OVERFLOW);
2266  }
2267 
2268  src_slot += kPointerSize;
2269  dst_slot += kPointerSize;
2270  }
2271 
2272  if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2273  Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2274  Address code_entry = Memory::Address_at(code_entry_slot);
2275 
2276  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2277  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2278  &migration_slots_buffer_,
2279  SlotsBuffer::CODE_ENTRY_SLOT,
2280  code_entry_slot,
2281  SlotsBuffer::IGNORE_OVERFLOW);
2282  }
2283  }
2284  } else if (dest == CODE_SPACE) {
2285  PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2286  heap()->MoveBlock(dst, src, size);
2287  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2288  &migration_slots_buffer_,
2289  SlotsBuffer::RELOCATED_CODE_OBJECT,
2290  dst,
2291  SlotsBuffer::IGNORE_OVERFLOW);
2292  Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2293  } else {
2294  ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2295  heap()->MoveBlock(dst, src, size);
2296  }
2297  Memory::Address_at(src) = dst;
2298 }
2299 
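// Illustration of the forwarding protocol above: after MigrateObject() the
// first word of the old copy holds the raw address of the new copy
// (Memory::Address_at(src) == dst), while dead new-space objects get NULL
// written into that word by EvacuateNewSpace(). The static UpdatePointer()
// helper below reads exactly this word when the store buffer is rebuilt.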
2300 
2301 // Visitor for updating pointers from live objects in old spaces to new space.
2302 // It does not expect to encounter pointers to dead objects.
2303 class PointersUpdatingVisitor: public ObjectVisitor {
2304  public:
2305  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2306 
2307  void VisitPointer(Object** p) {
2308  UpdatePointer(p);
2309  }
2310 
2311  void VisitPointers(Object** start, Object** end) {
2312  for (Object** p = start; p < end; p++) UpdatePointer(p);
2313  }
2314 
2315  void VisitEmbeddedPointer(RelocInfo* rinfo) {
2316  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2317  Object* target = rinfo->target_object();
2318  Object* old_target = target;
2319  VisitPointer(&target);
2320  // Avoid unnecessary changes that might unnecessarily flush the instruction
2321  // cache.
2322  if (target != old_target) {
2323  rinfo->set_target_object(target);
2324  }
2325  }
2326 
2327  void VisitCodeTarget(RelocInfo* rinfo) {
2328  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2329  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2330  Object* old_target = target;
2331  VisitPointer(&target);
2332  if (target != old_target) {
2333  rinfo->set_target_address(Code::cast(target)->instruction_start());
2334  }
2335  }
2336 
2337  void VisitDebugTarget(RelocInfo* rinfo) {
2338  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2339  rinfo->IsPatchedReturnSequence()) ||
2340  (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2341  rinfo->IsPatchedDebugBreakSlotSequence()));
2342  Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2343  VisitPointer(&target);
2344  rinfo->set_call_address(Code::cast(target)->instruction_start());
2345  }
2346 
2347  static inline void UpdateSlot(Heap* heap, Object** slot) {
2348  Object* obj = *slot;
2349 
2350  if (!obj->IsHeapObject()) return;
2351 
2352  HeapObject* heap_obj = HeapObject::cast(obj);
2353 
2354  MapWord map_word = heap_obj->map_word();
2355  if (map_word.IsForwardingAddress()) {
2356  ASSERT(heap->InFromSpace(heap_obj) ||
2357  MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2358  HeapObject* target = map_word.ToForwardingAddress();
2359  *slot = target;
2360  ASSERT(!heap->InFromSpace(target) &&
2361  !MarkCompactCollector::IsOnEvacuationCandidate(target));
2362  }
2363  }
2364 
2365  private:
2366  inline void UpdatePointer(Object** p) {
2367  UpdateSlot(heap_, p);
2368  }
2369 
2370  Heap* heap_;
2371 };
2372 
2373 
2374 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2375  ASSERT(*p == object);
2376 
2377  Address old_addr = object->address();
2378 
2379  Address new_addr = Memory::Address_at(old_addr);
2380 
2381  // The new space sweep will overwrite the map word of dead objects
2382  // with NULL. In this case we do not need to transfer this entry to
2383  // the store buffer which we are rebuilding.
2384  if (new_addr != NULL) {
2385  *p = HeapObject::FromAddress(new_addr);
2386  } else {
2387  // We have to zap this pointer, because the store buffer may overflow later,
2388  // and then we have to scan the entire heap and we don't want to find
2389  // spurious new-space pointers in the old space.
2390  // TODO(mstarzinger): This was changed to a sentinel value to track down
2391  // rare crashes, change it back to Smi::FromInt(0) later.
2392  *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
2393  }
2394 }
2395 
2396 
2397 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2398  Object** p) {
2399  MapWord map_word = HeapObject::cast(*p)->map_word();
2400 
2401  if (map_word.IsForwardingAddress()) {
2402  return String::cast(map_word.ToForwardingAddress());
2403  }
2404 
2405  return String::cast(*p);
2406 }
2407 
2408 
2409 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2410  int object_size) {
2411  Object* result;
2412 
2413  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
2414  MaybeObject* maybe_result =
2415  heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
2416  if (maybe_result->ToObject(&result)) {
2417  HeapObject* target = HeapObject::cast(result);
2418  MigrateObject(target->address(),
2419  object->address(),
2420  object_size,
2421  LO_SPACE);
2422  heap()->mark_compact_collector()->tracer()->
2423  increment_promoted_objects_size(object_size);
2424  return true;
2425  }
2426  } else {
2427  OldSpace* target_space = heap()->TargetSpace(object);
2428 
2429  ASSERT(target_space == heap()->old_pointer_space() ||
2430  target_space == heap()->old_data_space());
2431  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2432  if (maybe_result->ToObject(&result)) {
2433  HeapObject* target = HeapObject::cast(result);
2434  MigrateObject(target->address(),
2435  object->address(),
2436  object_size,
2437  target_space->identity());
2438  heap()->mark_compact_collector()->tracer()->
2439  increment_promoted_objects_size(object_size);
2440  return true;
2441  }
2442  }
2443 
2444  return false;
2445 }
2446 
2447 
2448 void MarkCompactCollector::EvacuateNewSpace() {
2449  // There are soft limits in the allocation code, designed to trigger a mark-
2450  // sweep collection by failing allocations. But since we are already in
2451  // a mark-sweep allocation, there is no sense in trying to trigger one.
2452  AlwaysAllocateScope scope;
2453  heap()->CheckNewSpaceExpansionCriteria();
2454 
2455  NewSpace* new_space = heap()->new_space();
2456 
2457  // Store allocation range before flipping semispaces.
2458  Address from_bottom = new_space->bottom();
2459  Address from_top = new_space->top();
2460 
2461  // Flip the semispaces. After flipping, to space is empty, from space has
2462  // live objects.
2463  new_space->Flip();
2464  new_space->ResetAllocationInfo();
2465 
2466  int survivors_size = 0;
2467 
2468  // First pass: traverse all objects in the inactive semispace, remove marks,
2469  // migrate live objects and write forwarding addresses. This stage puts
2470  // new entries in the store buffer and may cause some pages to be marked
2471  // scan-on-scavenge.
2472  SemiSpaceIterator from_it(from_bottom, from_top);
2473  for (HeapObject* object = from_it.Next();
2474  object != NULL;
2475  object = from_it.Next()) {
2476  MarkBit mark_bit = Marking::MarkBitFrom(object);
2477  if (mark_bit.Get()) {
2478  mark_bit.Clear();
2479  // Don't bother decrementing live bytes count. We'll discard the
2480  // entire page at the end.
2481  int size = object->Size();
2482  survivors_size += size;
2483 
2484  // Aggressively promote young survivors to the old space.
2485  if (TryPromoteObject(object, size)) {
2486  continue;
2487  }
2488 
2489  // Promotion failed. Just migrate object to another semispace.
2490  MaybeObject* allocation = new_space->AllocateRaw(size);
2491  if (allocation->IsFailure()) {
2492  if (!new_space->AddFreshPage()) {
2493  // Shouldn't happen. We are sweeping linearly, and to-space
2494  // has the same number of pages as from-space, so there is
2495  // always room.
2496  UNREACHABLE();
2497  }
2498  allocation = new_space->AllocateRaw(size);
2499  ASSERT(!allocation->IsFailure());
2500  }
2501  Object* target = allocation->ToObjectUnchecked();
2502 
2503  MigrateObject(HeapObject::cast(target)->address(),
2504  object->address(),
2505  size,
2506  NEW_SPACE);
2507  } else {
2508  // Process the dead object before we write a NULL into its header.
2509  LiveObjectList::ProcessNonLive(object);
2510 
2511  // Mark dead objects in the new space with null in their map field.
2512  Memory::Address_at(object->address()) = NULL;
2513  }
2514  }
2515 
2516  heap_->IncrementYoungSurvivorsCounter(survivors_size);
2517  new_space->set_age_mark(new_space->top());
2518 }
2519 
2520 
2521 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2522  AlwaysAllocateScope always_allocate;
2523  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2524  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2525  MarkBit::CellType* cells = p->markbits()->cells();
2526  p->MarkSweptPrecisely();
2527 
2528  int last_cell_index =
2529  Bitmap::IndexToCell(
2530  Bitmap::CellAlignIndex(
2531  p->AddressToMarkbitIndex(p->area_end())));
2532 
2533  Address cell_base = p->area_start();
2534  int cell_index = Bitmap::IndexToCell(
2535  Bitmap::CellAlignIndex(
2536  p->AddressToMarkbitIndex(cell_base)));
2537 
2538  int offsets[16];
2539 
2540  for (;
2541  cell_index < last_cell_index;
2542  cell_index++, cell_base += 32 * kPointerSize) {
2543  ASSERT((unsigned)cell_index ==
2544  Bitmap::IndexToCell(
2545  Bitmap::CellAlignIndex(
2546  p->AddressToMarkbitIndex(cell_base))));
2547  if (cells[cell_index] == 0) continue;
2548 
2549  int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2550  for (int i = 0; i < live_objects; i++) {
2551  Address object_addr = cell_base + offsets[i] * kPointerSize;
2552  HeapObject* object = HeapObject::FromAddress(object_addr);
2553  ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2554 
2555  int size = object->Size();
2556 
2557  MaybeObject* target = space->AllocateRaw(size);
2558  if (target->IsFailure()) {
2559  // OS refused to give us memory.
2560  V8::FatalProcessOutOfMemory("Evacuation");
2561  return;
2562  }
2563 
2564  Object* target_object = target->ToObjectUnchecked();
2565 
2566  MigrateObject(HeapObject::cast(target_object)->address(),
2567  object_addr,
2568  size,
2569  space->identity());
2570  ASSERT(object->map_word().IsForwardingAddress());
2571  }
2572 
2573  // Clear marking bits for current cell.
2574  cells[cell_index] = 0;
2575  }
2576  p->ResetLiveBytes();
2577 }
2578 
2579 
2580 void MarkCompactCollector::EvacuatePages() {
2581  int npages = evacuation_candidates_.length();
2582  for (int i = 0; i < npages; i++) {
2583  Page* p = evacuation_candidates_[i];
2584  ASSERT(p->IsEvacuationCandidate() ||
2585  p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2586  if (p->IsEvacuationCandidate()) {
2587  // During compaction we might have to request a new page.
2588  // Check that the space still has room for that.
2589  if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
2590  EvacuateLiveObjectsFromPage(p);
2591  } else {
2592  // Without room for expansion evacuation is not guaranteed to succeed.
2593  // Pessimistically abandon unevacuated pages.
2594  for (int j = i; j < npages; j++) {
2595  Page* page = evacuation_candidates_[j];
2596  slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
2597  page->ClearEvacuationCandidate();
2598  page->SetFlag(Page::RESCAN_ON_EVACUATION);
2599  }
2600  return;
2601  }
2602  }
2603  }
2604 }
2605 
2606 
2607 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2608  public:
2609  virtual Object* RetainAs(Object* object) {
2610  if (object->IsHeapObject()) {
2611  HeapObject* heap_object = HeapObject::cast(object);
2612  MapWord map_word = heap_object->map_word();
2613  if (map_word.IsForwardingAddress()) {
2614  return map_word.ToForwardingAddress();
2615  }
2616  }
2617  return object;
2618  }
2619 };
2620 
2621 
2622 static inline void UpdateSlot(ObjectVisitor* v,
2623  SlotsBuffer::SlotType slot_type,
2624  Address addr) {
2625  switch (slot_type) {
2626  case SlotsBuffer::CODE_TARGET_SLOT: {
2627  RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
2628  rinfo.Visit(v);
2629  break;
2630  }
2631  case SlotsBuffer::CODE_ENTRY_SLOT: {
2632  v->VisitCodeEntry(addr);
2633  break;
2634  }
2635  case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2636  HeapObject* obj = HeapObject::FromAddress(addr);
2637  Code::cast(obj)->CodeIterateBody(v);
2638  break;
2639  }
2640  case SlotsBuffer::DEBUG_TARGET_SLOT: {
2641  RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
2642  if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
2643  break;
2644  }
2645  case SlotsBuffer::JS_RETURN_SLOT: {
2646  RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
2647  if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
2648  break;
2649  }
2650  case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
2651  RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
2652  rinfo.Visit(v);
2653  break;
2654  }
2655  default:
2656  UNREACHABLE();
2657  break;
2658  }
2659 }
2660 
2661 
2662 enum SweepingMode {
2663  SWEEP_ONLY,
2664  SWEEP_AND_VISIT_LIVE_OBJECTS
2665 };
2666 
2667 
2668 enum SkipListRebuildingMode {
2669  REBUILD_SKIP_LIST,
2670  IGNORE_SKIP_LIST
2671 };
2672 
2673 
2674 // Sweep a space precisely. After this has been done the space can
2675 // be iterated precisely, hitting only the live objects. Code space
2676 // is always swept precisely because we want to be able to iterate
2677 // over it. Map space is swept precisely, because it is not compacted.
2678 // Slots in live objects pointing into evacuation candidates are updated
2679 // if requested.
2680 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
2681 static void SweepPrecisely(PagedSpace* space,
2682  Page* p,
2683  ObjectVisitor* v) {
2684  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
2685  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
2686  space->identity() == CODE_SPACE);
2687  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
2688 
2689  MarkBit::CellType* cells = p->markbits()->cells();
2690  p->MarkSweptPrecisely();
2691 
2692  int last_cell_index =
2693  Bitmap::IndexToCell(
2694  Bitmap::CellAlignIndex(
2695  p->AddressToMarkbitIndex(p->area_end())));
2696 
2697  Address free_start = p->area_start();
2698  int cell_index =
2699  Bitmap::IndexToCell(
2700  Bitmap::CellAlignIndex(
2701  p->AddressToMarkbitIndex(free_start)));
2702 
2703  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
2704  Address object_address = free_start;
2705  int offsets[16];
2706 
2707  SkipList* skip_list = p->skip_list();
2708  int curr_region = -1;
2709  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
2710  skip_list->Clear();
2711  }
2712 
2713  for (;
2714  cell_index < last_cell_index;
2715  cell_index++, object_address += 32 * kPointerSize) {
2716  ASSERT((unsigned)cell_index ==
2717  Bitmap::IndexToCell(
2718  Bitmap::CellAlignIndex(
2719  p->AddressToMarkbitIndex(object_address))));
2720  int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2721  int live_index = 0;
2722  for ( ; live_objects != 0; live_objects--) {
2723  Address free_end = object_address + offsets[live_index++] * kPointerSize;
2724  if (free_end != free_start) {
2725  space->Free(free_start, static_cast<int>(free_end - free_start));
2726  }
2727  HeapObject* live_object = HeapObject::FromAddress(free_end);
2728  ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
2729  Map* map = live_object->map();
2730  int size = live_object->SizeFromMap(map);
2731  if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
2732  live_object->IterateBody(map->instance_type(), size, v);
2733  }
2734  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
2735  int new_region_start =
2736  SkipList::RegionNumber(free_end);
2737  int new_region_end =
2738  SkipList::RegionNumber(free_end + size - kPointerSize);
2739  if (new_region_start != curr_region ||
2740  new_region_end != curr_region) {
2741  skip_list->AddObject(free_end, size);
2742  curr_region = new_region_end;
2743  }
2744  }
2745  free_start = free_end + size;
2746  }
2747  // Clear marking bits for current cell.
2748  cells[cell_index] = 0;
2749  }
2750  if (free_start != p->area_end()) {
2751  space->Free(free_start, static_cast<int>(p->area_end() - free_start));
2752  }
2753  p->ResetLiveBytes();
2754 }
2755 
2756 
2757 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
2758  Page* p = Page::FromAddress(code->address());
2759 
2760  if (p->IsEvacuationCandidate() ||
2761  p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
2762  return false;
2763  }
2764 
2765  Address code_start = code->address();
2766  Address code_end = code_start + code->Size();
2767 
2768  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
2769  uint32_t end_index =
2770  MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
2771 
2772  Bitmap* b = p->markbits();
2773 
2774  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
2775  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
2776 
2777  MarkBit::CellType* start_cell = start_mark_bit.cell();
2778  MarkBit::CellType* end_cell = end_mark_bit.cell();
2779 
2780  if (value) {
2781  MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
2782  MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
2783 
2784  if (start_cell == end_cell) {
2785  *start_cell |= start_mask & end_mask;
2786  } else {
2787  *start_cell |= start_mask;
2788  for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
2789  *cell = ~0;
2790  }
2791  *end_cell |= end_mask;
2792  }
2793  } else {
2794  for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
2795  *cell = 0;
2796  }
2797  }
2798 
2799  return true;
2800 }
2801 
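// Worked example of the mask computation above: if the invalidated code
// object starts at the bit with mask 0x8 (bit 3) and ends at the bit with
// mask 0x40 (bit 6) inside the same cell, then start_mask == ~0x7 and
// end_mask == 0x7F, so start_mask & end_mask == 0x78 and exactly mark bits
// 3..6 covering the object are set.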
2802 
2803 static bool IsOnInvalidatedCodeObject(Address addr) {
2804  // We did not record any slots in large objects thus
2805  // we can safely go to the page from the slot address.
2806  Page* p = Page::FromAddress(addr);
2807 
2808  // First check owner's identity because old pointer and old data spaces
2809  // are swept lazily and might still have non-zero mark-bits on some
2810  // pages.
2811  if (p->owner()->identity() != CODE_SPACE) return false;
2812 
2813  // In code space only bits on evacuation candidates (but we don't record
2814  // any slots on them) and under invalidated code objects are non-zero.
2815  MarkBit mark_bit =
2816  p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
2817 
2818  return mark_bit.Get();
2819 }
2820 
2821 
2822 void MarkCompactCollector::InvalidateCode(Code* code) {
2823  if (heap_->incremental_marking()->IsCompacting() &&
2824  !ShouldSkipEvacuationSlotRecording(code)) {
2825  ASSERT(compacting_);
2826 
2827  // If the object is white then no slots were recorded on it yet.
2828  MarkBit mark_bit = Marking::MarkBitFrom(code);
2829  if (Marking::IsWhite(mark_bit)) return;
2830 
2831  invalidated_code_.Add(code);
2832  }
2833 }
2834 
2835 
2836 bool MarkCompactCollector::MarkInvalidatedCode() {
2837  bool code_marked = false;
2838 
2839  int length = invalidated_code_.length();
2840  for (int i = 0; i < length; i++) {
2841  Code* code = invalidated_code_[i];
2842 
2843  if (SetMarkBitsUnderInvalidatedCode(code, true)) {
2844  code_marked = true;
2845  }
2846  }
2847 
2848  return code_marked;
2849 }
2850 
2851 
2852 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
2853  int length = invalidated_code_.length();
2854  for (int i = 0; i < length; i++) {
2855  if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
2856  }
2857 }
2858 
2859 
2860 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
2861  int length = invalidated_code_.length();
2862  for (int i = 0; i < length; i++) {
2863  Code* code = invalidated_code_[i];
2864  if (code != NULL) {
2865  code->Iterate(visitor);
2866  SetMarkBitsUnderInvalidatedCode(code, false);
2867  }
2868  }
2869  invalidated_code_.Rewind(0);
2870 }
2871 
2872 
2873 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
2874  Heap::RelocationLock relocation_lock(heap());
2875 
2876  bool code_slots_filtering_required;
2877  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2878  code_slots_filtering_required = MarkInvalidatedCode();
2879 
2880  EvacuateNewSpace();
2881  }
2882 
2883 
2884  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
2885  EvacuatePages();
2886  }
2887 
2888  // Second pass: find pointers to new space and update them.
2889  PointersUpdatingVisitor updating_visitor(heap());
2890 
2891  { GCTracer::Scope gc_scope(tracer_,
2892  GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
2893  // Update pointers in to space.
2894  SemiSpaceIterator to_it(heap()->new_space()->bottom(),
2895  heap()->new_space()->top());
2896  for (HeapObject* object = to_it.Next();
2897  object != NULL;
2898  object = to_it.Next()) {
2899  Map* map = object->map();
2900  object->IterateBody(map->instance_type(),
2901  object->SizeFromMap(map),
2902  &updating_visitor);
2903  }
2904  }
2905 
2906  { GCTracer::Scope gc_scope(tracer_,
2907  GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
2908  // Update roots.
2909  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
2910  LiveObjectList::IterateElements(&updating_visitor);
2911  }
2912 
2913  { GCTracer::Scope gc_scope(tracer_,
2914  GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
2915  StoreBufferRebuildScope scope(heap_,
2916  heap_->store_buffer(),
2917  &Heap::ScavengeStoreBufferCallback);
2918  heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
2919  }
2920 
2921  { GCTracer::Scope gc_scope(tracer_,
2922  GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
2923  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
2924  migration_slots_buffer_,
2925  code_slots_filtering_required);
2926  if (FLAG_trace_fragmentation) {
2927  PrintF(" migration slots buffer: %d\n",
2928  SlotsBuffer::SizeOfChain(migration_slots_buffer_));
2929  }
2930 
2931  if (compacting_ && was_marked_incrementally_) {
2932  // It's difficult to filter out slots recorded for large objects.
2933  LargeObjectIterator it(heap_->lo_space());
2934  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2935  // LargeObjectSpace is not swept yet thus we have to skip
2936  // dead objects explicitly.
2937  if (!IsMarked(obj)) continue;
2938 
2939  Page* p = Page::FromAddress(obj->address());
2940  if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
2941  obj->Iterate(&updating_visitor);
2942  p->ClearFlag(Page::RESCAN_ON_EVACUATION);
2943  }
2944  }
2945  }
2946  }
2947 
2948  int npages = evacuation_candidates_.length();
2949  { GCTracer::Scope gc_scope(
2950  tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
2951  for (int i = 0; i < npages; i++) {
2952  Page* p = evacuation_candidates_[i];
2953  ASSERT(p->IsEvacuationCandidate() ||
2954  p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2955 
2956  if (p->IsEvacuationCandidate()) {
2957  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
2958  p->slots_buffer(),
2959  code_slots_filtering_required);
2960  if (FLAG_trace_fragmentation) {
2961  PrintF(" page %p slots buffer: %d\n",
2962  reinterpret_cast<void*>(p),
2963  SlotsBuffer::SizeOfChain(p->slots_buffer()));
2964  }
2965 
2966  // Important: skip list should be cleared only after roots were updated
2967  // because root iteration traverses the stack and might have to find
2968  // code objects from a non-updated pc pointing into an evacuation candidate.
2969  SkipList* list = p->skip_list();
2970  if (list != NULL) list->Clear();
2971  } else {
2972  if (FLAG_gc_verbose) {
2973  PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
2974  reinterpret_cast<intptr_t>(p));
2975  }
2976  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2977  p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
2978 
2979  switch (space->identity()) {
2980  case OLD_DATA_SPACE:
2981  SweepConservatively(space, p);
2982  break;
2983  case OLD_POINTER_SPACE:
2984  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
2985  space, p, &updating_visitor);
2986  break;
2987  case CODE_SPACE:
2988  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
2989  space, p, &updating_visitor);
2990  break;
2991  default:
2992  UNREACHABLE();
2993  break;
2994  }
2995  }
2996  }
2997  }
2998 
2999  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3000 
3001  // Update pointers from cells.
3002  HeapObjectIterator cell_iterator(heap_->cell_space());
3003  for (HeapObject* cell = cell_iterator.Next();
3004  cell != NULL;
3005  cell = cell_iterator.Next()) {
3006  if (cell->IsJSGlobalPropertyCell()) {
3007  Address value_address =
3008  reinterpret_cast<Address>(cell) +
3009  (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
3010  updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
3011  }
3012  }
3013 
3014  // Update pointer from the native contexts list.
3015  updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3016 
3017  heap_->symbol_table()->Iterate(&updating_visitor);
3018 
3019  // Update pointers from external string table.
3020  heap_->UpdateReferencesInExternalStringTable(
3021  &UpdateReferenceInExternalStringTableEntry);
3022 
3023  if (!FLAG_watch_ic_patching) {
3024  // Update JSFunction pointers from the runtime profiler.
3025  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3026  &updating_visitor);
3027  }
3028 
3029  EvacuationWeakObjectRetainer evacuation_object_retainer;
3030  heap()->ProcessWeakReferences(&evacuation_object_retainer);
3031 
3032  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3033  // under it.
3034  ProcessInvalidatedCode(&updating_visitor);
3035 
3036  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3037 
3038 #ifdef VERIFY_HEAP
3039  if (FLAG_verify_heap) {
3040  VerifyEvacuation(heap_);
3041  }
3042 #endif
3043 
3044  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3045  ASSERT(migration_slots_buffer_ == NULL);
3046  for (int i = 0; i < npages; i++) {
3047  Page* p = evacuation_candidates_[i];
3048  if (!p->IsEvacuationCandidate()) continue;
3049  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3050  space->Free(p->area_start(), p->area_size());
3051  p->set_scan_on_scavenge(false);
3052  slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3053  p->ResetLiveBytes();
3054  space->ReleasePage(p);
3055  }
3056  evacuation_candidates_.Rewind(0);
3057  compacting_ = false;
3058 }
3059 
3060 
3061 static const int kStartTableEntriesPerLine = 5;
3062 static const int kStartTableLines = 171;
3063 static const int kStartTableInvalidLine = 127;
3064 static const int kStartTableUnusedEntry = 126;
3065 
3066 #define _ kStartTableUnusedEntry
3067 #define X kStartTableInvalidLine
3068 // Mark-bit to object start offset table.
3069 //
3070 // The line is indexed by the mark bits in a byte. The first number on
3071 // the line describes the number of live object starts for the line and the
3072 // other numbers on the line describe the offsets (in words) of the object
3073 // starts.
3074 //
3075 // Since objects are at least two words long we don't have entries for two
3076 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
3077 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3078  0, _, _, _, _, // 0
3079  1, 0, _, _, _, // 1
3080  1, 1, _, _, _, // 2
3081  X, _, _, _, _, // 3
3082  1, 2, _, _, _, // 4
3083  2, 0, 2, _, _, // 5
3084  X, _, _, _, _, // 6
3085  X, _, _, _, _, // 7
3086  1, 3, _, _, _, // 8
3087  2, 0, 3, _, _, // 9
3088  2, 1, 3, _, _, // 10
3089  X, _, _, _, _, // 11
3090  X, _, _, _, _, // 12
3091  X, _, _, _, _, // 13
3092  X, _, _, _, _, // 14
3093  X, _, _, _, _, // 15
3094  1, 4, _, _, _, // 16
3095  2, 0, 4, _, _, // 17
3096  2, 1, 4, _, _, // 18
3097  X, _, _, _, _, // 19
3098  2, 2, 4, _, _, // 20
3099  3, 0, 2, 4, _, // 21
3100  X, _, _, _, _, // 22
3101  X, _, _, _, _, // 23
3102  X, _, _, _, _, // 24
3103  X, _, _, _, _, // 25
3104  X, _, _, _, _, // 26
3105  X, _, _, _, _, // 27
3106  X, _, _, _, _, // 28
3107  X, _, _, _, _, // 29
3108  X, _, _, _, _, // 30
3109  X, _, _, _, _, // 31
3110  1, 5, _, _, _, // 32
3111  2, 0, 5, _, _, // 33
3112  2, 1, 5, _, _, // 34
3113  X, _, _, _, _, // 35
3114  2, 2, 5, _, _, // 36
3115  3, 0, 2, 5, _, // 37
3116  X, _, _, _, _, // 38
3117  X, _, _, _, _, // 39
3118  2, 3, 5, _, _, // 40
3119  3, 0, 3, 5, _, // 41
3120  3, 1, 3, 5, _, // 42
3121  X, _, _, _, _, // 43
3122  X, _, _, _, _, // 44
3123  X, _, _, _, _, // 45
3124  X, _, _, _, _, // 46
3125  X, _, _, _, _, // 47
3126  X, _, _, _, _, // 48
3127  X, _, _, _, _, // 49
3128  X, _, _, _, _, // 50
3129  X, _, _, _, _, // 51
3130  X, _, _, _, _, // 52
3131  X, _, _, _, _, // 53
3132  X, _, _, _, _, // 54
3133  X, _, _, _, _, // 55
3134  X, _, _, _, _, // 56
3135  X, _, _, _, _, // 57
3136  X, _, _, _, _, // 58
3137  X, _, _, _, _, // 59
3138  X, _, _, _, _, // 60
3139  X, _, _, _, _, // 61
3140  X, _, _, _, _, // 62
3141  X, _, _, _, _, // 63
3142  1, 6, _, _, _, // 64
3143  2, 0, 6, _, _, // 65
3144  2, 1, 6, _, _, // 66
3145  X, _, _, _, _, // 67
3146  2, 2, 6, _, _, // 68
3147  3, 0, 2, 6, _, // 69
3148  X, _, _, _, _, // 70
3149  X, _, _, _, _, // 71
3150  2, 3, 6, _, _, // 72
3151  3, 0, 3, 6, _, // 73
3152  3, 1, 3, 6, _, // 74
3153  X, _, _, _, _, // 75
3154  X, _, _, _, _, // 76
3155  X, _, _, _, _, // 77
3156  X, _, _, _, _, // 78
3157  X, _, _, _, _, // 79
3158  2, 4, 6, _, _, // 80
3159  3, 0, 4, 6, _, // 81
3160  3, 1, 4, 6, _, // 82
3161  X, _, _, _, _, // 83
3162  3, 2, 4, 6, _, // 84
3163  4, 0, 2, 4, 6, // 85
3164  X, _, _, _, _, // 86
3165  X, _, _, _, _, // 87
3166  X, _, _, _, _, // 88
3167  X, _, _, _, _, // 89
3168  X, _, _, _, _, // 90
3169  X, _, _, _, _, // 91
3170  X, _, _, _, _, // 92
3171  X, _, _, _, _, // 93
3172  X, _, _, _, _, // 94
3173  X, _, _, _, _, // 95
3174  X, _, _, _, _, // 96
3175  X, _, _, _, _, // 97
3176  X, _, _, _, _, // 98
3177  X, _, _, _, _, // 99
3178  X, _, _, _, _, // 100
3179  X, _, _, _, _, // 101
3180  X, _, _, _, _, // 102
3181  X, _, _, _, _, // 103
3182  X, _, _, _, _, // 104
3183  X, _, _, _, _, // 105
3184  X, _, _, _, _, // 106
3185  X, _, _, _, _, // 107
3186  X, _, _, _, _, // 108
3187  X, _, _, _, _, // 109
3188  X, _, _, _, _, // 110
3189  X, _, _, _, _, // 111
3190  X, _, _, _, _, // 112
3191  X, _, _, _, _, // 113
3192  X, _, _, _, _, // 114
3193  X, _, _, _, _, // 115
3194  X, _, _, _, _, // 116
3195  X, _, _, _, _, // 117
3196  X, _, _, _, _, // 118
3197  X, _, _, _, _, // 119
3198  X, _, _, _, _, // 120
3199  X, _, _, _, _, // 121
3200  X, _, _, _, _, // 122
3201  X, _, _, _, _, // 123
3202  X, _, _, _, _, // 124
3203  X, _, _, _, _, // 125
3204  X, _, _, _, _, // 126
3205  X, _, _, _, _, // 127
3206  1, 7, _, _, _, // 128
3207  2, 0, 7, _, _, // 129
3208  2, 1, 7, _, _, // 130
3209  X, _, _, _, _, // 131
3210  2, 2, 7, _, _, // 132
3211  3, 0, 2, 7, _, // 133
3212  X, _, _, _, _, // 134
3213  X, _, _, _, _, // 135
3214  2, 3, 7, _, _, // 136
3215  3, 0, 3, 7, _, // 137
3216  3, 1, 3, 7, _, // 138
3217  X, _, _, _, _, // 139
3218  X, _, _, _, _, // 140
3219  X, _, _, _, _, // 141
3220  X, _, _, _, _, // 142
3221  X, _, _, _, _, // 143
3222  2, 4, 7, _, _, // 144
3223  3, 0, 4, 7, _, // 145
3224  3, 1, 4, 7, _, // 146
3225  X, _, _, _, _, // 147
3226  3, 2, 4, 7, _, // 148
3227  4, 0, 2, 4, 7, // 149
3228  X, _, _, _, _, // 150
3229  X, _, _, _, _, // 151
3230  X, _, _, _, _, // 152
3231  X, _, _, _, _, // 153
3232  X, _, _, _, _, // 154
3233  X, _, _, _, _, // 155
3234  X, _, _, _, _, // 156
3235  X, _, _, _, _, // 157
3236  X, _, _, _, _, // 158
3237  X, _, _, _, _, // 159
3238  2, 5, 7, _, _, // 160
3239  3, 0, 5, 7, _, // 161
3240  3, 1, 5, 7, _, // 162
3241  X, _, _, _, _, // 163
3242  3, 2, 5, 7, _, // 164
3243  4, 0, 2, 5, 7, // 165
3244  X, _, _, _, _, // 166
3245  X, _, _, _, _, // 167
3246  3, 3, 5, 7, _, // 168
3247  4, 0, 3, 5, 7, // 169
3248  4, 1, 3, 5, 7 // 170
3249 };
3250 #undef _
3251 #undef X
3252 
3253 
3254 // Takes a word of mark bits. Returns the number of objects that start in the
3255 // range. Puts the word offsets of the object starts in the supplied array.
3256 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3257  int objects = 0;
3258  int offset = 0;
3259 
3260  // No consecutive 1 bits.
3261  ASSERT((mark_bits & 0x180) != 0x180);
3262  ASSERT((mark_bits & 0x18000) != 0x18000);
3263  ASSERT((mark_bits & 0x1800000) != 0x1800000);
3264 
3265  while (mark_bits != 0) {
3266  int byte = (mark_bits & 0xff);
3267  mark_bits >>= 8;
3268  if (byte != 0) {
3269  ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3270  char* table = kStartTable + byte * kStartTableEntriesPerLine;
3271  int objects_in_these_8_words = table[0];
3272  ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3273  ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3274  for (int i = 0; i < objects_in_these_8_words; i++) {
3275  starts[objects++] = offset + table[1 + i];
3276  }
3277  }
3278  offset += 8;
3279  }
3280  return objects;
3281 }
3282 
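// Worked example: for mark_bits == 0x00150015 the low byte 0x15 (table line
// 21: "3, 0, 2, 4") contributes object starts at word offsets 0, 2 and 4; the
// byte at bits 16..23 is also 0x15 and contributes offsets 16, 18 and 20, so
// the function returns 6.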
3283 
3284 static inline Address DigestFreeStart(Address approximate_free_start,
3285  uint32_t free_start_cell) {
3286  ASSERT(free_start_cell != 0);
3287 
3288  // No consecutive 1 bits.
3289  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3290 
3291  int offsets[16];
3292  uint32_t cell = free_start_cell;
3293  int offset_of_last_live;
3294  if ((cell & 0x80000000u) != 0) {
3295  // This case would overflow below.
3296  offset_of_last_live = 31;
3297  } else {
3298  // Remove all but one bit, the most significant. This is an optimization
3299  // that may or may not be worthwhile.
3300  cell |= cell >> 16;
3301  cell |= cell >> 8;
3302  cell |= cell >> 4;
3303  cell |= cell >> 2;
3304  cell |= cell >> 1;
3305  cell = (cell + 1) >> 1;
3306  int live_objects = MarkWordToObjectStarts(cell, offsets);
3307  ASSERT(live_objects == 1);
3308  offset_of_last_live = offsets[live_objects - 1];
3309  }
3310  Address last_live_start =
3311  approximate_free_start + offset_of_last_live * kPointerSize;
3312  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3313  Address free_start = last_live_start + last_live->Size();
3314  return free_start;
3315 }
3316 
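// Worked example of the bit trick above: for free_start_cell == 0x50 (bits 4
// and 6 set) the shift-or cascade smears the cell to 0x7F, and
// (0x7F + 1) >> 1 == 0x40 keeps only the most significant set bit, i.e. the
// last live object start in the cell (word offset 6). The free space then
// begins at that object's address plus its size.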
3317 
3318 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3319  ASSERT(cell != 0);
3320 
3321  // No consecutive 1 bits.
3322  ASSERT((cell & (cell << 1)) == 0);
3323 
3324  int offsets[16];
3325  if (cell == 0x80000000u) { // Avoid overflow below.
3326  return block_address + 31 * kPointerSize;
3327  }
3328  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3329  ASSERT((first_set_bit & cell) == first_set_bit);
3330  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3331  ASSERT(live_objects == 1);
3332  USE(live_objects);
3333  return block_address + offsets[0] * kPointerSize;
3334 }
3335 
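// Worked example: for cell == 0x60 (bits 5 and 6 set),
// ((cell ^ (cell - 1)) + 1) >> 1 == 0x20 isolates the least significant set
// bit, so the first live object in this 32-word block starts at
// block_address + 5 * kPointerSize.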
3336 
3337 // Sweeps a space conservatively. After this has been done the larger free
3338 // spaces have been put on the free list and the smaller ones have been
3339 // ignored and left untouched. A free space is always either ignored or put
3340 // on the free list, never split up into two parts. This is important
3341 // because it means that any FreeSpace maps left actually describe a region of
3342 // memory that can be ignored when scanning. Dead objects other than free
3343 // spaces will not contain the free space map.
3344 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
3345  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3346  MarkBit::CellType* cells = p->markbits()->cells();
3347  p->MarkSweptConservatively();
3348 
3349  int last_cell_index =
3350  Bitmap::IndexToCell(
3351  Bitmap::CellAlignIndex(
3352  p->AddressToMarkbitIndex(p->area_end())));
3353 
3354  int cell_index =
3355  Bitmap::IndexToCell(
3356  Bitmap::CellAlignIndex(
3357  p->AddressToMarkbitIndex(p->area_start())));
3358 
3359  intptr_t freed_bytes = 0;
3360 
3361  // This is the start of the 32 word block that we are currently looking at.
3362  Address block_address = p->area_start();
3363 
3364  // Skip over all the dead objects at the start of the page and mark them free.
3365  for (;
3366  cell_index < last_cell_index;
3367  cell_index++, block_address += 32 * kPointerSize) {
3368  if (cells[cell_index] != 0) break;
3369  }
3370  size_t size = block_address - p->area_start();
3371  if (cell_index == last_cell_index) {
3372  freed_bytes += static_cast<int>(space->Free(p->area_start(),
3373  static_cast<int>(size)));
3374  ASSERT_EQ(0, p->LiveBytes());
3375  return freed_bytes;
3376  }
3377  // Grow the size of the start-of-page free space a little to get up to the
3378  // first live object.
3379  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
3380  // Free the first free space.
3381  size = free_end - p->area_start();
3382  freed_bytes += space->Free(p->area_start(),
3383  static_cast<int>(size));
3384  // The start of the current free area is represented in undigested form by
3385  // the address of the last 32-word section that contained a live object and
3386  // the marking bitmap for that cell, which describes where the live object
3387  // started. Unless we find a large free space in the bitmap we will not
3388  // digest this pair into a real address. We start the iteration here at the
3389  // first word in the marking bit map that indicates a live object.
3390  Address free_start = block_address;
3391  uint32_t free_start_cell = cells[cell_index];
3392 
3393  for ( ;
3394  cell_index < last_cell_index;
3395  cell_index++, block_address += 32 * kPointerSize) {
3396  ASSERT((unsigned)cell_index ==
3397  Bitmap::IndexToCell(
3398  Bitmap::CellAlignIndex(
3399  p->AddressToMarkbitIndex(block_address))));
3400  uint32_t cell = cells[cell_index];
3401  if (cell != 0) {
3402  // We have a live object. Check approximately whether it is more than 32
3403  // words since the last live object.
3404  if (block_address - free_start > 32 * kPointerSize) {
3405  free_start = DigestFreeStart(free_start, free_start_cell);
3406  if (block_address - free_start > 32 * kPointerSize) {
3407  // Now that we know the exact start of the free space it still looks
3408  // like we have a large enough free space to be worth bothering with,
3409  // so now we need to find the start of the first live object at the
3410  // end of the free space.
3411  free_end = StartOfLiveObject(block_address, cell);
3412  freed_bytes += space->Free(free_start,
3413  static_cast<int>(free_end - free_start));
3414  }
3415  }
3416  // Update our undigested record of where the current free area started.
3417  free_start = block_address;
3418  free_start_cell = cell;
3419  // Clear marking bits for current cell.
3420  cells[cell_index] = 0;
3421  }
3422  }
3423 
3424  // Handle the free space at the end of the page.
3425  if (block_address - free_start > 32 * kPointerSize) {
3426  free_start = DigestFreeStart(free_start, free_start_cell);
3427  freed_bytes += space->Free(free_start,
3428  static_cast<int>(block_address - free_start));
3429  }
3430 
3431  p->ResetLiveBytes();
3432  return freed_bytes;
3433 }
3434 
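// Note on granularity (illustrative): a mark-bit cell covers 32 words, so
// with kPointerSize == 8 (x64) only holes of more than roughly 256 bytes
// between live objects are digested into exact addresses and handed to the
// free list; smaller holes are simply left untouched, as the comment above
// this function describes.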
3435 
3436 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3437  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3438  sweeper == LAZY_CONSERVATIVE);
3439 
3440  space->ClearStats();
3441 
3442  PageIterator it(space);
3443 
3444  intptr_t freed_bytes = 0;
3445  int pages_swept = 0;
3446  intptr_t newspace_size = space->heap()->new_space()->Size();
3447  bool lazy_sweeping_active = false;
3448  bool unused_page_present = false;
3449 
3450  while (it.has_next()) {
3451  Page* p = it.next();
3452 
3453  // Clear sweeping flags indicating that marking bits are still intact.
3454  p->ClearSweptPrecisely();
3456 
3457  if (p->IsEvacuationCandidate()) {
3458  ASSERT(evacuation_candidates_.length() > 0);
3459  continue;
3460  }
3461 
3462  if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3463  // Will be processed in EvacuateNewSpaceAndCandidates.
3464  continue;
3465  }
3466 
3467  // One unused page is kept; all others are released before being swept.
3468  if (p->LiveBytes() == 0) {
3469  if (unused_page_present) {
3470  if (FLAG_gc_verbose) {
3471  PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3472  reinterpret_cast<intptr_t>(p));
3473  }
3474  // Adjust unswept free bytes because releasing a page expects said
3475  // counter to be accurate for unswept pages.
3476  space->IncreaseUnsweptFreeBytes(p);
3477  space->ReleasePage(p);
3478  continue;
3479  }
3480  unused_page_present = true;
3481  }
3482 
3483  if (lazy_sweeping_active) {
3484  if (FLAG_gc_verbose) {
3485  PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
3486  reinterpret_cast<intptr_t>(p));
3487  }
3488  space->IncreaseUnsweptFreeBytes(p);
3489  continue;
3490  }
3491 
3492  switch (sweeper) {
3493  case CONSERVATIVE: {
3494  if (FLAG_gc_verbose) {
3495  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
3496  reinterpret_cast<intptr_t>(p));
3497  }
3498  SweepConservatively(space, p);
3499  pages_swept++;
3500  break;
3501  }
3502  case LAZY_CONSERVATIVE: {
3503  if (FLAG_gc_verbose) {
3504  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
3505  reinterpret_cast<intptr_t>(p));
3506  }
3507  freed_bytes += SweepConservatively(space, p);
3508  pages_swept++;
3509  if (freed_bytes > 2 * newspace_size) {
3510  space->SetPagesToSweep(p->next_page());
3511  lazy_sweeping_active = true;
3512  } else {
3513  if (FLAG_gc_verbose) {
3514  PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
3515  freed_bytes);
3516  }
3517  }
3518  break;
3519  }
3520  case PRECISE: {
3521  if (FLAG_gc_verbose) {
3522  PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
3523  reinterpret_cast<intptr_t>(p));
3524  }
3525  if (space->identity() == CODE_SPACE) {
3526  SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
3527  } else {
3528  SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
3529  }
3530  pages_swept++;
3531  break;
3532  }
3533  default: {
3534  UNREACHABLE();
3535  }
3536  }
3537  }
3538 
3539  if (FLAG_gc_verbose) {
3540  PrintF("SweepSpace: %s (%d pages swept)\n",
3541  AllocationSpaceName(space->identity()),
3542  pages_swept);
3543  }
3544 
3545  // Give pages that are queued to be freed back to the OS.
3546  heap()->FreeQueuedChunks();
3547 }
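// ---------------------------------------------------------------------------
// Editor's note (not part of mark-compact.cc): in the LAZY_CONSERVATIVE case
// above, pages are swept eagerly only until freed_bytes exceeds twice the
// current new-space size; the remaining pages are handed to the lazy sweeper
// via SetPagesToSweep() and tracked with IncreaseUnsweptFreeBytes(). A minimal
// sketch of that cutoff (hypothetical helper name):

static inline bool LazySweepingCanTakeOver(intptr_t freed_bytes,
                                           intptr_t newspace_size) {
  // The heuristic used above: stop eager sweeping once the reclaimed space
  // exceeds twice the size of new space.
  return freed_bytes > 2 * newspace_size;
}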
3548 
3549 
3550 void MarkCompactCollector::SweepSpaces() {
3551  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
3552 #ifdef DEBUG
3553  state_ = SWEEP_SPACES;
3554 #endif
3555  SweeperType how_to_sweep =
3556  FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3557  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
3558  if (sweep_precisely_) how_to_sweep = PRECISE;
3559  // Noncompacting collections simply sweep the spaces to clear the mark
3560  // bits and free the nonlive blocks (for old and map spaces). We sweep
3561  // the map space last because freeing non-live maps overwrites them and
3562  // the other spaces rely on possibly non-live maps to get the sizes for
3563  // non-live objects.
3564  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3565  SweepSpace(heap()->old_data_space(), how_to_sweep);
3566 
3567  RemoveDeadInvalidatedCode();
3568  SweepSpace(heap()->code_space(), PRECISE);
3569 
3570  SweepSpace(heap()->cell_space(), PRECISE);
3571 
3572  EvacuateNewSpaceAndCandidates();
3573 
3574  // ClearNonLiveTransitions depends on precise sweeping of map space to
3575  // detect whether an unmarked map became dead in this collection or in one
3576  // of the previous ones.
3577  SweepSpace(heap()->map_space(), PRECISE);
3578 
3579  // Deallocate unmarked objects and clear marked bits for marked objects.
3580  heap_->lo_space()->FreeUnmarkedObjects();
3581 }
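// ---------------------------------------------------------------------------
// Editor's note (not part of mark-compact.cc): the ordering above matters
// because an object's size is obtained from its map (HeapObject::SizeFromMap),
// so a dead object in the other spaces is only measurable while its map has
// not yet been overwritten by freeing. Sweeping map space precisely and last
// keeps possibly dead maps readable for as long as the other spaces need them,
// and gives the next cycle's ClearNonLiveTransitions a reliable picture of
// which maps died when.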
3582 
3583 
3584 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
3585  if (enable) {
3586  if (code_flusher_ != NULL) return;
3587  code_flusher_ = new CodeFlusher(heap()->isolate());
3588  } else {
3589  if (code_flusher_ == NULL) return;
3590  delete code_flusher_;
3591  code_flusher_ = NULL;
3592  }
3593 }
3594 
3595 
3596 // TODO(1466) ReportDeleteIfNeeded is not called currently.
3597 // Our profiling tools do not expect intersections between
3598 // code objects. We should either reenable it or change our tools.
3599 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
3600  Isolate* isolate) {
3601 #ifdef ENABLE_GDB_JIT_INTERFACE
3602  if (obj->IsCode()) {
3603  GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3604  }
3605 #endif
3606  if (obj->IsCode()) {
3607  PROFILE(isolate, CodeDeleteEvent(obj->address()));
3608  }
3609 }
3610 
3611 
3612 void MarkCompactCollector::Initialize() {
3613  MarkCompactMarkingVisitor::Initialize();
3614  IncrementalMarking::Initialize();
3615 }
3616 
3617 
3618 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
3619  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
3620 }
3621 
3622 
3623 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
3624  SlotsBuffer** buffer_address,
3625  SlotType type,
3626  Address addr,
3627  AdditionMode mode) {
3628  SlotsBuffer* buffer = *buffer_address;
3629  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
3630  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
3631  allocator->DeallocateChain(buffer_address);
3632  return false;
3633  }
3634  buffer = allocator->AllocateBuffer(buffer);
3635  *buffer_address = buffer;
3636  }
3637  ASSERT(buffer->HasSpaceForTypedSlot());
3638  buffer->Add(reinterpret_cast<ObjectSlot>(type));
3639  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
3640  return true;
3641 }
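// ---------------------------------------------------------------------------
// Editor's note (not part of mark-compact.cc): a typed slot occupies two
// consecutive buffer entries -- the SlotType smuggled through an ObjectSlot
// cast, immediately followed by the raw code address, e.g.:
//
//   buffer->Add(reinterpret_cast<ObjectSlot>(SlotsBuffer::CODE_TARGET_SLOT));
//   buffer->Add(reinterpret_cast<ObjectSlot>(rinfo->pc()));
//
// IsTypedSlot() above can tell the two kinds of entry apart because slot-type
// values are small integers that can never be valid heap addresses.
// UpdateSlots() below relies on this to consume the pair: DecodeSlotType()
// recovers the type and the next entry supplies the address.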
3642 
3643 
3644 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
3645  if (RelocInfo::IsCodeTarget(rmode)) {
3646  return SlotsBuffer::CODE_TARGET_SLOT;
3647  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
3648  return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
3649  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
3650  return SlotsBuffer::DEBUG_TARGET_SLOT;
3651  } else if (RelocInfo::IsJSReturn(rmode)) {
3652  return SlotsBuffer::JS_RETURN_SLOT;
3653  }
3654  UNREACHABLE();
3655  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
3656 }
3657 
3658 
3659 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
3660  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
3661  if (target_page->IsEvacuationCandidate() &&
3662  (rinfo->host() == NULL ||
3663  !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
3664  if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
3665  target_page->slots_buffer_address(),
3666  SlotTypeForRMode(rinfo->rmode()),
3667  rinfo->pc(),
3668  SlotsBuffer::FAIL_ON_OVERFLOW)) {
3669  EvictEvacuationCandidate(target_page);
3670  }
3671  }
3672 }
3673 
3674 
3675 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
3676  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
3677  if (target_page->IsEvacuationCandidate() &&
3678  !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
3679  if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
3680  target_page->slots_buffer_address(),
3681  SlotsBuffer::CODE_ENTRY_SLOT,
3682  slot,
3683  SlotsBuffer::FAIL_ON_OVERFLOW)) {
3684  EvictEvacuationCandidate(target_page);
3685  }
3686  }
3687 }
3688 
3689 
3690 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
3691  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
3692  if (is_compacting()) {
3693  Code* host = heap()->isolate()->inner_pointer_to_code_cache()->
3694  GcSafeFindCodeForInnerPointer(pc);
3695  MarkBit mark_bit = Marking::MarkBitFrom(host);
3696  if (Marking::IsBlack(mark_bit)) {
3697  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
3698  RecordRelocSlot(&rinfo, target);
3699  }
3700  }
3701 }
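// ---------------------------------------------------------------------------
// Editor's note (not part of mark-compact.cc): RecordCodeTargetPatch() is the
// hook for code targets patched while a mark-compact collection is already in
// progress. The slot is re-recorded only when the host code object is marked
// black (live); an unmarked host is about to be collected, so it holds no
// pointer that would need updating after evacuation.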
3702 
3703 
3704 static inline SlotsBuffer::SlotType DecodeSlotType(
3705  SlotsBuffer::ObjectSlot slot) {
3706  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
3707 }
3708 
3709 
3710 void SlotsBuffer::UpdateSlots(Heap* heap) {
3711  PointersUpdatingVisitor v(heap);
3712 
3713  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3714  ObjectSlot slot = slots_[slot_idx];
3715  if (!IsTypedSlot(slot)) {
3716  PointersUpdatingVisitor::UpdateSlot(heap, slot);
3717  } else {
3718  ++slot_idx;
3719  ASSERT(slot_idx < idx_);
3720  UpdateSlot(&v,
3721  DecodeSlotType(slot),
3722  reinterpret_cast<Address>(slots_[slot_idx]));
3723  }
3724  }
3725 }
3726 
3727 
3728 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
3729  PointersUpdatingVisitor v(heap);
3730 
3731  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3732  ObjectSlot slot = slots_[slot_idx];
3733  if (!IsTypedSlot(slot)) {
3734  if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
3735  PointersUpdatingVisitor::UpdateSlot(heap, slot);
3736  }
3737  } else {
3738  ++slot_idx;
3739  ASSERT(slot_idx < idx_);
3740  Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
3741  if (!IsOnInvalidatedCodeObject(pc)) {
3742  UpdateSlot(&v,
3743  DecodeSlotType(slot),
3744  reinterpret_cast<Address>(slots_[slot_idx]));
3745  }
3746  }
3747  }
3748 }
3749 
3750 
3751 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
3752  return new SlotsBuffer(next_buffer);
3753 }
3754 
3755 
3756 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
3757  delete buffer;
3758 }
3759 
3760 
3761 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
3762  SlotsBuffer* buffer = *buffer_address;
3763  while (buffer != NULL) {
3764  SlotsBuffer* next_buffer = buffer->next();
3765  DeallocateBuffer(buffer);
3766  buffer = next_buffer;
3767  }
3768  *buffer_address = NULL;
3769 }
3770 
3771 
3772 } } // namespace v8::internal