v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
mark-compact.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "code-stubs.h"
31 #include "compilation-cache.h"
32 #include "cpu-profiler.h"
33 #include "deoptimizer.h"
34 #include "execution.h"
35 #include "gdb-jit.h"
36 #include "global-handles.h"
37 #include "heap-profiler.h"
38 #include "ic-inl.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
41 #include "objects-visiting.h"
42 #include "objects-visiting-inl.h"
43 #include "stub-cache.h"
44 #include "sweeper-thread.h"
45 
46 namespace v8 {
47 namespace internal {
48 
49 
50 const char* Marking::kWhiteBitPattern = "00";
51 const char* Marking::kBlackBitPattern = "10";
52 const char* Marking::kGreyBitPattern = "11";
53 const char* Marking::kImpossibleBitPattern = "01";
54 
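// Illustrative sketch, not part of mark-compact.cc: how the two-bit patterns
// above map onto the classic tri-color marking abstraction. The enum and the
// helper are hypothetical and exist only to make the encoding concrete; the
// first character of each pattern is the mark bit, the second the "next" bit.
enum class IllustrativeColor { kWhite, kGrey, kBlack, kImpossible };

static inline IllustrativeColor ColorFromBitPattern(bool mark_bit,
                                                    bool next_bit) {
  if (!mark_bit) {                                     // "0?" patterns
    return next_bit ? IllustrativeColor::kImpossible   // "01"
                    : IllustrativeColor::kWhite;       // "00"
  }
  return next_bit ? IllustrativeColor::kGrey           // "11"
                  : IllustrativeColor::kBlack;         // "10"
}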
55 
56 // -------------------------------------------------------------------------
57 // MarkCompactCollector
58 
59 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
60 #ifdef DEBUG
61  state_(IDLE),
62 #endif
63  sweep_precisely_(false),
64  reduce_memory_footprint_(false),
65  abort_incremental_marking_(false),
66  marking_parity_(ODD_MARKING_PARITY),
67  compacting_(false),
68  was_marked_incrementally_(false),
69  sweeping_pending_(false),
70  pending_sweeper_jobs_semaphore_(0),
71  sequential_sweeping_(false),
72  tracer_(NULL),
73  migration_slots_buffer_(NULL),
74  heap_(heap),
75  code_flusher_(NULL),
76  encountered_weak_collections_(NULL),
77  have_code_to_deoptimize_(false) { }
78 
79 #ifdef VERIFY_HEAP
80 class VerifyMarkingVisitor: public ObjectVisitor {
81  public:
82  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
83 
84  void VisitPointers(Object** start, Object** end) {
85  for (Object** current = start; current < end; current++) {
86  if ((*current)->IsHeapObject()) {
87  HeapObject* object = HeapObject::cast(*current);
88  CHECK(heap_->mark_compact_collector()->IsMarked(object));
89  }
90  }
91  }
92 
93  void VisitEmbeddedPointer(RelocInfo* rinfo) {
94  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
95  if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
96  Object* p = rinfo->target_object();
97  VisitPointer(&p);
98  }
99  }
100 
101  void VisitCell(RelocInfo* rinfo) {
102  Code* code = rinfo->host();
103  ASSERT(rinfo->rmode() == RelocInfo::CELL);
104  if (!code->IsWeakObject(rinfo->target_cell())) {
105  ObjectVisitor::VisitCell(rinfo);
106  }
107  }
108 
109  private:
110  Heap* heap_;
111 };
112 
113 
114 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
115  VerifyMarkingVisitor visitor(heap);
116  HeapObject* object;
117  Address next_object_must_be_here_or_later = bottom;
118 
119  for (Address current = bottom;
120  current < top;
121  current += kPointerSize) {
122  object = HeapObject::FromAddress(current);
123  if (MarkCompactCollector::IsMarked(object)) {
124  CHECK(current >= next_object_must_be_here_or_later);
125  object->Iterate(&visitor);
126  next_object_must_be_here_or_later = current + object->Size();
127  }
128  }
129 }
130 
131 
132 static void VerifyMarking(NewSpace* space) {
133  Address end = space->top();
134  NewSpacePageIterator it(space->bottom(), end);
135  // The bottom position is at the start of its page. Allows us to use
136  // page->area_start() as start of range on all pages.
137  CHECK_EQ(space->bottom(),
138  NewSpacePage::FromAddress(space->bottom())->area_start());
139  while (it.has_next()) {
140  NewSpacePage* page = it.next();
141  Address limit = it.has_next() ? page->area_end() : end;
142  CHECK(limit == end || !page->Contains(end));
143  VerifyMarking(space->heap(), page->area_start(), limit);
144  }
145 }
146 
147 
148 static void VerifyMarking(PagedSpace* space) {
149  PageIterator it(space);
150 
151  while (it.has_next()) {
152  Page* p = it.next();
153  VerifyMarking(space->heap(), p->area_start(), p->area_end());
154  }
155 }
156 
157 
158 static void VerifyMarking(Heap* heap) {
159  VerifyMarking(heap->old_pointer_space());
160  VerifyMarking(heap->old_data_space());
161  VerifyMarking(heap->code_space());
162  VerifyMarking(heap->cell_space());
163  VerifyMarking(heap->property_cell_space());
164  VerifyMarking(heap->map_space());
165  VerifyMarking(heap->new_space());
166 
167  VerifyMarkingVisitor visitor(heap);
168 
169  LargeObjectIterator it(heap->lo_space());
170  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
171  if (MarkCompactCollector::IsMarked(obj)) {
172  obj->Iterate(&visitor);
173  }
174  }
175 
176  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
177 }
178 
179 
180 class VerifyEvacuationVisitor: public ObjectVisitor {
181  public:
182  void VisitPointers(Object** start, Object** end) {
183  for (Object** current = start; current < end; current++) {
184  if ((*current)->IsHeapObject()) {
185  HeapObject* object = HeapObject::cast(*current);
186  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
187  }
188  }
189  }
190 };
191 
192 
193 static void VerifyEvacuation(Address bottom, Address top) {
194  VerifyEvacuationVisitor visitor;
195  HeapObject* object;
196  Address next_object_must_be_here_or_later = bottom;
197 
198  for (Address current = bottom;
199  current < top;
200  current += kPointerSize) {
201  object = HeapObject::FromAddress(current);
202  if (MarkCompactCollector::IsMarked(object)) {
203  CHECK(current >= next_object_must_be_here_or_later);
204  object->Iterate(&visitor);
205  next_object_must_be_here_or_later = current + object->Size();
206  }
207  }
208 }
209 
210 
211 static void VerifyEvacuation(NewSpace* space) {
212  NewSpacePageIterator it(space->bottom(), space->top());
213  VerifyEvacuationVisitor visitor;
214 
215  while (it.has_next()) {
216  NewSpacePage* page = it.next();
217  Address current = page->area_start();
218  Address limit = it.has_next() ? page->area_end() : space->top();
219  CHECK(limit == space->top() || !page->Contains(space->top()));
220  while (current < limit) {
221  HeapObject* object = HeapObject::FromAddress(current);
222  object->Iterate(&visitor);
223  current += object->Size();
224  }
225  }
226 }
227 
228 
229 static void VerifyEvacuation(PagedSpace* space) {
230  // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
231  // swept pages.
232  if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
233  space->was_swept_conservatively()) return;
234  PageIterator it(space);
235 
236  while (it.has_next()) {
237  Page* p = it.next();
238  if (p->IsEvacuationCandidate()) continue;
239  VerifyEvacuation(p->area_start(), p->area_end());
240  }
241 }
242 
243 
244 static void VerifyEvacuation(Heap* heap) {
245  VerifyEvacuation(heap->old_pointer_space());
246  VerifyEvacuation(heap->old_data_space());
247  VerifyEvacuation(heap->code_space());
248  VerifyEvacuation(heap->cell_space());
249  VerifyEvacuation(heap->property_cell_space());
250  VerifyEvacuation(heap->map_space());
251  VerifyEvacuation(heap->new_space());
252 
253  VerifyEvacuationVisitor visitor;
254  heap->IterateStrongRoots(&visitor, VISIT_ALL);
255 }
256 #endif // VERIFY_HEAP
257 
258 
259 #ifdef DEBUG
260 class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
261  public:
262  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
263 
264  void VisitPointers(Object** start, Object** end) {
265  for (Object** current = start; current < end; current++) {
266  if ((*current)->IsHeapObject()) {
267  HeapObject* object = HeapObject::cast(*current);
268  if (object->IsString()) continue;
269  switch (object->map()->instance_type()) {
270  case JS_FUNCTION_TYPE:
271  CheckContext(JSFunction::cast(object)->context());
272  break;
273  case JS_GLOBAL_PROXY_TYPE:
274  CheckContext(JSGlobalProxy::cast(object)->native_context());
275  break;
276  case JS_GLOBAL_OBJECT_TYPE:
277  case JS_BUILTINS_OBJECT_TYPE:
278  CheckContext(GlobalObject::cast(object)->native_context());
279  break;
280  case JS_ARRAY_TYPE:
281  case JS_DATE_TYPE:
282  case JS_OBJECT_TYPE:
283  case JS_REGEXP_TYPE:
284  VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
285  break;
286  case MAP_TYPE:
287  VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
288  VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
289  break;
290  case FIXED_ARRAY_TYPE:
291  if (object->IsContext()) {
292  CheckContext(object);
293  } else {
294  FixedArray* array = FixedArray::cast(object);
295  int length = array->length();
296  // Set array length to zero to prevent cycles while iterating
297  // over array bodies, this is easier than intrusive marking.
298  array->set_length(0);
299  array->IterateBody(
300  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
301  array->set_length(length);
302  }
303  break;
304  case CELL_TYPE:
305  case JS_PROXY_TYPE:
306  case JS_VALUE_TYPE:
308  object->Iterate(this);
309  break;
312  case BYTE_ARRAY_TYPE:
314  case CODE_TYPE:
316  case HEAP_NUMBER_TYPE:
318  case ODDBALL_TYPE:
319  case SCRIPT_TYPE:
321  break;
322  default:
323  UNREACHABLE();
324  }
325  }
326  }
327  }
328 
329  private:
330  void CheckContext(Object* context) {
331  if (!context->IsContext()) return;
332  Context* native_context = Context::cast(context)->native_context();
333  if (current_native_context_ == NULL) {
334  current_native_context_ = native_context;
335  } else {
336  CHECK_EQ(current_native_context_, native_context);
337  }
338  }
339 
340  Context* current_native_context_;
341 };
342 
343 
344 static void VerifyNativeContextSeparation(Heap* heap) {
345  HeapObjectIterator it(heap->code_space());
346 
347  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
348  VerifyNativeContextSeparationVisitor visitor;
349  Code::cast(object)->CodeIterateBody(&visitor);
350  }
351 }
352 #endif
353 
354 
355 void MarkCompactCollector::SetUp() {
356  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
357  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
358 }
359 
360 
361 void MarkCompactCollector::TearDown() {
362  AbortCompaction();
363 }
364 
365 
366 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
367  p->MarkEvacuationCandidate();
368  evacuation_candidates_.Add(p);
369 }
370 
371 
372 static void TraceFragmentation(PagedSpace* space) {
373  int number_of_pages = space->CountTotalPages();
374  intptr_t reserved = (number_of_pages * space->AreaSize());
375  intptr_t free = reserved - space->SizeOfObjects();
376  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
377  AllocationSpaceName(space->identity()),
378  number_of_pages,
379  static_cast<int>(free),
380  static_cast<double>(free) * 100 / reserved);
381 }
382 
383 
384 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
385  if (!compacting_) {
386  ASSERT(evacuation_candidates_.length() == 0);
387 
388 #ifdef ENABLE_GDB_JIT_INTERFACE
389  // If GDBJIT interface is active disable compaction.
390  if (FLAG_gdbjit) return false;
391 #endif
392 
393  CollectEvacuationCandidates(heap()->old_pointer_space());
394  CollectEvacuationCandidates(heap()->old_data_space());
395 
396  if (FLAG_compact_code_space &&
397  (mode == NON_INCREMENTAL_COMPACTION ||
398  FLAG_incremental_code_compaction)) {
399  CollectEvacuationCandidates(heap()->code_space());
400  } else if (FLAG_trace_fragmentation) {
401  TraceFragmentation(heap()->code_space());
402  }
403 
404  if (FLAG_trace_fragmentation) {
405  TraceFragmentation(heap()->map_space());
406  TraceFragmentation(heap()->cell_space());
407  TraceFragmentation(heap()->property_cell_space());
408  }
409 
410  heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
411  heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
412  heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
413 
414  compacting_ = evacuation_candidates_.length() > 0;
415  }
416 
417  return compacting_;
418 }
419 
420 
421 void MarkCompactCollector::CollectGarbage() {
422  // Make sure that Prepare() has been called. The individual steps below will
423  // update the state as they proceed.
424  ASSERT(state_ == PREPARE_GC);
425  ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
426 
427  MarkLiveObjects();
428  ASSERT(heap_->incremental_marking()->IsStopped());
429 
430  if (FLAG_collect_maps) ClearNonLiveReferences();
431 
432  ClearWeakCollections();
433 
434 #ifdef VERIFY_HEAP
435  if (FLAG_verify_heap) {
436  VerifyMarking(heap_);
437  }
438 #endif
439 
440  SweepSpaces();
441 
442  if (!FLAG_collect_maps) ReattachInitialMaps();
443 
444 #ifdef DEBUG
445  if (FLAG_verify_native_context_separation) {
446  VerifyNativeContextSeparation(heap_);
447  }
448 #endif
449 
450 #ifdef VERIFY_HEAP
451  if (heap()->weak_embedded_objects_verification_enabled()) {
452  VerifyWeakEmbeddedObjectsInOptimizedCode();
453  }
454  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
455  VerifyOmittedMapChecks();
456  }
457 #endif
458 
459  Finish();
460 
461  if (marking_parity_ == EVEN_MARKING_PARITY) {
462  marking_parity_ = ODD_MARKING_PARITY;
463  } else {
464  ASSERT(marking_parity_ == ODD_MARKING_PARITY);
465  marking_parity_ = EVEN_MARKING_PARITY;
466  }
467 
468  tracer_ = NULL;
469 }
470 
471 
472 #ifdef VERIFY_HEAP
473 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
474  PageIterator it(space);
475 
476  while (it.has_next()) {
477  Page* p = it.next();
478  CHECK(p->markbits()->IsClean());
479  CHECK_EQ(0, p->LiveBytes());
480  }
481 }
482 
483 
484 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
485  NewSpacePageIterator it(space->bottom(), space->top());
486 
487  while (it.has_next()) {
488  NewSpacePage* p = it.next();
489  CHECK(p->markbits()->IsClean());
490  CHECK_EQ(0, p->LiveBytes());
491  }
492 }
493 
494 
495 void MarkCompactCollector::VerifyMarkbitsAreClean() {
496  VerifyMarkbitsAreClean(heap_->old_pointer_space());
497  VerifyMarkbitsAreClean(heap_->old_data_space());
498  VerifyMarkbitsAreClean(heap_->code_space());
499  VerifyMarkbitsAreClean(heap_->cell_space());
500  VerifyMarkbitsAreClean(heap_->property_cell_space());
501  VerifyMarkbitsAreClean(heap_->map_space());
502  VerifyMarkbitsAreClean(heap_->new_space());
503 
504  LargeObjectIterator it(heap_->lo_space());
505  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
506  MarkBit mark_bit = Marking::MarkBitFrom(obj);
507  CHECK(Marking::IsWhite(mark_bit));
508  CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
509  }
510 }
511 
512 
513 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
514  HeapObjectIterator code_iterator(heap()->code_space());
515  for (HeapObject* obj = code_iterator.Next();
516  obj != NULL;
517  obj = code_iterator.Next()) {
518  Code* code = Code::cast(obj);
519  if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
520  if (WillBeDeoptimized(code)) continue;
521  code->VerifyEmbeddedObjectsDependency();
522  }
523 }
524 
525 
526 void MarkCompactCollector::VerifyOmittedMapChecks() {
527  HeapObjectIterator iterator(heap()->map_space());
528  for (HeapObject* obj = iterator.Next();
529  obj != NULL;
530  obj = iterator.Next()) {
531  Map* map = Map::cast(obj);
532  map->VerifyOmittedMapChecks();
533  }
534 }
535 #endif // VERIFY_HEAP
536 
537 
538 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
539  PageIterator it(space);
540 
541  while (it.has_next()) {
542  Bitmap::Clear(it.next());
543  }
544 }
545 
546 
547 static void ClearMarkbitsInNewSpace(NewSpace* space) {
548  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
549 
550  while (it.has_next()) {
551  Bitmap::Clear(it.next());
552  }
553 }
554 
555 
556 void MarkCompactCollector::ClearMarkbits() {
557  ClearMarkbitsInPagedSpace(heap_->code_space());
558  ClearMarkbitsInPagedSpace(heap_->map_space());
559  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
560  ClearMarkbitsInPagedSpace(heap_->old_data_space());
561  ClearMarkbitsInPagedSpace(heap_->cell_space());
562  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
563  ClearMarkbitsInNewSpace(heap_->new_space());
564 
565  LargeObjectIterator it(heap_->lo_space());
566  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
567  MarkBit mark_bit = Marking::MarkBitFrom(obj);
568  mark_bit.Clear();
569  mark_bit.Next().Clear();
570  Page::FromAddress(obj->address())->ResetProgressBar();
571  Page::FromAddress(obj->address())->ResetLiveBytes();
572  }
573 }
574 
575 
576 class MarkCompactCollector::SweeperTask : public v8::Task {
577  public:
578  SweeperTask(Heap* heap, PagedSpace* space)
579  : heap_(heap), space_(space) {}
580 
581  virtual ~SweeperTask() {}
582 
583  private:
584  // v8::Task overrides.
585  virtual void Run() V8_OVERRIDE {
586  heap_->mark_compact_collector()->SweepInParallel(space_);
587  heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
588  }
589 
590  Heap* heap_;
591  PagedSpace* space_;
592 
593  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
594 };
595 
596 
597 void MarkCompactCollector::StartSweeperThreads() {
598  // TODO(hpayer): This check is just used for debugging purpose and
599  // should be removed or turned into an assert after investigating the
600  // crash in concurrent sweeping.
601  CHECK(free_list_old_pointer_space_.get()->IsEmpty());
602  CHECK(free_list_old_data_space_.get()->IsEmpty());
603  sweeping_pending_ = true;
604  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
605  isolate()->sweeper_threads()[i]->StartSweeping();
606  }
607  if (FLAG_job_based_sweeping) {
608  V8::GetCurrentPlatform()->CallOnBackgroundThread(
609  new SweeperTask(heap(), heap()->old_data_space()),
610  v8::Platform::kShortRunningTask);
611  V8::GetCurrentPlatform()->CallOnBackgroundThread(
612  new SweeperTask(heap(), heap()->old_pointer_space()),
613  v8::Platform::kShortRunningTask);
614  }
615 }
616 
617 
618 void MarkCompactCollector::WaitUntilSweepingCompleted() {
619  ASSERT(sweeping_pending_ == true);
620  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
621  isolate()->sweeper_threads()[i]->WaitForSweeperThread();
622  }
623  if (FLAG_job_based_sweeping) {
624  // Wait twice for both jobs.
625  pending_sweeper_jobs_semaphore_.Wait();
626  pending_sweeper_jobs_semaphore_.Wait();
627  }
628  ParallelSweepSpacesComplete();
629  sweeping_pending_ = false;
630  RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
631  RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
632  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
633  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
634 }
635 
636 
637 intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
638  FreeList* free_list;
639 
640  if (space == heap()->old_pointer_space()) {
641  free_list = free_list_old_pointer_space_.get();
642  } else if (space == heap()->old_data_space()) {
643  free_list = free_list_old_data_space_.get();
644  } else {
645  // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
646  // to only refill them for old data and pointer spaces.
647  return 0;
648  }
649 
650  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
651  space->AddToAccountingStats(freed_bytes);
652  space->DecrementUnsweptFreeBytes(freed_bytes);
653  return freed_bytes;
654 }
655 
656 
657 bool MarkCompactCollector::AreSweeperThreadsActivated() {
658  return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
659 }
660 
661 
662 bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
663  return sweeping_pending_;
664 }
665 
666 
667 void Marking::TransferMark(Address old_start, Address new_start) {
668  // This is only used when resizing an object.
669  ASSERT(MemoryChunk::FromAddress(old_start) ==
670  MemoryChunk::FromAddress(new_start));
671 
672  if (!heap_->incremental_marking()->IsMarking()) return;
673 
674  // If the mark doesn't move, we don't check the color of the object.
675  // It doesn't matter whether the object is black, since it hasn't changed
676  // size, so the adjustment to the live data count will be zero anyway.
677  if (old_start == new_start) return;
678 
679  MarkBit new_mark_bit = MarkBitFrom(new_start);
680  MarkBit old_mark_bit = MarkBitFrom(old_start);
681 
682 #ifdef DEBUG
683  ObjectColor old_color = Color(old_mark_bit);
684 #endif
685 
686  if (Marking::IsBlack(old_mark_bit)) {
687  old_mark_bit.Clear();
688  ASSERT(IsWhite(old_mark_bit));
689  Marking::MarkBlack(new_mark_bit);
690  return;
691  } else if (Marking::IsGrey(old_mark_bit)) {
692  old_mark_bit.Clear();
693  old_mark_bit.Next().Clear();
694  ASSERT(IsWhite(old_mark_bit));
695  heap_->incremental_marking()->WhiteToGreyAndPush(
696  HeapObject::FromAddress(new_start), new_mark_bit);
697  heap_->incremental_marking()->RestartIfNotMarking();
698  }
699 
700 #ifdef DEBUG
701  ObjectColor new_color = Color(new_mark_bit);
702  ASSERT(new_color == old_color);
703 #endif
704 }
705 
706 
707 const char* AllocationSpaceName(AllocationSpace space) {
708  switch (space) {
709  case NEW_SPACE: return "NEW_SPACE";
710  case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
711  case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
712  case CODE_SPACE: return "CODE_SPACE";
713  case MAP_SPACE: return "MAP_SPACE";
714  case CELL_SPACE: return "CELL_SPACE";
715  case PROPERTY_CELL_SPACE:
716  return "PROPERTY_CELL_SPACE";
717  case LO_SPACE: return "LO_SPACE";
718  default:
719  UNREACHABLE();
720  }
721 
722  return NULL;
723 }
724 
725 
726 // Returns zero for pages that have so little fragmentation that it is not
727 // worth defragmenting them. Otherwise a positive integer that gives an
728 // estimate of fragmentation on an arbitrary scale.
729 static int FreeListFragmentation(PagedSpace* space, Page* p) {
730  // If page was not swept then there are no free list items on it.
731  if (!p->WasSwept()) {
732  if (FLAG_trace_fragmentation) {
733  PrintF("%p [%s]: %d bytes live (unswept)\n",
734  reinterpret_cast<void*>(p),
735  AllocationSpaceName(space->identity()),
736  p->LiveBytes());
737  }
738  return 0;
739  }
740 
741  PagedSpace::SizeStats sizes;
742  space->ObtainFreeListStatistics(p, &sizes);
743 
744  intptr_t ratio;
745  intptr_t ratio_threshold;
746  intptr_t area_size = space->AreaSize();
747  if (space->identity() == CODE_SPACE) {
748  ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
749  area_size;
750  ratio_threshold = 10;
751  } else {
752  ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
753  area_size;
754  ratio_threshold = 15;
755  }
756 
757  if (FLAG_trace_fragmentation) {
758  PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
759  reinterpret_cast<void*>(p),
760  AllocationSpaceName(space->identity()),
761  static_cast<int>(sizes.small_size_),
762  static_cast<double>(sizes.small_size_ * 100) /
763  area_size,
764  static_cast<int>(sizes.medium_size_),
765  static_cast<double>(sizes.medium_size_ * 100) /
766  area_size,
767  static_cast<int>(sizes.large_size_),
768  static_cast<double>(sizes.large_size_ * 100) /
769  area_size,
770  static_cast<int>(sizes.huge_size_),
771  static_cast<double>(sizes.huge_size_ * 100) /
772  area_size,
773  (ratio > ratio_threshold) ? "[fragmented]" : "");
774  }
775 
776  if (FLAG_always_compact && sizes.Total() != area_size) {
777  return 1;
778  }
779 
780  if (ratio <= ratio_threshold) return 0; // Not fragmented.
781 
782  return static_cast<int>(ratio - ratio_threshold);
783 }
784 
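// Illustrative sketch, not part of mark-compact.cc: the weighted free-list
// ratio computed by FreeListFragmentation() above, applied to made-up numbers.
// For a non-code space with a 1024 KB usable area, 40 KB of small free blocks
// and 100 KB of medium free blocks give
//   ratio = (40 * 5 + 100) * 100 / 1024 = 29,
// which exceeds the threshold of 15, so the page scores 29 - 15 = 14.
// The helper below is hypothetical and standalone.
static inline intptr_t IllustrativeFragmentationScore(intptr_t small_size,
                                                      intptr_t medium_size,
                                                      intptr_t area_size,
                                                      intptr_t threshold) {
  intptr_t ratio = (small_size * 5 + medium_size) * 100 / area_size;
  return (ratio <= threshold) ? 0 : ratio - threshold;
}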
785 
786 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
787  ASSERT(space->identity() == OLD_POINTER_SPACE ||
788  space->identity() == OLD_DATA_SPACE ||
789  space->identity() == CODE_SPACE);
790 
791  static const int kMaxMaxEvacuationCandidates = 1000;
792  int number_of_pages = space->CountTotalPages();
793  int max_evacuation_candidates =
794  static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
795 
796  if (FLAG_stress_compaction || FLAG_always_compact) {
797  max_evacuation_candidates = kMaxMaxEvacuationCandidates;
798  }
799 
800  class Candidate {
801  public:
802  Candidate() : fragmentation_(0), page_(NULL) { }
803  Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
804 
805  int fragmentation() { return fragmentation_; }
806  Page* page() { return page_; }
807 
808  private:
809  int fragmentation_;
810  Page* page_;
811  };
812 
813  enum CompactionMode {
814  COMPACT_FREE_LISTS,
815  REDUCE_MEMORY_FOOTPRINT
816  };
817 
818  CompactionMode mode = COMPACT_FREE_LISTS;
819 
820  intptr_t reserved = number_of_pages * space->AreaSize();
821  intptr_t over_reserved = reserved - space->SizeOfObjects();
822  static const intptr_t kFreenessThreshold = 50;
823 
824  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
825  // If reduction of memory footprint was requested, we are aggressive
826  // about choosing pages to free. We expect that half-empty pages
827  // are easier to compact so slightly bump the limit.
828  mode = REDUCE_MEMORY_FOOTPRINT;
829  max_evacuation_candidates += 2;
830  }
831 
832 
833  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
834  // If over-usage is very high (more than a third of the space), we
835  // try to free all mostly empty pages. We expect that almost empty
836  // pages are even easier to compact so bump the limit even more.
837  mode = REDUCE_MEMORY_FOOTPRINT;
838  max_evacuation_candidates *= 2;
839  }
840 
841  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
842  PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
843  "evacuation candidate limit: %d\n",
844  static_cast<double>(over_reserved) / MB,
845  static_cast<double>(reserved) / MB,
846  static_cast<int>(kFreenessThreshold),
847  max_evacuation_candidates);
848  }
849 
850  intptr_t estimated_release = 0;
851 
852  Candidate candidates[kMaxMaxEvacuationCandidates];
853 
854  max_evacuation_candidates =
855  Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
856 
857  int count = 0;
858  int fragmentation = 0;
859  Candidate* least = NULL;
860 
861  PageIterator it(space);
862  if (it.has_next()) it.next(); // Never compact the first page.
863 
864  while (it.has_next()) {
865  Page* p = it.next();
867 
868  if (FLAG_stress_compaction) {
869  unsigned int counter = space->heap()->ms_count();
870  uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
871  if ((counter & 1) == (page_number & 1)) fragmentation = 1;
872  } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
873  // Don't try to release too many pages.
874  if (estimated_release >= over_reserved) {
875  continue;
876  }
877 
878  intptr_t free_bytes = 0;
879 
880  if (!p->WasSwept()) {
881  free_bytes = (p->area_size() - p->LiveBytes());
882  } else {
883  PagedSpace::SizeStats sizes;
884  space->ObtainFreeListStatistics(p, &sizes);
885  free_bytes = sizes.Total();
886  }
887 
888  int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
889 
890  if (free_pct >= kFreenessThreshold) {
891  estimated_release += free_bytes;
892  fragmentation = free_pct;
893  } else {
894  fragmentation = 0;
895  }
896 
897  if (FLAG_trace_fragmentation) {
898  PrintF("%p [%s]: %d (%.2f%%) free %s\n",
899  reinterpret_cast<void*>(p),
900  AllocationSpaceName(space->identity()),
901  static_cast<int>(free_bytes),
902  static_cast<double>(free_bytes * 100) / p->area_size(),
903  (fragmentation > 0) ? "[fragmented]" : "");
904  }
905  } else {
906  fragmentation = FreeListFragmentation(space, p);
907  }
908 
909  if (fragmentation != 0) {
910  if (count < max_evacuation_candidates) {
911  candidates[count++] = Candidate(fragmentation, p);
912  } else {
913  if (least == NULL) {
914  for (int i = 0; i < max_evacuation_candidates; i++) {
915  if (least == NULL ||
916  candidates[i].fragmentation() < least->fragmentation()) {
917  least = candidates + i;
918  }
919  }
920  }
921  if (least->fragmentation() < fragmentation) {
922  *least = Candidate(fragmentation, p);
923  least = NULL;
924  }
925  }
926  }
927  }
928 
929  for (int i = 0; i < count; i++) {
930  AddEvacuationCandidate(candidates[i].page());
931  }
932 
933  if (count > 0 && FLAG_trace_fragmentation) {
934  PrintF("Collected %d evacuation candidates for space %s\n",
935  count,
936  AllocationSpaceName(space->identity()));
937  }
938 }
939 
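// Illustrative sketch, not part of mark-compact.cc: the bounded candidate
// selection used by CollectEvacuationCandidates() above, reduced to its core.
// Keep at most `limit` scored entries; once the array is full, replace the
// current minimum whenever a better-scoring page shows up. The types and
// names below are hypothetical; the real code lazily caches the `least`
// pointer instead of rescanning on every insertion.
#include <algorithm>
#include <vector>

struct IllustrativeCandidate { int fragmentation; void* page; };

static void ConsiderCandidate(std::vector<IllustrativeCandidate>* best,
                              size_t limit, IllustrativeCandidate c) {
  if (c.fragmentation == 0) return;          // page not worth evacuating
  if (best->size() < limit) {
    best->push_back(c);
    return;
  }
  auto least = std::min_element(
      best->begin(), best->end(),
      [](const IllustrativeCandidate& a, const IllustrativeCandidate& b) {
        return a.fragmentation < b.fragmentation;
      });
  if (least->fragmentation < c.fragmentation) *least = c;  // evict the weakest
}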
940 
941 void MarkCompactCollector::AbortCompaction() {
942  if (compacting_) {
943  int npages = evacuation_candidates_.length();
944  for (int i = 0; i < npages; i++) {
945  Page* p = evacuation_candidates_[i];
946  slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
947  p->ClearEvacuationCandidate();
948  p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
949  }
950  compacting_ = false;
951  evacuation_candidates_.Rewind(0);
952  invalidated_code_.Rewind(0);
953  }
954  ASSERT_EQ(0, evacuation_candidates_.length());
955 }
956 
957 
958 void MarkCompactCollector::Prepare(GCTracer* tracer) {
959  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
960 
961  // Rather than passing the tracer around we stash it in a static member
962  // variable.
963  tracer_ = tracer;
964 
965 #ifdef DEBUG
966  ASSERT(state_ == IDLE);
967  state_ = PREPARE_GC;
968 #endif
969 
970  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
971 
972  if (IsConcurrentSweepingInProgress()) {
973  // Instead of waiting we could also abort the sweeper threads here.
974  WaitUntilSweepingCompleted();
975  }
976 
977  // Clear marking bits if incremental marking is aborted.
978  if (was_marked_incrementally_ && abort_incremental_marking_) {
979  heap()->incremental_marking()->Abort();
980  ClearMarkbits();
981  AbortCompaction();
982  was_marked_incrementally_ = false;
983  }
984 
985  // Don't start compaction if we are in the middle of incremental
986  // marking cycle. We did not collect any slots.
987  if (!FLAG_never_compact && !was_marked_incrementally_) {
988  StartCompaction(NON_INCREMENTAL_COMPACTION);
989  }
990 
991  PagedSpaces spaces(heap());
992  for (PagedSpace* space = spaces.next();
993  space != NULL;
994  space = spaces.next()) {
995  space->PrepareForMarkCompact();
996  }
997 
998 #ifdef VERIFY_HEAP
999  if (!was_marked_incrementally_ && FLAG_verify_heap) {
1000  VerifyMarkbitsAreClean();
1001  }
1002 #endif
1003 }
1004 
1005 
1006 void MarkCompactCollector::Finish() {
1007 #ifdef DEBUG
1008  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
1009  state_ = IDLE;
1010 #endif
1011  // The stub cache is not traversed during GC; clear the cache to
1012  // force lazy re-initialization of it. This must be done after the
1013  // GC, because it relies on the new address of certain old space
1014  // objects (empty string, illegal builtin).
1015  isolate()->stub_cache()->Clear();
1016 
1017  if (have_code_to_deoptimize_) {
1018  // Some code objects were marked for deoptimization during the GC.
1019  Deoptimizer::DeoptimizeMarkedCode(isolate());
1020  have_code_to_deoptimize_ = false;
1021  }
1022 }
1023 
1024 
1025 // -------------------------------------------------------------------------
1026 // Phase 1: tracing and marking live objects.
1027 // before: all objects are in normal state.
1028 // after: a live object's map pointer is marked as '00'.
1029 
1030 // Marking all live objects in the heap as part of mark-sweep or mark-compact
1031 // collection. Before marking, all objects are in their normal state. After
1032 // marking, live objects' map pointers are marked indicating that the object
1033 // has been found reachable.
1034 //
1035 // The marking algorithm is a (mostly) depth-first (because of possible stack
1036 // overflow) traversal of the graph of objects reachable from the roots. It
1037 // uses an explicit stack of pointers rather than recursion. The young
1038 // generation's inactive ('from') space is used as a marking stack. The
1039 // objects in the marking stack are the ones that have been reached and marked
1040 // but their children have not yet been visited.
1041 //
1042 // The marking stack can overflow during traversal. In that case, we set an
1043 // overflow flag. When the overflow flag is set, we continue marking objects
1044 // reachable from the objects on the marking stack, but no longer push them on
1045 // the marking stack. Instead, we mark them as both marked and overflowed.
1046 // When the stack is in the overflowed state, objects marked as overflowed
1047 // have been reached and marked but their children have not been visited yet.
1048 // After emptying the marking stack, we clear the overflow flag and traverse
1049 // the heap looking for objects marked as overflowed, push them on the stack,
1050 // and continue with marking. This process repeats until all reachable
1051 // objects have been marked.
1052 
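// Illustrative sketch, not part of mark-compact.cc: the worklist-with-overflow
// scheme described in the comment above, on a toy object graph. Everything
// here (the node type, the fixed-capacity stack, the full-heap rescan) is a
// hypothetical stand-in for the real marking deque and heap iterators.
#include <cstddef>
#include <vector>

struct IllustrativeNode {
  bool marked = false;
  bool overflowed = false;
  std::vector<IllustrativeNode*> children;
};

// Push if there is room; otherwise record the overflow on the object itself.
static void PushOrOverflow(std::vector<IllustrativeNode*>* stack,
                           size_t capacity, IllustrativeNode* n,
                           bool* overflowed) {
  if (stack->size() < capacity) {
    stack->push_back(n);
  } else {
    n->overflowed = true;
    *overflowed = true;
  }
}

static void MarkReachable(const std::vector<IllustrativeNode*>& roots,
                          const std::vector<IllustrativeNode*>& all_objects,
                          size_t capacity) {
  std::vector<IllustrativeNode*> stack;
  bool overflowed = false;
  for (IllustrativeNode* r : roots) {
    if (!r->marked) { r->marked = true; PushOrOverflow(&stack, capacity, r, &overflowed); }
  }
  for (;;) {
    while (!stack.empty()) {                   // drain the marking stack
      IllustrativeNode* n = stack.back();
      stack.pop_back();
      for (IllustrativeNode* c : n->children) {
        if (!c->marked) { c->marked = true; PushOrOverflow(&stack, capacity, c, &overflowed); }
      }
    }
    if (!overflowed) break;                    // every reachable object visited
    overflowed = false;
    for (IllustrativeNode* n : all_objects) {  // rescan for overflowed objects
      if (n->overflowed) { n->overflowed = false; PushOrOverflow(&stack, capacity, n, &overflowed); }
    }
  }
}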
1053 void CodeFlusher::ProcessJSFunctionCandidates() {
1054  Code* lazy_compile =
1055  isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1056  Object* undefined = isolate_->heap()->undefined_value();
1057 
1058  JSFunction* candidate = jsfunction_candidates_head_;
1059  JSFunction* next_candidate;
1060  while (candidate != NULL) {
1061  next_candidate = GetNextCandidate(candidate);
1062  ClearNextCandidate(candidate, undefined);
1063 
1064  SharedFunctionInfo* shared = candidate->shared();
1065 
1066  Code* code = shared->code();
1067  MarkBit code_mark = Marking::MarkBitFrom(code);
1068  if (!code_mark.Get()) {
1069  if (FLAG_trace_code_flushing && shared->is_compiled()) {
1070  PrintF("[code-flushing clears: ");
1071  shared->ShortPrint();
1072  PrintF(" - age: %d]\n", code->GetAge());
1073  }
1074  shared->set_code(lazy_compile);
1075  candidate->set_code(lazy_compile);
1076  } else {
1077  candidate->set_code(code);
1078  }
1079 
1080  // We are in the middle of a GC cycle so the write barrier in the code
1081  // setter did not record the slot update and we have to do that manually.
1082  Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1083  Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1084  isolate_->heap()->mark_compact_collector()->
1085  RecordCodeEntrySlot(slot, target);
1086 
1087  Object** shared_code_slot =
1088  HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1089  isolate_->heap()->mark_compact_collector()->
1090  RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
1091 
1092  candidate = next_candidate;
1093  }
1094 
1095  jsfunction_candidates_head_ = NULL;
1096 }
1097 
1098 
1099 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1100  Code* lazy_compile =
1101  isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1102 
1103  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1104  SharedFunctionInfo* next_candidate;
1105  while (candidate != NULL) {
1106  next_candidate = GetNextCandidate(candidate);
1107  ClearNextCandidate(candidate);
1108 
1109  Code* code = candidate->code();
1110  MarkBit code_mark = Marking::MarkBitFrom(code);
1111  if (!code_mark.Get()) {
1112  if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1113  PrintF("[code-flushing clears: ");
1114  candidate->ShortPrint();
1115  PrintF(" - age: %d]\n", code->GetAge());
1116  }
1117  candidate->set_code(lazy_compile);
1118  }
1119 
1120  Object** code_slot =
1121  HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1122  isolate_->heap()->mark_compact_collector()->
1123  RecordSlot(code_slot, code_slot, *code_slot);
1124 
1125  candidate = next_candidate;
1126  }
1127 
1128  shared_function_info_candidates_head_ = NULL;
1129 }
1130 
1131 
1132 void CodeFlusher::ProcessOptimizedCodeMaps() {
1134 
1135  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1136  SharedFunctionInfo* next_holder;
1137 
1138  while (holder != NULL) {
1139  next_holder = GetNextCodeMap(holder);
1140  ClearNextCodeMap(holder);
1141 
1142  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1143  int new_length = SharedFunctionInfo::kEntriesStart;
1144  int old_length = code_map->length();
1145  for (int i = SharedFunctionInfo::kEntriesStart;
1146  i < old_length;
1147  i += SharedFunctionInfo::kEntryLength) {
1148  Code* code =
1149  Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1150  if (!Marking::MarkBitFrom(code).Get()) continue;
1151 
1152  // Move every slot in the entry.
1153  for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1154  int dst_index = new_length++;
1155  Object** slot = code_map->RawFieldOfElementAt(dst_index);
1156  Object* object = code_map->get(i + j);
1157  code_map->set(dst_index, object);
1158  if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1159  ASSERT(object->IsSmi());
1160  } else {
1161  ASSERT(Marking::IsBlack(
1162  Marking::MarkBitFrom(HeapObject::cast(*slot))));
1163  isolate_->heap()->mark_compact_collector()->
1164  RecordSlot(slot, slot, *slot);
1165  }
1166  }
1167  }
1168 
1169  // Trim the optimized code map if entries have been removed.
1170  if (new_length < old_length) {
1171  holder->TrimOptimizedCodeMap(old_length - new_length);
1172  }
1173 
1174  holder = next_holder;
1175  }
1176 
1177  optimized_code_map_holder_head_ = NULL;
1178 }
1179 
1180 
1182  // Make sure previous flushing decisions are revisited.
1183  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1184 
1185  if (FLAG_trace_code_flushing) {
1186  PrintF("[code-flushing abandons function-info: ");
1187  shared_info->ShortPrint();
1188  PrintF("]\n");
1189  }
1190 
1191  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1192  SharedFunctionInfo* next_candidate;
1193  if (candidate == shared_info) {
1194  next_candidate = GetNextCandidate(shared_info);
1195  shared_function_info_candidates_head_ = next_candidate;
1196  ClearNextCandidate(shared_info);
1197  } else {
1198  while (candidate != NULL) {
1199  next_candidate = GetNextCandidate(candidate);
1200 
1201  if (next_candidate == shared_info) {
1202  next_candidate = GetNextCandidate(shared_info);
1203  SetNextCandidate(candidate, next_candidate);
1204  ClearNextCandidate(shared_info);
1205  break;
1206  }
1207 
1208  candidate = next_candidate;
1209  }
1210  }
1211 }
1212 
1213 
1214 void CodeFlusher::EvictCandidate(JSFunction* function) {
1215  ASSERT(!function->next_function_link()->IsUndefined());
1216  Object* undefined = isolate_->heap()->undefined_value();
1217 
1218  // Make sure previous flushing decisions are revisited.
1219  isolate_->heap()->incremental_marking()->RecordWrites(function);
1220  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1221 
1222  if (FLAG_trace_code_flushing) {
1223  PrintF("[code-flushing abandons closure: ");
1224  function->shared()->ShortPrint();
1225  PrintF("]\n");
1226  }
1227 
1228  JSFunction* candidate = jsfunction_candidates_head_;
1229  JSFunction* next_candidate;
1230  if (candidate == function) {
1231  next_candidate = GetNextCandidate(function);
1232  jsfunction_candidates_head_ = next_candidate;
1233  ClearNextCandidate(function, undefined);
1234  } else {
1235  while (candidate != NULL) {
1236  next_candidate = GetNextCandidate(candidate);
1237 
1238  if (next_candidate == function) {
1239  next_candidate = GetNextCandidate(function);
1240  SetNextCandidate(candidate, next_candidate);
1241  ClearNextCandidate(function, undefined);
1242  break;
1243  }
1244 
1245  candidate = next_candidate;
1246  }
1247  }
1248 }
1249 
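// Illustrative sketch, not part of mark-compact.cc: the unlink pattern shared
// by the EvictCandidate()/EvictOptimizedCodeMap() functions above, shown on a
// plain singly linked list. The flusher's real candidate lists are threaded
// through the heap objects themselves (via the next-candidate slots); this
// standalone version only shows the head and interior cases handled above.
struct IllustrativeCandidateNode { IllustrativeCandidateNode* next = nullptr; };

static void UnlinkCandidate(IllustrativeCandidateNode** head,
                            IllustrativeCandidateNode* target) {
  if (*head == target) {                     // target is the list head
    *head = target->next;
  } else {
    for (IllustrativeCandidateNode* c = *head; c != nullptr; c = c->next) {
      if (c->next == target) {               // splice target out of the middle
        c->next = target->next;
        break;
      }
    }
  }
  target->next = nullptr;                    // mirrors ClearNextCandidate()
}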
1250 
1251 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1252  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
1253  get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1254 
1255  // Make sure previous flushing decisions are revisited.
1256  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1257 
1258  if (FLAG_trace_code_flushing) {
1259  PrintF("[code-flushing abandons code-map: ");
1260  code_map_holder->ShortPrint();
1261  PrintF("]\n");
1262  }
1263 
1264  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1265  SharedFunctionInfo* next_holder;
1266  if (holder == code_map_holder) {
1267  next_holder = GetNextCodeMap(code_map_holder);
1268  optimized_code_map_holder_head_ = next_holder;
1269  ClearNextCodeMap(code_map_holder);
1270  } else {
1271  while (holder != NULL) {
1272  next_holder = GetNextCodeMap(holder);
1273 
1274  if (next_holder == code_map_holder) {
1275  next_holder = GetNextCodeMap(code_map_holder);
1276  SetNextCodeMap(holder, next_holder);
1277  ClearNextCodeMap(code_map_holder);
1278  break;
1279  }
1280 
1281  holder = next_holder;
1282  }
1283  }
1284 }
1285 
1286 
1287 void CodeFlusher::EvictJSFunctionCandidates() {
1288  JSFunction* candidate = jsfunction_candidates_head_;
1289  JSFunction* next_candidate;
1290  while (candidate != NULL) {
1291  next_candidate = GetNextCandidate(candidate);
1292  EvictCandidate(candidate);
1293  candidate = next_candidate;
1294  }
1295  ASSERT(jsfunction_candidates_head_ == NULL);
1296 }
1297 
1298 
1299 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1300  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1301  SharedFunctionInfo* next_candidate;
1302  while (candidate != NULL) {
1303  next_candidate = GetNextCandidate(candidate);
1304  EvictCandidate(candidate);
1305  candidate = next_candidate;
1306  }
1307  ASSERT(shared_function_info_candidates_head_ == NULL);
1308 }
1309 
1310 
1311 void CodeFlusher::EvictOptimizedCodeMaps() {
1312  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1313  SharedFunctionInfo* next_holder;
1314  while (holder != NULL) {
1315  next_holder = GetNextCodeMap(holder);
1316  EvictOptimizedCodeMap(holder);
1317  holder = next_holder;
1318  }
1319  ASSERT(optimized_code_map_holder_head_ == NULL);
1320 }
1321 
1322 
1323 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1324  Heap* heap = isolate_->heap();
1325 
1326  JSFunction** slot = &jsfunction_candidates_head_;
1327  JSFunction* candidate = jsfunction_candidates_head_;
1328  while (candidate != NULL) {
1329  if (heap->InFromSpace(candidate)) {
1330  v->VisitPointer(reinterpret_cast<Object**>(slot));
1331  }
1332  candidate = GetNextCandidate(*slot);
1333  slot = GetNextCandidateSlot(*slot);
1334  }
1335 }
1336 
1337 
1338 MarkCompactCollector::~MarkCompactCollector() {
1339  if (code_flusher_ != NULL) {
1340  delete code_flusher_;
1341  code_flusher_ = NULL;
1342  }
1343 }
1344 
1345 
1346 static inline HeapObject* ShortCircuitConsString(Object** p) {
1347  // Optimization: If the heap object pointed to by p is a non-internalized
1348  // cons string whose right substring is HEAP->empty_string, update
1349  // it in place to its left substring. Return the updated value.
1350  //
1351  // Here we assume that if we change *p, we replace it with a heap object
1352  // (i.e., the left substring of a cons string is always a heap object).
1353  //
1354  // The check performed is:
1355  // object->IsConsString() && !object->IsInternalizedString() &&
1356  // (ConsString::cast(object)->second() == HEAP->empty_string())
1357  // except the maps for the object and its possible substrings might be
1358  // marked.
1359  HeapObject* object = HeapObject::cast(*p);
1360  if (!FLAG_clever_optimizations) return object;
1361  Map* map = object->map();
1362  InstanceType type = map->instance_type();
1363  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
1364 
1365  Object* second = reinterpret_cast<ConsString*>(object)->second();
1366  Heap* heap = map->GetHeap();
1367  if (second != heap->empty_string()) {
1368  return object;
1369  }
1370 
1371  // Since we don't have the object's start, it is impossible to update the
1372  // page dirty marks. Therefore, we only replace the string with its left
1373  // substring when page dirty marks do not change.
1374  Object* first = reinterpret_cast<ConsString*>(object)->first();
1375  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1376 
1377  *p = first;
1378  return HeapObject::cast(first);
1379 }
1380 
1381 
1382 class MarkCompactMarkingVisitor
1383  : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1384  public:
1385  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1386  Map* map, HeapObject* obj);
1387 
1388  static void ObjectStatsCountFixedArray(
1389  FixedArrayBase* fixed_array,
1390  FixedArraySubInstanceType fast_type,
1391  FixedArraySubInstanceType dictionary_type);
1392 
1393  template<MarkCompactMarkingVisitor::VisitorId id>
1394  class ObjectStatsTracker {
1395  public:
1396  static inline void Visit(Map* map, HeapObject* obj);
1397  };
1398 
1399  static void Initialize();
1400 
1401  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1402  MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1403  }
1404 
1405  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1406  // Mark all objects pointed to in [start, end).
1407  const int kMinRangeForMarkingRecursion = 64;
1408  if (end - start >= kMinRangeForMarkingRecursion) {
1409  if (VisitUnmarkedObjects(heap, start, end)) return;
1410  // We are close to a stack overflow, so just mark the objects.
1411  }
1412  MarkCompactCollector* collector = heap->mark_compact_collector();
1413  for (Object** p = start; p < end; p++) {
1414  MarkObjectByPointer(collector, start, p);
1415  }
1416  }
1417 
1418  // Marks the object black and pushes it on the marking stack.
1419  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1420  MarkBit mark = Marking::MarkBitFrom(object);
1421  heap->mark_compact_collector()->MarkObject(object, mark);
1422  }
1423 
1424  // Marks the object black without pushing it on the marking stack.
1425  // Returns true if object needed marking and false otherwise.
1426  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1427  MarkBit mark_bit = Marking::MarkBitFrom(object);
1428  if (!mark_bit.Get()) {
1429  heap->mark_compact_collector()->SetMark(object, mark_bit);
1430  return true;
1431  }
1432  return false;
1433  }
1434 
1435  // Mark object pointed to by p.
1436  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1437  Object** anchor_slot,
1438  Object** p)) {
1439  if (!(*p)->IsHeapObject()) return;
1440  HeapObject* object = ShortCircuitConsString(p);
1441  collector->RecordSlot(anchor_slot, p, object);
1442  MarkBit mark = Marking::MarkBitFrom(object);
1443  collector->MarkObject(object, mark);
1444  }
1445 
1446 
1447  // Visit an unmarked object.
1448  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1449  HeapObject* obj)) {
1450 #ifdef DEBUG
1451  ASSERT(collector->heap()->Contains(obj));
1452  ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1453 #endif
1454  Map* map = obj->map();
1455  Heap* heap = obj->GetHeap();
1456  MarkBit mark = Marking::MarkBitFrom(obj);
1457  heap->mark_compact_collector()->SetMark(obj, mark);
1458  // Mark the map pointer and the body.
1459  MarkBit map_mark = Marking::MarkBitFrom(map);
1460  heap->mark_compact_collector()->MarkObject(map, map_mark);
1461  IterateBody(map, obj);
1462  }
1463 
1464  // Visit all unmarked objects pointed to by [start, end).
1465  // Returns false if the operation fails (lack of stack space).
1466  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
1467  Object** start,
1468  Object** end)) {
1469  // Return false if we are close to the stack limit.
1470  StackLimitCheck check(heap->isolate());
1471  if (check.HasOverflowed()) return false;
1472 
1473  MarkCompactCollector* collector = heap->mark_compact_collector();
1474  // Visit the unmarked objects.
1475  for (Object** p = start; p < end; p++) {
1476  Object* o = *p;
1477  if (!o->IsHeapObject()) continue;
1478  collector->RecordSlot(start, p, o);
1479  HeapObject* obj = HeapObject::cast(o);
1480  MarkBit mark = Marking::MarkBitFrom(obj);
1481  if (mark.Get()) continue;
1482  VisitUnmarkedObject(collector, obj);
1483  }
1484  return true;
1485  }
1486 
1487  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1488  SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1489  shared->BeforeVisitingPointers();
1490  }
1491 
1492  static void VisitWeakCollection(Map* map, HeapObject* object) {
1493  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1494  JSWeakCollection* weak_collection =
1495  reinterpret_cast<JSWeakCollection*>(object);
1496 
1497  // Enqueue weak map in linked list of encountered weak maps.
1498  if (weak_collection->next() == Smi::FromInt(0)) {
1499  weak_collection->set_next(collector->encountered_weak_collections());
1500  collector->set_encountered_weak_collections(weak_collection);
1501  }
1502 
1503  // Skip visiting the backing hash table containing the mappings.
1504  int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
1506  map->GetHeap(),
1507  object,
1511  map->GetHeap(),
1512  object,
1514  object_size);
1515 
1516  // Mark the backing hash table without pushing it on the marking stack.
1517  Object* table_object = weak_collection->table();
1518  if (!table_object->IsHashTable()) return;
1519  WeakHashTable* table = WeakHashTable::cast(table_object);
1520  Object** table_slot =
1521  HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
1522  MarkBit table_mark = Marking::MarkBitFrom(table);
1523  collector->RecordSlot(table_slot, table_slot, table);
1524  if (!table_mark.Get()) collector->SetMark(table, table_mark);
1525  // Recording the map slot can be skipped, because maps are not compacted.
1526  collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1528  }
1529 
1530  private:
1531  template<int id>
1532  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1533 
1534  // Code flushing support.
1535 
1536  static const int kRegExpCodeThreshold = 5;
1537 
1538  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1539  JSRegExp* re,
1540  bool is_ascii) {
1541  // Make sure that the fixed array is in fact initialized on the RegExp.
1542  // We could potentially trigger a GC when initializing the RegExp.
1543  if (HeapObject::cast(re->data())->map()->instance_type() !=
1544  FIXED_ARRAY_TYPE) return;
1545 
1546  // Make sure this is a RegExp that actually contains code.
1547  if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1548 
1549  Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1550  if (!code->IsSmi() &&
1551  HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1552  // Save a copy that can be reinstated if we need the code again.
1553  re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1554 
1555  // Saving a copy might create a pointer into compaction candidate
1556  // that was not observed by marker. This might happen if JSRegExp data
1557  // was marked through the compilation cache before marker reached JSRegExp
1558  // object.
1559  FixedArray* data = FixedArray::cast(re->data());
1560  Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1561  heap->mark_compact_collector()->
1562  RecordSlot(slot, slot, code);
1563 
1564  // Set a number in the 0-255 range to guarantee no smi overflow.
1565  re->SetDataAt(JSRegExp::code_index(is_ascii),
1566  Smi::FromInt(heap->sweep_generation() & 0xff));
1567  } else if (code->IsSmi()) {
1568  int value = Smi::cast(code)->value();
1569  // The regexp has not been compiled yet or there was a compilation error.
1570  if (value == JSRegExp::kUninitializedValue ||
1571  value == JSRegExp::kCompilationErrorValue) {
1572  return;
1573  }
1574 
1575  // Check if we should flush now.
1576  if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1577  re->SetDataAt(JSRegExp::code_index(is_ascii),
1578  Smi::FromInt(JSRegExp::kUninitializedValue));
1579  re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1580  Smi::FromInt(JSRegExp::kUninitializedValue));
1581  }
1582  }
1583  }
1584 
1585 
1586  // Works by setting the current sweep_generation (as a smi) in the
1587  // code object place in the data array of the RegExp and keeps a copy
1588  // around that can be reinstated if we reuse the RegExp before flushing.
1589  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1590  // we flush the code.
1591  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1592  Heap* heap = map->GetHeap();
1593  MarkCompactCollector* collector = heap->mark_compact_collector();
1594  if (!collector->is_code_flushing_enabled()) {
1595  VisitJSRegExp(map, object);
1596  return;
1597  }
1598  JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1599  // Flush code or set age on both ASCII and two byte code.
1600  UpdateRegExpCodeAgeAndFlush(heap, re, true);
1601  UpdateRegExpCodeAgeAndFlush(heap, re, false);
1602  // Visit the fields of the RegExp, including the updated FixedArray.
1603  VisitJSRegExp(map, object);
1604  }
1605 
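// Illustrative sketch, not part of mark-compact.cc: the age-stamp test used by
// UpdateRegExpCodeAgeAndFlush() above. While a regexp's code goes unused, the
// code slot holds a small integer (the sweep generation recorded when the
// compiled code was set aside, masked to 0..255); once that stamp is
// kRegExpCodeThreshold generations behind the current sweep generation, the
// compiled code is dropped. The helper is hypothetical and standalone.
static inline bool IllustrativeShouldFlushRegExpCode(int stored_generation,
                                                     int current_generation,
                                                     int threshold) {
  return stored_generation == ((current_generation - threshold) & 0xff);
}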
1606  static VisitorDispatchTable<Callback> non_count_table_;
1607 };
1608 
1609 
1610 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1611  FixedArrayBase* fixed_array,
1612  FixedArraySubInstanceType fast_type,
1613  FixedArraySubInstanceType dictionary_type) {
1614  Heap* heap = fixed_array->map()->GetHeap();
1615  if (fixed_array->map() != heap->fixed_cow_array_map() &&
1616  fixed_array->map() != heap->fixed_double_array_map() &&
1617  fixed_array != heap->empty_fixed_array()) {
1618  if (fixed_array->IsDictionary()) {
1619  heap->RecordFixedArraySubTypeStats(dictionary_type,
1620  fixed_array->Size());
1621  } else {
1622  heap->RecordFixedArraySubTypeStats(fast_type,
1623  fixed_array->Size());
1624  }
1625  }
1626 }
1627 
1628 
1629 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1630  StaticVisitorBase::VisitorId id, Map* map, HeapObject* obj) {
1631  Heap* heap = map->GetHeap();
1632  int object_size = obj->Size();
1633  heap->RecordObjectStats(map->instance_type(), object_size);
1634  non_count_table_.GetVisitorById(id)(map, obj);
1635  if (obj->IsJSObject()) {
1636  JSObject* object = JSObject::cast(obj);
1637  ObjectStatsCountFixedArray(object->elements(),
1638  DICTIONARY_ELEMENTS_SUB_TYPE,
1639  FAST_ELEMENTS_SUB_TYPE);
1640  ObjectStatsCountFixedArray(object->properties(),
1641  DICTIONARY_PROPERTIES_SUB_TYPE,
1642  FAST_PROPERTIES_SUB_TYPE);
1643  }
1644 }
1645 
1646 
1647 template<MarkCompactMarkingVisitor::VisitorId id>
1648 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1649  Map* map, HeapObject* obj) {
1650  ObjectStatsVisitBase(id, map, obj);
1651 }
1652 
1653 
1654 template<>
1655 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1656  MarkCompactMarkingVisitor::kVisitMap> {
1657  public:
1658  static inline void Visit(Map* map, HeapObject* obj) {
1659  Heap* heap = map->GetHeap();
1660  Map* map_obj = Map::cast(obj);
1661  ASSERT(map->instance_type() == MAP_TYPE);
1662  DescriptorArray* array = map_obj->instance_descriptors();
1663  if (map_obj->owns_descriptors() &&
1664  array != heap->empty_descriptor_array()) {
1665  int fixed_array_size = array->Size();
1666  heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1667  fixed_array_size);
1668  }
1669  if (map_obj->HasTransitionArray()) {
1670  int fixed_array_size = map_obj->transitions()->Size();
1671  heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1672  fixed_array_size);
1673  }
1674  if (map_obj->has_code_cache()) {
1675  CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1676  heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1677  cache->default_cache()->Size());
1678  if (!cache->normal_type_cache()->IsUndefined()) {
1679  heap->RecordFixedArraySubTypeStats(
1680  MAP_CODE_CACHE_SUB_TYPE,
1681  FixedArray::cast(cache->normal_type_cache())->Size());
1682  }
1683  }
1684  ObjectStatsVisitBase(kVisitMap, map, obj);
1685  }
1686 };
1687 
1688 
1689 template<>
1690 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1691  MarkCompactMarkingVisitor::kVisitCode> {
1692  public:
1693  static inline void Visit(Map* map, HeapObject* obj) {
1694  Heap* heap = map->GetHeap();
1695  int object_size = obj->Size();
1696  ASSERT(map->instance_type() == CODE_TYPE);
1697  Code* code_obj = Code::cast(obj);
1698  heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1699  object_size);
1700  ObjectStatsVisitBase(kVisitCode, map, obj);
1701  }
1702 };
1703 
1704 
1705 template<>
1706 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1707  MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1708  public:
1709  static inline void Visit(Map* map, HeapObject* obj) {
1710  Heap* heap = map->GetHeap();
1711  SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1712  if (sfi->scope_info() != heap->empty_fixed_array()) {
1713  heap->RecordFixedArraySubTypeStats(
1714  SCOPE_INFO_SUB_TYPE,
1715  FixedArray::cast(sfi->scope_info())->Size());
1716  }
1717  ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1718  }
1719 };
1720 
1721 
1722 template<>
1723 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1724  MarkCompactMarkingVisitor::kVisitFixedArray> {
1725  public:
1726  static inline void Visit(Map* map, HeapObject* obj) {
1727  Heap* heap = map->GetHeap();
1728  FixedArray* fixed_array = FixedArray::cast(obj);
1729  if (fixed_array == heap->string_table()) {
1730  heap->RecordFixedArraySubTypeStats(
1731  STRING_TABLE_SUB_TYPE,
1732  fixed_array->Size());
1733  }
1734  ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1735  }
1736 };
1737 
1738 
1739 void MarkCompactMarkingVisitor::Initialize() {
1740  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1741 
1742  table_.Register(kVisitJSRegExp,
1743  &VisitRegExpAndFlushCode);
1744 
1745  if (FLAG_track_gc_object_stats) {
1746  // Copy the visitor table to make call-through possible.
1747  non_count_table_.CopyFrom(&table_);
1748 #define VISITOR_ID_COUNT_FUNCTION(id) \
1749  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1750  VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1751 #undef VISITOR_ID_COUNT_FUNCTION
1752  }
1753 }
1754 
1755 
1756 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1757  MarkCompactMarkingVisitor::non_count_table_;
1758 
1759 
1760 class CodeMarkingVisitor : public ThreadVisitor {
1761  public:
1762  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1763  : collector_(collector) {}
1764 
1765  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1766  collector_->PrepareThreadForCodeFlushing(isolate, top);
1767  }
1768 
1769  private:
1770  MarkCompactCollector* collector_;
1771 };
1772 
1773 
1774 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1775  public:
1776  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1777  : collector_(collector) {}
1778 
1779  void VisitPointers(Object** start, Object** end) {
1780  for (Object** p = start; p < end; p++) VisitPointer(p);
1781  }
1782 
1783  void VisitPointer(Object** slot) {
1784  Object* obj = *slot;
1785  if (obj->IsSharedFunctionInfo()) {
1786  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1787  MarkBit shared_mark = Marking::MarkBitFrom(shared);
1788  MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1789  collector_->MarkObject(shared->code(), code_mark);
1790  collector_->MarkObject(shared, shared_mark);
1791  }
1792  }
1793 
1794  private:
1795  MarkCompactCollector* collector_;
1796 };
1797 
1798 
1799 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1800  ThreadLocalTop* top) {
1801  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1802  // Note: for the frame that has a pending lazy deoptimization
1803  // StackFrame::unchecked_code will return a non-optimized code object for
1804  // the outermost function and StackFrame::LookupCode will return
1805  // the actual optimized code object.
1806  StackFrame* frame = it.frame();
1807  Code* code = frame->unchecked_code();
1808  MarkBit code_mark = Marking::MarkBitFrom(code);
1809  MarkObject(code, code_mark);
1810  if (frame->is_optimized()) {
1811  MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1812  frame->LookupCode());
1813  }
1814  }
1815 }
1816 
1817 
1818 void MarkCompactCollector::PrepareForCodeFlushing() {
1819  // Enable code flushing for non-incremental cycles.
1820  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1821  EnableCodeFlushing(!was_marked_incrementally_);
1822  }
1823 
1824  // If code flushing is disabled, there is no need to prepare for it.
1825  if (!is_code_flushing_enabled()) return;
1826 
1827  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1828  // relies on it being marked before any other descriptor array.
1829  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1830  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1831  MarkObject(descriptor_array, descriptor_array_mark);
1832 
1833  // Make sure we are not referencing the code from the stack.
1834  ASSERT(this == heap()->mark_compact_collector());
1835  PrepareThreadForCodeFlushing(heap()->isolate(),
1836  heap()->isolate()->thread_local_top());
1837 
1838  // Iterate the archived stacks in all threads to check if
1839  // the code is referenced.
1840  CodeMarkingVisitor code_marking_visitor(this);
1841  heap()->isolate()->thread_manager()->IterateArchivedThreads(
1842  &code_marking_visitor);
1843 
1844  SharedFunctionInfoMarkingVisitor visitor(this);
1845  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1846  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1847 
1848  ProcessMarkingDeque();
1849 }
1850 
1851 
1852 // Visitor class for marking heap roots.
1853 class RootMarkingVisitor : public ObjectVisitor {
1854  public:
1855  explicit RootMarkingVisitor(Heap* heap)
1856  : collector_(heap->mark_compact_collector()) { }
1857 
1858  void VisitPointer(Object** p) {
1859  MarkObjectByPointer(p);
1860  }
1861 
1862  void VisitPointers(Object** start, Object** end) {
1863  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1864  }
1865 
1866  // Skip the weak next code link in a code object, which is visited in
1867  // ProcessTopOptimizedFrame.
1868  void VisitNextCodeLink(Object** p) { }
1869 
1870  private:
1871  void MarkObjectByPointer(Object** p) {
1872  if (!(*p)->IsHeapObject()) return;
1873 
1874  // Replace flat cons strings in place.
1875  HeapObject* object = ShortCircuitConsString(p);
1876  MarkBit mark_bit = Marking::MarkBitFrom(object);
1877  if (mark_bit.Get()) return;
1878 
1879  Map* map = object->map();
1880  // Mark the object.
1881  collector_->SetMark(object, mark_bit);
1882 
1883  // Mark the map pointer and body, and push them on the marking stack.
1884  MarkBit map_mark = Marking::MarkBitFrom(map);
1885  collector_->MarkObject(map, map_mark);
1886  MarkCompactMarkingVisitor::IterateBody(map, object);
1887 
1888  // Mark all the objects reachable from the map and body. May leave
1889  // overflowed objects in the heap.
1890  collector_->EmptyMarkingDeque();
1891  }
1892 
1893  MarkCompactCollector* collector_;
1894 };
1895 
1896 
1897 // Helper class for pruning the string table.
1898 template<bool finalize_external_strings>
1899 class StringTableCleaner : public ObjectVisitor {
1900  public:
1901  explicit StringTableCleaner(Heap* heap)
1902  : heap_(heap), pointers_removed_(0) { }
1903 
1904  virtual void VisitPointers(Object** start, Object** end) {
1905  // Visit all HeapObject pointers in [start, end).
1906  for (Object** p = start; p < end; p++) {
1907  Object* o = *p;
1908  if (o->IsHeapObject() &&
1909  !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1910  if (finalize_external_strings) {
1911  ASSERT(o->IsExternalString());
1912  heap_->FinalizeExternalString(String::cast(*p));
1913  } else {
1914  pointers_removed_++;
1915  }
1916  // Set the entry to the_hole_value (as deleted).
1917  *p = heap_->the_hole_value();
1918  }
1919  }
1920  }
1921 
1922  int PointersRemoved() {
1923  ASSERT(!finalize_external_strings);
1924  return pointers_removed_;
1925  }
1926 
1927  private:
1928  Heap* heap_;
1929  int pointers_removed_;
1930 };
1931 
1932 
1933 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1934 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1935 
1936 
1937 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1938 // are retained.
1939 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1940  public:
1941  virtual Object* RetainAs(Object* object) {
1942  if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1943  return object;
1944  } else if (object->IsAllocationSite() &&
1945  !(AllocationSite::cast(object)->IsZombie())) {
1946  // "dead" AllocationSites need to live long enough for a traversal of new
1947  // space. These sites get a one-time reprieve.
1948  AllocationSite* site = AllocationSite::cast(object);
1949  site->MarkZombie();
1950  site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1951  return object;
1952  } else {
1953  return NULL;
1954  }
1955  }
1956 };
1957 
1958 
1959 // Fill the marking stack with overflowed objects returned by the given
1960 // iterator. Stop when the marking stack is filled or the end of the space
1961 // is reached, whichever comes first.
1962 template<class T>
1963 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1964  MarkingDeque* marking_deque,
1965  T* it) {
1966  // The caller should ensure that the marking stack is initially not full,
1967  // so that we don't waste effort pointlessly scanning for objects.
1968  ASSERT(!marking_deque->IsFull());
1969 
1970  Map* filler_map = heap->one_pointer_filler_map();
1971  for (HeapObject* object = it->Next();
1972  object != NULL;
1973  object = it->Next()) {
1974  MarkBit markbit = Marking::MarkBitFrom(object);
1975  if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1976  Marking::GreyToBlack(markbit);
1977  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1978  marking_deque->PushBlack(object);
1979  if (marking_deque->IsFull()) return;
1980  }
1981  }
1982 }
1983 
1984 
1985 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1986 
1987 
1988 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1989  MemoryChunk* p) {
1990  ASSERT(!marking_deque->IsFull());
1991  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1992  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1993  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1994  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1995 
1996  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1997  Address cell_base = it.CurrentCellBase();
1998  MarkBit::CellType* cell = it.CurrentCell();
1999 
2000  const MarkBit::CellType current_cell = *cell;
2001  if (current_cell == 0) continue;
2002 
2003  MarkBit::CellType grey_objects;
2004  if (it.HasNext()) {
2005  const MarkBit::CellType next_cell = *(cell+1);
2006  grey_objects = current_cell &
2007  ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
2008  } else {
2009  grey_objects = current_cell & (current_cell >> 1);
2010  }
2011 
2012  int offset = 0;
2013  while (grey_objects != 0) {
2014  int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
2015  grey_objects >>= trailing_zeros;
2016  offset += trailing_zeros;
2017  MarkBit markbit(cell, 1 << offset, false);
2018  ASSERT(Marking::IsGrey(markbit));
2019  Marking::GreyToBlack(markbit);
2020  Address addr = cell_base + offset * kPointerSize;
2021  HeapObject* object = HeapObject::FromAddress(addr);
2022  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
2023  marking_deque->PushBlack(object);
2024  if (marking_deque->IsFull()) return;
2025  offset += 2;
2026  grey_objects >>= 2;
2027  }
2028 
2029  grey_objects >>= (Bitmap::kBitsPerCell - 1);
2030  }
2031 }
2032 
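// A minimal standalone sketch (not part of the original mark-compact.cc) of
// the grey-bit extraction performed in DiscoverGreyObjectsOnPage above. It
// assumes the documented two-bit encoding (white "00", black "10", grey "11")
// and 32-bit mark-bit cells (uint32_t, as in MarkBit::CellType): AND-ing a
// cell with itself shifted right by one leaves a set bit exactly at the first
// mark bit of every grey object, with the next cell supplying the bit that
// may spill across the cell boundary.
static inline uint32_t SketchGreyBits(uint32_t current_cell,
                                      uint32_t next_cell) {
  const int kSketchBitsPerCell = 32;  // assumed cell width
  return current_cell &
      ((current_cell >> 1) | (next_cell << (kSketchBitsPerCell - 1)));
}
// Example: current_cell == 0xB (bits 0 and 1 set: a grey object at offset 0;
// bit 3 set, bit 4 clear: a black object at offset 3) gives
// SketchGreyBits(0xB, 0) == 0x1, i.e. only the grey object is reported.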
2033 
2034 int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
2035  NewSpace* new_space,
2036  NewSpacePage* p) {
2037  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2038  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
2039  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
2040  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2041 
2042  MarkBit::CellType* cells = p->markbits()->cells();
2043  int survivors_size = 0;
2044 
2045  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
2046  Address cell_base = it.CurrentCellBase();
2047  MarkBit::CellType* cell = it.CurrentCell();
2048 
2049  MarkBit::CellType current_cell = *cell;
2050  if (current_cell == 0) continue;
2051 
2052  int offset = 0;
2053  while (current_cell != 0) {
2054  int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
2055  current_cell >>= trailing_zeros;
2056  offset += trailing_zeros;
2057  Address address = cell_base + offset * kPointerSize;
2058  HeapObject* object = HeapObject::FromAddress(address);
2059 
2060  int size = object->Size();
2061  survivors_size += size;
2062 
2063  Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
2064 
2065  offset++;
2066  current_cell >>= 1;
2067  // Aggressively promote young survivors to the old space.
2068  if (TryPromoteObject(object, size)) {
2069  continue;
2070  }
2071 
2072  // Promotion failed. Just migrate object to another semispace.
2073  MaybeObject* allocation = new_space->AllocateRaw(size);
2074  if (allocation->IsFailure()) {
2075  if (!new_space->AddFreshPage()) {
2076  // Shouldn't happen. We are sweeping linearly, and to-space
2077  // has the same number of pages as from-space, so there is
2078  // always room.
2079  UNREACHABLE();
2080  }
2081  allocation = new_space->AllocateRaw(size);
2082  ASSERT(!allocation->IsFailure());
2083  }
2084  Object* target = allocation->ToObjectUnchecked();
2085 
2086  MigrateObject(HeapObject::cast(target),
2087  object,
2088  size,
2089  NEW_SPACE);
2090  }
2091  *cells = 0;
2092  }
2093  return survivors_size;
2094 }
2095 
2096 
2097 static void DiscoverGreyObjectsInSpace(Heap* heap,
2098  MarkingDeque* marking_deque,
2099  PagedSpace* space) {
2100  if (!space->was_swept_conservatively()) {
2101  HeapObjectIterator it(space);
2102  DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2103  } else {
2104  PageIterator it(space);
2105  while (it.has_next()) {
2106  Page* p = it.next();
2107  DiscoverGreyObjectsOnPage(marking_deque, p);
2108  if (marking_deque->IsFull()) return;
2109  }
2110  }
2111 }
2112 
2113 
2114 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2115  MarkingDeque* marking_deque) {
2116  NewSpace* space = heap->new_space();
2117  NewSpacePageIterator it(space->bottom(), space->top());
2118  while (it.has_next()) {
2119  NewSpacePage* page = it.next();
2120  DiscoverGreyObjectsOnPage(marking_deque, page);
2121  if (marking_deque->IsFull()) return;
2122  }
2123 }
2124 
2125 
2126 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2127  Object* o = *p;
2128  if (!o->IsHeapObject()) return false;
2129  HeapObject* heap_object = HeapObject::cast(o);
2130  MarkBit mark = Marking::MarkBitFrom(heap_object);
2131  return !mark.Get();
2132 }
2133 
2134 
2135 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2136  Object** p) {
2137  Object* o = *p;
2138  ASSERT(o->IsHeapObject());
2139  HeapObject* heap_object = HeapObject::cast(o);
2140  MarkBit mark = Marking::MarkBitFrom(heap_object);
2141  return !mark.Get();
2142 }
2143 
2144 
2145 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2146  StringTable* string_table = heap()->string_table();
2147  // Mark the string table itself.
2148  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2149  SetMark(string_table, string_table_mark);
2150  // Explicitly mark the prefix.
2151  string_table->IteratePrefix(visitor);
2152  ProcessMarkingDeque();
2153 }
2154 
2155 
2156 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2157  MarkBit mark_bit = Marking::MarkBitFrom(site);
2158  SetMark(site, mark_bit);
2159 }
2160 
2161 
2162 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2163  // Mark the heap roots including global variables, stack variables,
2164  // etc., and all objects reachable from them.
2165  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2166 
2167  // Handle the string table specially.
2168  MarkStringTable(visitor);
2169 
2170  MarkWeakObjectToCodeTable();
2171 
2172  // There may be overflowed objects in the heap. Visit them now.
2173  while (marking_deque_.overflowed()) {
2174  RefillMarkingDeque();
2175  EmptyMarkingDeque();
2176  }
2177 }
2178 
2179 
2180 void MarkCompactCollector::MarkImplicitRefGroups() {
2181  List<ImplicitRefGroup*>* ref_groups =
2182  isolate()->global_handles()->implicit_ref_groups();
2183 
2184  int last = 0;
2185  for (int i = 0; i < ref_groups->length(); i++) {
2186  ImplicitRefGroup* entry = ref_groups->at(i);
2187  ASSERT(entry != NULL);
2188 
2189  if (!IsMarked(*entry->parent)) {
2190  (*ref_groups)[last++] = entry;
2191  continue;
2192  }
2193 
2194  Object*** children = entry->children;
2195  // A parent object is marked, so mark all child heap objects.
2196  for (size_t j = 0; j < entry->length; ++j) {
2197  if ((*children[j])->IsHeapObject()) {
2198  HeapObject* child = HeapObject::cast(*children[j]);
2199  MarkBit mark = Marking::MarkBitFrom(child);
2200  MarkObject(child, mark);
2201  }
2202  }
2203 
2204  // Once the entire group has been marked, dispose it because it's
2205  // not needed anymore.
2206  delete entry;
2207  }
2208  ref_groups->Rewind(last);
2209 }
2210 
2211 
2212 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2213  HeapObject* weak_object_to_code_table =
2214  HeapObject::cast(heap()->weak_object_to_code_table());
2215  if (!IsMarked(weak_object_to_code_table)) {
2216  MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2217  SetMark(weak_object_to_code_table, mark);
2218  }
2219 }
2220 
2221 
2222 // Mark all objects reachable from the objects on the marking stack.
2223 // Before: the marking stack contains zero or more heap object pointers.
2224 // After: the marking stack is empty, and all objects reachable from the
2225 // marking stack have been marked, or are overflowed in the heap.
2226 void MarkCompactCollector::EmptyMarkingDeque() {
2227  while (!marking_deque_.IsEmpty()) {
2228  HeapObject* object = marking_deque_.Pop();
2229  ASSERT(object->IsHeapObject());
2230  ASSERT(heap()->Contains(object));
2231  ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2232 
2233  Map* map = object->map();
2234  MarkBit map_mark = Marking::MarkBitFrom(map);
2235  MarkObject(map, map_mark);
2236 
2237  MarkCompactMarkingVisitor::IterateBody(map, object);
2238  }
2239 }
2240 
2241 
2242 // Sweep the heap for overflowed objects, clear their overflow bits, and
2243 // push them on the marking stack. Stop early if the marking stack fills
2244 // before sweeping completes. If sweeping completes, there are no remaining
2245 // overflowed objects in the heap so the overflow flag on the marking stack
2246 // is cleared.
2247 void MarkCompactCollector::RefillMarkingDeque() {
2248  ASSERT(marking_deque_.overflowed());
2249 
2250  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2251  if (marking_deque_.IsFull()) return;
2252 
2253  DiscoverGreyObjectsInSpace(heap(),
2254  &marking_deque_,
2255  heap()->old_pointer_space());
2256  if (marking_deque_.IsFull()) return;
2257 
2258  DiscoverGreyObjectsInSpace(heap(),
2259  &marking_deque_,
2260  heap()->old_data_space());
2261  if (marking_deque_.IsFull()) return;
2262 
2263  DiscoverGreyObjectsInSpace(heap(),
2264  &marking_deque_,
2265  heap()->code_space());
2266  if (marking_deque_.IsFull()) return;
2267 
2268  DiscoverGreyObjectsInSpace(heap(),
2269  &marking_deque_,
2270  heap()->map_space());
2271  if (marking_deque_.IsFull()) return;
2272 
2273  DiscoverGreyObjectsInSpace(heap(),
2274  &marking_deque_,
2275  heap()->cell_space());
2276  if (marking_deque_.IsFull()) return;
2277 
2278  DiscoverGreyObjectsInSpace(heap(),
2279  &marking_deque_,
2280  heap()->property_cell_space());
2281  if (marking_deque_.IsFull()) return;
2282 
2283  LargeObjectIterator lo_it(heap()->lo_space());
2284  DiscoverGreyObjectsWithIterator(heap(),
2285  &marking_deque_,
2286  &lo_it);
2287  if (marking_deque_.IsFull()) return;
2288 
2289  marking_deque_.ClearOverflowed();
2290 }
2291 
2292 
2293 // Mark all objects reachable (transitively) from objects on the marking
2294 // stack. Before: the marking stack contains zero or more heap object
2295 // pointers. After: the marking stack is empty and there are no overflowed
2296 // objects in the heap.
2297 void MarkCompactCollector::ProcessMarkingDeque() {
2298  EmptyMarkingDeque();
2299  while (marking_deque_.overflowed()) {
2300  RefillMarkingDeque();
2301  EmptyMarkingDeque();
2302  }
2303 }
2304 
2305 
2306 // Mark all objects reachable (transitively) from objects on the marking
2307 // stack including references only considered in the atomic marking pause.
2308 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2309  bool work_to_do = true;
2310  ASSERT(marking_deque_.IsEmpty());
2311  while (work_to_do) {
2312  isolate()->global_handles()->IterateObjectGroups(
2313  visitor, &IsUnmarkedHeapObjectWithHeap);
2314  MarkImplicitRefGroups();
2315  ProcessWeakCollections();
2316  work_to_do = !marking_deque_.IsEmpty();
2317  ProcessMarkingDeque();
2318  }
2319 }
2320 
2321 
2322 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2323  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2324  !it.done(); it.Advance()) {
2325  if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2326  return;
2327  }
2328  if (it.frame()->type() == StackFrame::OPTIMIZED) {
2329  Code* code = it.frame()->LookupCode();
2330  if (!code->CanDeoptAt(it.frame()->pc())) {
2331  code->CodeIterateBody(visitor);
2332  }
2333  ProcessMarkingDeque();
2334  return;
2335  }
2336  }
2337 }
2338 
2339 
2340 void MarkCompactCollector::MarkLiveObjects() {
2341  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2342  // The recursive GC marker detects when it is nearing stack overflow,
2343  // and switches to a different marking system. JS interrupts interfere
2344  // with the C stack limit check.
2345  PostponeInterruptsScope postpone(isolate());
2346 
2347  bool incremental_marking_overflowed = false;
2348  IncrementalMarking* incremental_marking = heap_->incremental_marking();
2349  if (was_marked_incrementally_) {
2350  // Finalize the incremental marking and check whether we had an overflow.
2351  // Both markers use grey color to mark overflowed objects so
2352  // non-incremental marker can deal with them as if overflow
2353  // occurred during normal marking.
2354  // But incremental marker uses a separate marking deque
2355  // so we have to explicitly copy its overflow state.
2356  incremental_marking->Finalize();
2357  incremental_marking_overflowed =
2358  incremental_marking->marking_deque()->overflowed();
2359  incremental_marking->marking_deque()->ClearOverflowed();
2360  } else {
2361  // Abort any pending incremental activities e.g. incremental sweeping.
2362  incremental_marking->Abort();
2363  }
2364 
2365 #ifdef DEBUG
2366  ASSERT(state_ == PREPARE_GC);
2367  state_ = MARK_LIVE_OBJECTS;
2368 #endif
2369  // The to space contains live objects, and a page in from space is used
2370  // as a marking stack.
2371  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2372  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2373  if (FLAG_force_marking_deque_overflows) {
2374  marking_deque_end = marking_deque_start + 64 * kPointerSize;
2375  }
2376  marking_deque_.Initialize(marking_deque_start,
2377  marking_deque_end);
2378  ASSERT(!marking_deque_.overflowed());
2379 
2380  if (incremental_marking_overflowed) {
2381  // There are overflowed objects left in the heap after incremental marking.
2382  marking_deque_.SetOverflowed();
2383  }
2384 
2385  PrepareForCodeFlushing();
2386 
2387  if (was_marked_incrementally_) {
2388  // There is no write barrier on cells so we have to scan them now at the end
2389  // of the incremental marking.
2390  {
2391  HeapObjectIterator cell_iterator(heap()->cell_space());
2392  HeapObject* cell;
2393  while ((cell = cell_iterator.Next()) != NULL) {
2394  ASSERT(cell->IsCell());
2395  if (IsMarked(cell)) {
2396  int offset = Cell::kValueOffset;
2397  MarkCompactMarkingVisitor::VisitPointer(
2398  heap(),
2399  reinterpret_cast<Object**>(cell->address() + offset));
2400  }
2401  }
2402  }
2403  {
2404  HeapObjectIterator js_global_property_cell_iterator(
2405  heap()->property_cell_space());
2406  HeapObject* cell;
2407  while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2408  ASSERT(cell->IsPropertyCell());
2409  if (IsMarked(cell)) {
2410  MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2411  }
2412  }
2413  }
2414  }
2415 
2416  RootMarkingVisitor root_visitor(heap());
2417  MarkRoots(&root_visitor);
2418 
2419  ProcessTopOptimizedFrame(&root_visitor);
2420 
2421  // The objects reachable from the roots are marked, yet unreachable
2422  // objects are unmarked. Mark objects reachable due to host
2423  // application specific logic or through Harmony weak maps.
2424  ProcessEphemeralMarking(&root_visitor);
2425 
2426  // The objects reachable from the roots, weak maps or object groups
2427  // are marked, yet unreachable objects are unmarked. Mark objects
2428  // reachable only from weak global handles.
2429  //
2430  // First we identify nonlive weak handles and mark them as pending
2431  // destruction.
2432  heap()->isolate()->global_handles()->IdentifyWeakHandles(
2433  &IsUnmarkedHeapObject);
2434  // Then we mark the objects and process the transitive closure.
2435  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2436  while (marking_deque_.overflowed()) {
2437  RefillMarkingDeque();
2438  EmptyMarkingDeque();
2439  }
2440 
2441  // Repeat host application specific and Harmony weak maps marking to
2442  // mark unmarked objects reachable from the weak roots.
2443  ProcessEphemeralMarking(&root_visitor);
2444 
2445  AfterMarking();
2446 }
2447 
2448 
2449 void MarkCompactCollector::AfterMarking() {
2450  // Object literal map caches reference strings (cache keys) and maps
2451  // (cache values). At this point still useful maps have already been
2452  // marked. Mark the keys for the alive values before we process the
2453  // string table.
2454  ProcessMapCaches();
2455 
2456  // Prune the string table removing all strings only pointed to by the
2457  // string table. Cannot use string_table() here because the string
2458  // table is marked.
2459  StringTable* string_table = heap()->string_table();
2460  InternalizedStringTableCleaner internalized_visitor(heap());
2461  string_table->IterateElements(&internalized_visitor);
2462  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2463 
2464  ExternalStringTableCleaner external_visitor(heap());
2465  heap()->external_string_table_.Iterate(&external_visitor);
2466  heap()->external_string_table_.CleanUp();
2467 
2468  // Process the weak references.
2469  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2470  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2471 
2472  // Remove object groups after marking phase.
2473  heap()->isolate()->global_handles()->RemoveObjectGroups();
2474  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2475 
2476  // Flush code from collected candidates.
2477  if (is_code_flushing_enabled()) {
2478  code_flusher_->ProcessCandidates();
2479  // If incremental marker does not support code flushing, we need to
2480  // disable it before incremental marking steps for next cycle.
2481  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2482  EnableCodeFlushing(false);
2483  }
2484  }
2485 
2486  if (FLAG_track_gc_object_stats) {
2487  heap()->CheckpointObjectStats();
2488  }
2489 }
2490 
2491 
2492 void MarkCompactCollector::ProcessMapCaches() {
2493  Object* raw_context = heap()->native_contexts_list_;
2494  while (raw_context != heap()->undefined_value()) {
2495  Context* context = reinterpret_cast<Context*>(raw_context);
2496  if (IsMarked(context)) {
2497  HeapObject* raw_map_cache =
2498  HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2499  // A map cache may be reachable from the stack. In this case
2500  // it's already transitively marked and it's too late to clean
2501  // up its parts.
2502  if (!IsMarked(raw_map_cache) &&
2503  raw_map_cache != heap()->undefined_value()) {
2504  MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2505  int existing_elements = map_cache->NumberOfElements();
2506  int used_elements = 0;
2507  for (int i = MapCache::kElementsStartIndex;
2508  i < map_cache->length();
2509  i += MapCache::kEntrySize) {
2510  Object* raw_key = map_cache->get(i);
2511  if (raw_key == heap()->undefined_value() ||
2512  raw_key == heap()->the_hole_value()) continue;
2513  STATIC_ASSERT(MapCache::kEntrySize == 2);
2514  Object* raw_map = map_cache->get(i + 1);
2515  if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2516  ++used_elements;
2517  } else {
2518  // Delete useless entries with unmarked maps.
2519  ASSERT(raw_map->IsMap());
2520  map_cache->set_the_hole(i);
2521  map_cache->set_the_hole(i + 1);
2522  }
2523  }
2524  if (used_elements == 0) {
2525  context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2526  } else {
2527  // Note: we don't actually shrink the cache here to avoid
2528  // extra complexity during GC. We rely on subsequent cache
2529  // usages (EnsureCapacity) to do this.
2530  map_cache->ElementsRemoved(existing_elements - used_elements);
2531  MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2532  MarkObject(map_cache, map_cache_markbit);
2533  }
2534  }
2535  }
2536  // Move to next element in the list.
2537  raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2538  }
2539  ProcessMarkingDeque();
2540 }
2541 
2542 
2543 void MarkCompactCollector::ReattachInitialMaps() {
2544  HeapObjectIterator map_iterator(heap()->map_space());
2545  for (HeapObject* obj = map_iterator.Next();
2546  obj != NULL;
2547  obj = map_iterator.Next()) {
2548  Map* map = Map::cast(obj);
2549 
2550  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2551  if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2552 
2553  if (map->attached_to_shared_function_info()) {
2554  JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2555  }
2556  }
2557 }
2558 
2559 
2560 void MarkCompactCollector::ClearNonLiveReferences() {
2561  // Iterate over the map space, setting map transitions that go from
2562  // a marked map to an unmarked map to null transitions. This action
2563  // is carried out only on maps of JSObjects and related subtypes.
2564  HeapObjectIterator map_iterator(heap()->map_space());
2565  for (HeapObject* obj = map_iterator.Next();
2566  obj != NULL;
2567  obj = map_iterator.Next()) {
2568  Map* map = Map::cast(obj);
2569 
2570  if (!map->CanTransition()) continue;
2571 
2572  MarkBit map_mark = Marking::MarkBitFrom(map);
2573  if (map_mark.Get() && map->attached_to_shared_function_info()) {
2574  // This map is used for inobject slack tracking and has been detached
2575  // from SharedFunctionInfo during the mark phase.
2576  // Since it survived the GC, reattach it now.
2577  JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2578  }
2579 
2580  ClearNonLivePrototypeTransitions(map);
2581  ClearNonLiveMapTransitions(map, map_mark);
2582 
2583  if (map_mark.Get()) {
2584  ClearNonLiveDependentCode(map->dependent_code());
2585  } else {
2586  ClearAndDeoptimizeDependentCode(map->dependent_code());
2587  map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2588  }
2589  }
2590 
2591  // Iterate over property cell space, removing dependent code that is not
2592  // otherwise kept alive by strong references.
2593  HeapObjectIterator cell_iterator(heap_->property_cell_space());
2594  for (HeapObject* cell = cell_iterator.Next();
2595  cell != NULL;
2596  cell = cell_iterator.Next()) {
2597  if (IsMarked(cell)) {
2598  ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2599  }
2600  }
2601 
2602  // Iterate over allocation sites, removing dependent code that is not
2603  // otherwise kept alive by strong references.
2604  Object* undefined = heap()->undefined_value();
2605  for (Object* site = heap()->allocation_sites_list();
2606  site != undefined;
2607  site = AllocationSite::cast(site)->weak_next()) {
2608  if (IsMarked(site)) {
2609  ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2610  }
2611  }
2612 
2613  if (heap_->weak_object_to_code_table()->IsHashTable()) {
2614  WeakHashTable* table =
2615  WeakHashTable::cast(heap_->weak_object_to_code_table());
2616  uint32_t capacity = table->Capacity();
2617  for (uint32_t i = 0; i < capacity; i++) {
2618  uint32_t key_index = table->EntryToIndex(i);
2619  Object* key = table->get(key_index);
2620  if (!table->IsKey(key)) continue;
2621  uint32_t value_index = table->EntryToValueIndex(i);
2622  Object* value = table->get(value_index);
2623  if (key->IsCell() && !IsMarked(key)) {
2624  Cell* cell = Cell::cast(key);
2625  Object* object = cell->value();
2626  if (IsMarked(object)) {
2627  MarkBit mark = Marking::MarkBitFrom(cell);
2628  SetMark(cell, mark);
2629  Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2630  RecordSlot(value_slot, value_slot, *value_slot);
2631  }
2632  }
2633  if (IsMarked(key)) {
2634  if (!IsMarked(value)) {
2635  HeapObject* obj = HeapObject::cast(value);
2636  MarkBit mark = Marking::MarkBitFrom(obj);
2637  SetMark(obj, mark);
2638  }
2639  ClearNonLiveDependentCode(DependentCode::cast(value));
2640  } else {
2641  ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
2642  table->set(key_index, heap_->the_hole_value());
2643  table->set(value_index, heap_->the_hole_value());
2644  }
2645  }
2646  }
2647 }
2648 
2649 
2650 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2651  int number_of_transitions = map->NumberOfProtoTransitions();
2652  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2653 
2654  int new_number_of_transitions = 0;
2655  const int header = Map::kProtoTransitionHeaderSize;
2656  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2657  const int map_offset = header + Map::kProtoTransitionMapOffset;
2658  const int step = Map::kProtoTransitionElementsPerEntry;
2659  for (int i = 0; i < number_of_transitions; i++) {
2660  Object* prototype = prototype_transitions->get(proto_offset + i * step);
2661  Object* cached_map = prototype_transitions->get(map_offset + i * step);
2662  if (IsMarked(prototype) && IsMarked(cached_map)) {
2663  ASSERT(!prototype->IsUndefined());
2664  int proto_index = proto_offset + new_number_of_transitions * step;
2665  int map_index = map_offset + new_number_of_transitions * step;
2666  if (new_number_of_transitions != i) {
2667  prototype_transitions->set(
2668  proto_index,
2669  prototype,
2670  UPDATE_WRITE_BARRIER);
2671  prototype_transitions->set(
2672  map_index,
2673  cached_map,
2674  SKIP_WRITE_BARRIER);
2675  }
2676  Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2677  RecordSlot(slot, slot, prototype);
2678  new_number_of_transitions++;
2679  }
2680  }
2681 
2682  if (new_number_of_transitions != number_of_transitions) {
2683  map->SetNumberOfProtoTransitions(new_number_of_transitions);
2684  }
2685 
2686  // Fill slots that became free with undefined value.
2687  for (int i = new_number_of_transitions * step;
2688  i < number_of_transitions * step;
2689  i++) {
2690  prototype_transitions->set_undefined(header + i);
2691  }
2692 }
2693 
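// Illustrative sketch (not part of the original file) of the slide-forward
// compaction used above for the (prototype, cached map) pairs: survivors are
// copied towards the front and the freed tail is later filled with a filler
// value by the caller. "Entry" and the liveness predicate are hypothetical
// stand-ins for the transition pairs and the mark-bit test.
template <typename Entry, typename Pred>
static int SketchCompactLive(Entry* entries, int count, Pred is_live) {
  int live = 0;
  for (int i = 0; i < count; i++) {
    if (!is_live(entries[i])) continue;
    if (live != i) entries[live] = entries[i];  // slide the survivor forward
    live++;
  }
  return live;  // entries [live, count) are now stale and must be cleared
}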
2694 
2695 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2696  MarkBit map_mark) {
2697  Object* potential_parent = map->GetBackPointer();
2698  if (!potential_parent->IsMap()) return;
2699  Map* parent = Map::cast(potential_parent);
2700 
2701  // Follow back pointer, check whether we are dealing with a map transition
2702  // from a live map to a dead path and in case clear transitions of parent.
2703  bool current_is_alive = map_mark.Get();
2704  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2705  if (!current_is_alive && parent_is_alive) {
2706  parent->ClearNonLiveTransitions(heap());
2707  }
2708 }
2709 
2710 
2711 void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
2712  DependentCode* entries) {
2713  DisallowHeapAllocation no_allocation;
2714  DependentCode::GroupStartIndexes starts(entries);
2715  int number_of_entries = starts.number_of_entries();
2716  if (number_of_entries == 0) return;
2717  for (int i = 0; i < number_of_entries; i++) {
2718  // If the entry is compilation info then the map must be alive,
2719  // and ClearAndDeoptimizeDependentCode shouldn't be called.
2720  ASSERT(entries->is_code_at(i));
2721  Code* code = entries->code_at(i);
2722 
2723  if (IsMarked(code) && !code->marked_for_deoptimization()) {
2724  code->set_marked_for_deoptimization(true);
2725  code->InvalidateEmbeddedObjects();
2726  have_code_to_deoptimize_ = true;
2727  }
2728  entries->clear_at(i);
2729  }
2730 }
2731 
2732 
2733 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2734  DisallowHeapAllocation no_allocation;
2735  DependentCode::GroupStartIndexes starts(entries);
2736  int number_of_entries = starts.number_of_entries();
2737  if (number_of_entries == 0) return;
2738  int new_number_of_entries = 0;
2739  // Go through all groups, remove dead codes and compact.
2740  for (int g = 0; g < DependentCode::kGroupCount; g++) {
2741  int group_number_of_entries = 0;
2742  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2743  Object* obj = entries->object_at(i);
2744  ASSERT(obj->IsCode() || IsMarked(obj));
2745  if (IsMarked(obj) &&
2746  (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2747  if (new_number_of_entries + group_number_of_entries != i) {
2748  entries->set_object_at(
2749  new_number_of_entries + group_number_of_entries, obj);
2750  }
2751  Object** slot = entries->slot_at(new_number_of_entries +
2752  group_number_of_entries);
2753  RecordSlot(slot, slot, obj);
2754  group_number_of_entries++;
2755  }
2756  }
2757  entries->set_number_of_entries(
2758  static_cast<DependentCode::DependencyGroup>(g),
2759  group_number_of_entries);
2760  new_number_of_entries += group_number_of_entries;
2761  }
2762  for (int i = new_number_of_entries; i < number_of_entries; i++) {
2763  entries->clear_at(i);
2764  }
2765 }
2766 
2767 
2768 void MarkCompactCollector::ProcessWeakCollections() {
2769  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2770  Object* weak_collection_obj = encountered_weak_collections();
2771  while (weak_collection_obj != Smi::FromInt(0)) {
2772  ASSERT(MarkCompactCollector::IsMarked(
2773  HeapObject::cast(weak_collection_obj)));
2774  JSWeakCollection* weak_collection =
2775  reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2776  ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2777  Object** anchor = reinterpret_cast<Object**>(table->address());
2778  for (int i = 0; i < table->Capacity(); i++) {
2779  if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2780  Object** key_slot =
2781  table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2782  RecordSlot(anchor, key_slot, *key_slot);
2783  Object** value_slot =
2784  table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2785  MarkCompactMarkingVisitor::MarkObjectByPointer(
2786  this, anchor, value_slot);
2787  }
2788  }
2789  weak_collection_obj = weak_collection->next();
2790  }
2791 }
2792 
2793 
2794 void MarkCompactCollector::ClearWeakCollections() {
2795  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2796  Object* weak_collection_obj = encountered_weak_collections();
2797  while (weak_collection_obj != Smi::FromInt(0)) {
2798  ASSERT(MarkCompactCollector::IsMarked(
2799  HeapObject::cast(weak_collection_obj)));
2800  JSWeakCollection* weak_collection =
2801  reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2802  ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2803  for (int i = 0; i < table->Capacity(); i++) {
2804  if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2805  table->RemoveEntry(i);
2806  }
2807  }
2808  weak_collection_obj = weak_collection->next();
2809  weak_collection->set_next(Smi::FromInt(0));
2810  }
2811  set_encountered_weak_collections(Smi::FromInt(0));
2812 }
2813 
2814 
2815 // We scavenge new space simultaneously with sweeping. This is done in two
2816 // passes.
2817 //
2818 // The first pass migrates all alive objects from one semispace to another or
2819 // promotes them to old space. The forwarding address is written directly
2820 // into the first word of the object without any encoding. If the object is
2821 // dead we write NULL as a forwarding address.
2822 //
2823 // The second pass updates pointers to new space in all spaces. It is possible
2824 // to encounter pointers to dead new space objects during traversal of pointers
2825 // to new space. We should clear them to avoid encountering them during next
2826 // pointer iteration. This is an issue if the store buffer overflows and we
2827 // have to scan the entire old space, including dead objects, looking for
2828 // pointers to new space.
2829 void MarkCompactCollector::MigrateObject(HeapObject* dst,
2830  HeapObject* src,
2831  int size,
2832  AllocationSpace dest) {
2833  Address dst_addr = dst->address();
2834  Address src_addr = src->address();
2835  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
2836  if (heap_profiler->is_tracking_object_moves()) {
2837  heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
2838  }
2839  ASSERT(heap()->AllowedToBeMigrated(src, dest));
2840  ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2841  if (dest == OLD_POINTER_SPACE) {
2842  Address src_slot = src_addr;
2843  Address dst_slot = dst_addr;
2844  ASSERT(IsAligned(size, kPointerSize));
2845 
2846  for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2847  Object* value = Memory::Object_at(src_slot);
2848 
2849  Memory::Object_at(dst_slot) = value;
2850 
2851  if (heap_->InNewSpace(value)) {
2852  heap_->store_buffer()->Mark(dst_slot);
2853  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2854  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2855  &migration_slots_buffer_,
2856  reinterpret_cast<Object**>(dst_slot),
2857  SlotsBuffer::IGNORE_OVERFLOW);
2858  }
2859 
2860  src_slot += kPointerSize;
2861  dst_slot += kPointerSize;
2862  }
2863 
2864  if (compacting_ && dst->IsJSFunction()) {
2865  Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2866  Address code_entry = Memory::Address_at(code_entry_slot);
2867 
2868  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2869  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2870  &migration_slots_buffer_,
2871  SlotsBuffer::CODE_ENTRY_SLOT,
2872  code_entry_slot,
2873  SlotsBuffer::IGNORE_OVERFLOW);
2874  }
2875  } else if (compacting_ && dst->IsConstantPoolArray()) {
2876  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst);
2877  for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
2878  Address code_entry_slot =
2879  dst_addr + constant_pool->OffsetOfElementAt(i);
2880  Address code_entry = Memory::Address_at(code_entry_slot);
2881 
2882  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2883  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2884  &migration_slots_buffer_,
2885  SlotsBuffer::CODE_ENTRY_SLOT,
2886  code_entry_slot,
2887  SlotsBuffer::IGNORE_OVERFLOW);
2888  }
2889  }
2890  }
2891  } else if (dest == CODE_SPACE) {
2892  PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2893  heap()->MoveBlock(dst_addr, src_addr, size);
2894  SlotsBuffer::AddTo(&slots_buffer_allocator_,
2895  &migration_slots_buffer_,
2896  SlotsBuffer::RELOCATED_CODE_OBJECT,
2897  dst_addr,
2898  SlotsBuffer::IGNORE_OVERFLOW);
2899  Code::cast(dst)->Relocate(dst_addr - src_addr);
2900  } else {
2901  ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2902  heap()->MoveBlock(dst_addr, src_addr, size);
2903  }
2904  Memory::Address_at(src_addr) = dst_addr;
2905 }
2906 
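// Illustrative sketch (not part of the original source) of the per-word
// bookkeeping MigrateObject does above when copying into old pointer space:
// slots holding new-space values are remembered in the store buffer, slots
// pointing into evacuation candidates are recorded in the migration slots
// buffer, and all other slots need no remembering. The two predicates are
// hypothetical stand-ins for the Heap::InNewSpace and IsOnEvacuationCandidate
// checks used in the loop above.
enum SketchSlotRecording { SKETCH_RECORD_NOTHING,
                           SKETCH_RECORD_IN_STORE_BUFFER,
                           SKETCH_RECORD_IN_SLOTS_BUFFER };

template <typename IsNewSpace, typename IsCandidate>
static SketchSlotRecording SketchClassifySlot(void* value,
                                              IsNewSpace in_new_space,
                                              IsCandidate on_candidate) {
  if (in_new_space(value)) return SKETCH_RECORD_IN_STORE_BUFFER;
  if (on_candidate(value)) return SKETCH_RECORD_IN_SLOTS_BUFFER;
  return SKETCH_RECORD_NOTHING;
}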
2907 
2908 // Visitor for updating pointers from live objects in old spaces to new space.
2909 // It does not expect to encounter pointers to dead objects.
2910 class PointersUpdatingVisitor: public ObjectVisitor {
2911  public:
2912  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2913 
2914  void VisitPointer(Object** p) {
2915  UpdatePointer(p);
2916  }
2917 
2918  void VisitPointers(Object** start, Object** end) {
2919  for (Object** p = start; p < end; p++) UpdatePointer(p);
2920  }
2921 
2922  void VisitEmbeddedPointer(RelocInfo* rinfo) {
2923  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2924  Object* target = rinfo->target_object();
2925  Object* old_target = target;
2926  VisitPointer(&target);
2927  // Avoid unnecessary changes that might unnecessarily flush the
2928  // instruction cache.
2929  if (target != old_target) {
2930  rinfo->set_target_object(target);
2931  }
2932  }
2933 
2934  void VisitCodeTarget(RelocInfo* rinfo) {
2935  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2936  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2937  Object* old_target = target;
2938  VisitPointer(&target);
2939  if (target != old_target) {
2940  rinfo->set_target_address(Code::cast(target)->instruction_start());
2941  }
2942  }
2943 
2944  void VisitCodeAgeSequence(RelocInfo* rinfo) {
2945  ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2946  Object* stub = rinfo->code_age_stub();
2947  ASSERT(stub != NULL);
2948  VisitPointer(&stub);
2949  if (stub != rinfo->code_age_stub()) {
2950  rinfo->set_code_age_stub(Code::cast(stub));
2951  }
2952  }
2953 
2954  void VisitDebugTarget(RelocInfo* rinfo) {
2955  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2956  rinfo->IsPatchedReturnSequence()) ||
2957  (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2958  rinfo->IsPatchedDebugBreakSlotSequence()));
2959  Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2960  VisitPointer(&target);
2961  rinfo->set_call_address(Code::cast(target)->instruction_start());
2962  }
2963 
2964  static inline void UpdateSlot(Heap* heap, Object** slot) {
2965  Object* obj = *slot;
2966 
2967  if (!obj->IsHeapObject()) return;
2968 
2969  HeapObject* heap_obj = HeapObject::cast(obj);
2970 
2971  MapWord map_word = heap_obj->map_word();
2972  if (map_word.IsForwardingAddress()) {
2973  ASSERT(heap->InFromSpace(heap_obj) ||
2974  MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2975  HeapObject* target = map_word.ToForwardingAddress();
2976  *slot = target;
2977  ASSERT(!heap->InFromSpace(target) &&
2978  !MarkCompactCollector::IsOnEvacuationCandidate(target));
2979  }
2980  }
2981 
2982  private:
2983  inline void UpdatePointer(Object** p) {
2984  UpdateSlot(heap_, p);
2985  }
2986 
2987  Heap* heap_;
2988 };
2989 
2990 
2991 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2992  ASSERT(*p == object);
2993 
2994  Address old_addr = object->address();
2995 
2996  Address new_addr = Memory::Address_at(old_addr);
2997 
2998  // The new space sweep will overwrite the map word of dead objects
2999  // with NULL. In this case we do not need to transfer this entry to
3000  // the store buffer which we are rebuilding.
3001  if (new_addr != NULL) {
3002  *p = HeapObject::FromAddress(new_addr);
3003  } else {
3004  // We have to zap this pointer, because the store buffer may overflow later,
3005  // and then we have to scan the entire heap and we don't want to find
3006  // spurious new space pointers in the old space.
3007  // TODO(mstarzinger): This was changed to a sentinel value to track down
3008  // rare crashes, change it back to Smi::FromInt(0) later.
3009  *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
3010  }
3011 }
3012 
3013 
3014 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
3015  Object** p) {
3016  MapWord map_word = HeapObject::cast(*p)->map_word();
3017 
3018  if (map_word.IsForwardingAddress()) {
3019  return String::cast(map_word.ToForwardingAddress());
3020  }
3021 
3022  return String::cast(*p);
3023 }
3024 
3025 
3026 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3027  int object_size) {
3028  // TODO(hpayer): Replace that check with an assert.
3029  CHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3030 
3031  OldSpace* target_space = heap()->TargetSpace(object);
3032 
3033  ASSERT(target_space == heap()->old_pointer_space() ||
3034  target_space == heap()->old_data_space());
3035  Object* result;
3036  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
3037  if (maybe_result->ToObject(&result)) {
3038  HeapObject* target = HeapObject::cast(result);
3039  MigrateObject(target,
3040  object,
3041  object_size,
3042  target_space->identity());
3043  heap()->mark_compact_collector()->tracer()->
3044  increment_promoted_objects_size(object_size);
3045  return true;
3046  }
3047 
3048  return false;
3049 }
3050 
3051 
3052 void MarkCompactCollector::EvacuateNewSpace() {
3053  // There are soft limits in the allocation code, designed to trigger a mark-
3054  // sweep collection by failing allocations. But since we are already in
3055  // a mark-sweep allocation, there is no sense in trying to trigger one.
3056  AlwaysAllocateScope scope(isolate());
3057  heap()->CheckNewSpaceExpansionCriteria();
3058 
3059  NewSpace* new_space = heap()->new_space();
3060 
3061  // Store allocation range before flipping semispaces.
3062  Address from_bottom = new_space->bottom();
3063  Address from_top = new_space->top();
3064 
3065  // Flip the semispaces. After flipping, to space is empty, from space has
3066  // live objects.
3067  new_space->Flip();
3068  new_space->ResetAllocationInfo();
3069 
3070  int survivors_size = 0;
3071 
3072  // First pass: traverse all objects in inactive semispace, remove marks,
3073  // migrate live objects and write forwarding addresses. This stage puts
3074  // new entries in the store buffer and may cause some pages to be marked
3075  // scan-on-scavenge.
3076  NewSpacePageIterator it(from_bottom, from_top);
3077  while (it.has_next()) {
3078  NewSpacePage* p = it.next();
3079  survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
3080  }
3081 
3082  heap_->IncrementYoungSurvivorsCounter(survivors_size);
3083  new_space->set_age_mark(new_space->top());
3084 }
3085 
3086 
3087 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3088  AlwaysAllocateScope always_allocate(isolate());
3089  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3090  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
3091  p->MarkSweptPrecisely();
3092 
3093  int offsets[16];
3094 
3095  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3096  Address cell_base = it.CurrentCellBase();
3097  MarkBit::CellType* cell = it.CurrentCell();
3098 
3099  if (*cell == 0) continue;
3100 
3101  int live_objects = MarkWordToObjectStarts(*cell, offsets);
3102  for (int i = 0; i < live_objects; i++) {
3103  Address object_addr = cell_base + offsets[i] * kPointerSize;
3104  HeapObject* object = HeapObject::FromAddress(object_addr);
3105  ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
3106 
3107  int size = object->Size();
3108 
3109  MaybeObject* target = space->AllocateRaw(size);
3110  if (target->IsFailure()) {
3111  // OS refused to give us memory.
3112  V8::FatalProcessOutOfMemory("Evacuation");
3113  return;
3114  }
3115 
3116  Object* target_object = target->ToObjectUnchecked();
3117 
3118  MigrateObject(HeapObject::cast(target_object),
3119  object,
3120  size,
3121  space->identity());
3122  ASSERT(object->map_word().IsForwardingAddress());
3123  }
3124 
3125  // Clear marking bits for current cell.
3126  *cell = 0;
3127  }
3128  p->ResetLiveBytes();
3129 }
3130 
3131 
3132 void MarkCompactCollector::EvacuatePages() {
3133  int npages = evacuation_candidates_.length();
3134  for (int i = 0; i < npages; i++) {
3135  Page* p = evacuation_candidates_[i];
3136  // TODO(hpayer): This check is just used for debugging purpose and
3137  // should be removed or turned into an assert after investigating the
3138  // crash in concurrent sweeping.
3139  CHECK(p->IsEvacuationCandidate() ||
3140  p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3141  CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0);
3142  if (p->IsEvacuationCandidate()) {
3143  // During compaction we might have to request a new page.
3144  // Check that the space still has room for that.
3145  if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3146  EvacuateLiveObjectsFromPage(p);
3147  } else {
3148  // Without room for expansion evacuation is not guaranteed to succeed.
3149  // Pessimistically abandon unevacuated pages.
3150  for (int j = i; j < npages; j++) {
3151  Page* page = evacuation_candidates_[j];
3152  slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3153  page->ClearEvacuationCandidate();
3154  page->SetFlag(Page::RESCAN_ON_EVACUATION);
3155  page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
3156  }
3157  return;
3158  }
3159  }
3160  }
3161 }
3162 
3163 
3164 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3165  public:
3166  virtual Object* RetainAs(Object* object) {
3167  if (object->IsHeapObject()) {
3168  HeapObject* heap_object = HeapObject::cast(object);
3169  MapWord map_word = heap_object->map_word();
3170  if (map_word.IsForwardingAddress()) {
3171  return map_word.ToForwardingAddress();
3172  }
3173  }
3174  return object;
3175  }
3176 };
3177 
3178 
3179 static inline void UpdateSlot(Isolate* isolate,
3180  ObjectVisitor* v,
3181  SlotsBuffer::SlotType slot_type,
3182  Address addr) {
3183  switch (slot_type) {
3184  case SlotsBuffer::CODE_TARGET_SLOT: {
3185  RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3186  rinfo.Visit(isolate, v);
3187  break;
3188  }
3189  case SlotsBuffer::CODE_ENTRY_SLOT: {
3190  v->VisitCodeEntry(addr);
3191  break;
3192  }
3193  case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3194  HeapObject* obj = HeapObject::FromAddress(addr);
3195  Code::cast(obj)->CodeIterateBody(v);
3196  break;
3197  }
3198  case SlotsBuffer::DEBUG_TARGET_SLOT: {
3199  RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3200  if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3201  break;
3202  }
3203  case SlotsBuffer::JS_RETURN_SLOT: {
3204  RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3205  if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3206  break;
3207  }
3208  case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3209  RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3210  rinfo.Visit(isolate, v);
3211  break;
3212  }
3213  default:
3214  UNREACHABLE();
3215  break;
3216  }
3217 }
3218 
3219 
3220 enum SweepingMode {
3221  SWEEP_ONLY,
3222  SWEEP_AND_VISIT_LIVE_OBJECTS
3223 };
3224 
3225 
3226 enum SkipListRebuildingMode {
3227  REBUILD_SKIP_LIST,
3228  IGNORE_SKIP_LIST
3229 };
3230 
3231 
3232 enum FreeSpaceTreatmentMode {
3233  IGNORE_FREE_SPACE,
3234  ZAP_FREE_SPACE
3235 };
3236 
3237 
3238 // Sweep a space precisely. After this has been done the space can
3239 // be iterated precisely, hitting only the live objects. Code space
3240 // is always swept precisely because we want to be able to iterate
3241 // over it. Map space is swept precisely, because it is not compacted.
3242 // Slots in live objects pointing into evacuation candidates are updated
3243 // if requested.
3244 template<SweepingMode sweeping_mode,
3245  SkipListRebuildingMode skip_list_mode,
3246  FreeSpaceTreatmentMode free_space_mode>
3247 static void SweepPrecisely(PagedSpace* space,
3248  Page* p,
3249  ObjectVisitor* v) {
3250  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3251  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3252  space->identity() == CODE_SPACE);
3253  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3254 
3255  double start_time = 0.0;
3256  if (FLAG_print_cumulative_gc_stat) {
3257  start_time = OS::TimeCurrentMillis();
3258  }
3259 
3260  p->MarkSweptPrecisely();
3261 
3262  Address free_start = p->area_start();
3263  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3264  int offsets[16];
3265 
3266  SkipList* skip_list = p->skip_list();
3267  int curr_region = -1;
3268  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3269  skip_list->Clear();
3270  }
3271 
3272  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3273  Address cell_base = it.CurrentCellBase();
3274  MarkBit::CellType* cell = it.CurrentCell();
3275  int live_objects = MarkWordToObjectStarts(*cell, offsets);
3276  int live_index = 0;
3277  for ( ; live_objects != 0; live_objects--) {
3278  Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3279  if (free_end != free_start) {
3280  if (free_space_mode == ZAP_FREE_SPACE) {
3281  memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
3282  }
3283  space->Free(free_start, static_cast<int>(free_end - free_start));
3284 #ifdef ENABLE_GDB_JIT_INTERFACE
3285  if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3286  GDBJITInterface::RemoveCodeRange(free_start, free_end);
3287  }
3288 #endif
3289  }
3290  HeapObject* live_object = HeapObject::FromAddress(free_end);
3291  ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3292  Map* map = live_object->map();
3293  int size = live_object->SizeFromMap(map);
3294  if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3295  live_object->IterateBody(map->instance_type(), size, v);
3296  }
3297  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3298  int new_region_start =
3299  SkipList::RegionNumber(free_end);
3300  int new_region_end =
3301  SkipList::RegionNumber(free_end + size - kPointerSize);
3302  if (new_region_start != curr_region ||
3303  new_region_end != curr_region) {
3304  skip_list->AddObject(free_end, size);
3305  curr_region = new_region_end;
3306  }
3307  }
3308  free_start = free_end + size;
3309  }
3310  // Clear marking bits for current cell.
3311  *cell = 0;
3312  }
3313  if (free_start != p->area_end()) {
3314  if (free_space_mode == ZAP_FREE_SPACE) {
3315  memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
3316  }
3317  space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3318 #ifdef ENABLE_GDB_JIT_INTERFACE
3319  if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3320  GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3321  }
3322 #endif
3323  }
3324  p->ResetLiveBytes();
3325  if (FLAG_print_cumulative_gc_stat) {
3326  space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
3327  }
3328 }
3329 
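// Illustrative sketch (not part of the original file) of the core loop in
// SweepPrecisely above: walk the live-object starts of one mark-bit cell in
// address order and hand every gap between them back to the free list.
// "starts"/"sizes" stand in for the offsets[] array produced by
// MarkWordToObjectStarts() and the object sizes read from their maps, and
// "free_range" for PagedSpace::Free.
static void SketchSweepCell(char* area_start, char* area_end,
                            const int* starts, const int* sizes, int count,
                            void (*free_range)(char* start, int size_in_bytes),
                            int pointer_size) {
  char* free_start = area_start;
  for (int i = 0; i < count; i++) {
    char* object_start = area_start + starts[i] * pointer_size;
    if (object_start != free_start) {
      free_range(free_start, static_cast<int>(object_start - free_start));
    }
    free_start = object_start + sizes[i];  // next gap begins after this object
  }
  if (free_start != area_end) {
    free_range(free_start, static_cast<int>(area_end - free_start));
  }
}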
3330 
3331 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3332  Page* p = Page::FromAddress(code->address());
3333 
3334  if (p->IsEvacuationCandidate() ||
3335  p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3336  return false;
3337  }
3338 
3339  Address code_start = code->address();
3340  Address code_end = code_start + code->Size();
3341 
3342  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3343  uint32_t end_index =
3344  MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3345 
3346  Bitmap* b = p->markbits();
3347 
3348  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3349  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3350 
3351  MarkBit::CellType* start_cell = start_mark_bit.cell();
3352  MarkBit::CellType* end_cell = end_mark_bit.cell();
3353 
3354  if (value) {
3355  MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3356  MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3357 
3358  if (start_cell == end_cell) {
3359  *start_cell |= start_mask & end_mask;
3360  } else {
3361  *start_cell |= start_mask;
3362  for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3363  *cell = ~0;
3364  }
3365  *end_cell |= end_mask;
3366  }
3367  } else {
3368  for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3369  *cell = 0;
3370  }
3371  }
3372 
3373  return true;
3374 }
3375 
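// Illustrative sketch (not part of the original source) of the mask
// arithmetic used above when the start and end mark bits fall into the same
// 32-bit cell (uint32_t, as in MarkBit::CellType): for a single-bit mask m,
// ~(m - 1) keeps the start bit and everything above it, while (m << 1) - 1
// keeps the end bit and everything below it, so their intersection covers
// exactly [start_index, end_index].
static inline uint32_t SketchRangeMask(uint32_t start_bit_mask,
                                       uint32_t end_bit_mask) {
  uint32_t start_mask = ~(start_bit_mask - 1);  // e.g. 1u << 2  ->  0xFFFFFFFC
  uint32_t end_mask = (end_bit_mask << 1) - 1;  // e.g. 1u << 6  ->  0x0000007F
  return start_mask & end_mask;                 // here: bits 2..6 set (0x7C)
}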
3376 
3377 static bool IsOnInvalidatedCodeObject(Address addr) {
3378  // We did not record any slots in large objects thus
3379  // we can safely go to the page from the slot address.
3380  Page* p = Page::FromAddress(addr);
3381 
3382  // First check owner's identity because old pointer and old data spaces
3383  // are swept lazily and might still have non-zero mark-bits on some
3384  // pages.
3385  if (p->owner()->identity() != CODE_SPACE) return false;
3386 
3387  // In code space only bits on evacuation candidates (but we don't record
3388  // any slots on them) and under invalidated code objects are non-zero.
3389  MarkBit mark_bit =
3390  p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3391 
3392  return mark_bit.Get();
3393 }
3394 
3395 
3396 void MarkCompactCollector::InvalidateCode(Code* code) {
3397  if (heap_->incremental_marking()->IsCompacting() &&
3398  !ShouldSkipEvacuationSlotRecording(code)) {
3399  ASSERT(compacting_);
3400 
3401  // If the object is white, then no slots have been recorded on it yet.
3402  MarkBit mark_bit = Marking::MarkBitFrom(code);
3403  if (Marking::IsWhite(mark_bit)) return;
3404 
3405  invalidated_code_.Add(code);
3406  }
3407 }
3408 
3409 
3410 // Return true if the given code is deoptimized or will be deoptimized.
3411 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3412  return code->marked_for_deoptimization();
3413 }
3414 
3415 
3416 bool MarkCompactCollector::MarkInvalidatedCode() {
3417  bool code_marked = false;
3418 
3419  int length = invalidated_code_.length();
3420  for (int i = 0; i < length; i++) {
3421  Code* code = invalidated_code_[i];
3422 
3423  if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3424  code_marked = true;
3425  }
3426  }
3427 
3428  return code_marked;
3429 }
3430 
3431 
3432 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3433  int length = invalidated_code_.length();
3434  for (int i = 0; i < length; i++) {
3435  if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3436  }
3437 }
3438 
3439 
3440 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3441  int length = invalidated_code_.length();
3442  for (int i = 0; i < length; i++) {
3443  Code* code = invalidated_code_[i];
3444  if (code != NULL) {
3445  code->Iterate(visitor);
3446  SetMarkBitsUnderInvalidatedCode(code, false);
3447  }
3448  }
3449  invalidated_code_.Rewind(0);
3450 }
3451 
3452 
3453 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3454  Heap::RelocationLock relocation_lock(heap());
3455 
3456  bool code_slots_filtering_required;
3457  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3458  code_slots_filtering_required = MarkInvalidatedCode();
3459  EvacuateNewSpace();
3460  }
3461 
3462  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3463  EvacuatePages();
3464  }
3465 
3466  // Second pass: find pointers to new space and update them.
3467  PointersUpdatingVisitor updating_visitor(heap());
3468 
3469  { GCTracer::Scope gc_scope(tracer_,
3470  GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3471  // Update pointers in to space.
3472  SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3473  heap()->new_space()->top());
3474  for (HeapObject* object = to_it.Next();
3475  object != NULL;
3476  object = to_it.Next()) {
3477  Map* map = object->map();
3478  object->IterateBody(map->instance_type(),
3479  object->SizeFromMap(map),
3480  &updating_visitor);
3481  }
3482  }
3483 
3484  { GCTracer::Scope gc_scope(tracer_,
3485  GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3486  // Update roots.
3487  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3488  }
3489 
3490  { GCTracer::Scope gc_scope(tracer_,
3491  GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3492  StoreBufferRebuildScope scope(heap_,
3493  heap_->store_buffer(),
3494  &Heap::ScavengeStoreBufferCallback);
3495  heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3496  &UpdatePointer);
3497  }
3498 
3499  { GCTracer::Scope gc_scope(tracer_,
3500  GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3501  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3502  migration_slots_buffer_,
3503  code_slots_filtering_required);
3504  if (FLAG_trace_fragmentation) {
3505  PrintF(" migration slots buffer: %d\n",
3506  SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3507  }
3508 
3509  if (compacting_ && was_marked_incrementally_) {
3510  // It's difficult to filter out slots recorded for large objects.
3511  LargeObjectIterator it(heap_->lo_space());
3512  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3513  // LargeObjectSpace is not swept yet thus we have to skip
3514  // dead objects explicitly.
3515  if (!IsMarked(obj)) continue;
3516 
3517  Page* p = Page::FromAddress(obj->address());
3518  if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3519  obj->Iterate(&updating_visitor);
3520  p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3521  }
3522  }
3523  }
3524  }
3525 
3526  int npages = evacuation_candidates_.length();
3527  { GCTracer::Scope gc_scope(
3528  tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3529  for (int i = 0; i < npages; i++) {
3530  Page* p = evacuation_candidates_[i];
3531  ASSERT(p->IsEvacuationCandidate() ||
3532  p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3533 
3534  if (p->IsEvacuationCandidate()) {
3535  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3536  p->slots_buffer(),
3537  code_slots_filtering_required);
3538  if (FLAG_trace_fragmentation) {
3539  PrintF(" page %p slots buffer: %d\n",
3540  reinterpret_cast<void*>(p),
3541  SlotsBuffer::SizeOfChain(p->slots_buffer()));
3542  }
3543 
3544  // Important: skip list should be cleared only after roots were updated
3545  // because root iteration traverses the stack and might have to find
3546  // code objects from non-updated pc pointing into evacuation candidate.
3547  SkipList* list = p->skip_list();
3548  if (list != NULL) list->Clear();
3549  } else {
3550  if (FLAG_gc_verbose) {
3551  PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3552  reinterpret_cast<intptr_t>(p));
3553  }
3554  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3555  p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3556 
3557  switch (space->identity()) {
3558  case OLD_DATA_SPACE:
3559  SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
3560  break;
3561  case OLD_POINTER_SPACE:
3562  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3563  IGNORE_SKIP_LIST,
3564  IGNORE_FREE_SPACE>(
3565  space, p, &updating_visitor);
3566  break;
3567  case CODE_SPACE:
3568  if (FLAG_zap_code_space) {
3569  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3570  REBUILD_SKIP_LIST,
3571  ZAP_FREE_SPACE>(
3572  space, p, &updating_visitor);
3573  } else {
3574  SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3575  REBUILD_SKIP_LIST,
3576  IGNORE_FREE_SPACE>(
3577  space, p, &updating_visitor);
3578  }
3579  break;
3580  default:
3581  UNREACHABLE();
3582  break;
3583  }
3584  }
3585  }
3586  }
3587 
3588  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3589 
3590  // Update pointers from cells.
3591  HeapObjectIterator cell_iterator(heap_->cell_space());
3592  for (HeapObject* cell = cell_iterator.Next();
3593  cell != NULL;
3594  cell = cell_iterator.Next()) {
3595  if (cell->IsCell()) {
3596  Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3597  }
3598  }
3599 
3600  HeapObjectIterator js_global_property_cell_iterator(
3601  heap_->property_cell_space());
3602  for (HeapObject* cell = js_global_property_cell_iterator.Next();
3603  cell != NULL;
3604  cell = js_global_property_cell_iterator.Next()) {
3605  if (cell->IsPropertyCell()) {
3606  PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3607  }
3608  }
3609 
3610  // Update the head of the native contexts list in the heap.
3611  updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3612 
3613  heap_->string_table()->Iterate(&updating_visitor);
3614  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3615  if (heap_->weak_object_to_code_table()->IsHashTable()) {
3616  WeakHashTable* table =
3617  WeakHashTable::cast(heap_->weak_object_to_code_table());
3618  table->Iterate(&updating_visitor);
3619  table->Rehash(heap_->undefined_value());
3620  }
3621 
3622  // Update pointers from external string table.
3623  heap_->UpdateReferencesInExternalStringTable(
3624  &UpdateReferenceInExternalStringTableEntry);
3625 
3626  EvacuationWeakObjectRetainer evacuation_object_retainer;
3627  heap()->ProcessWeakReferences(&evacuation_object_retainer);
3628 
3629  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3630  // under it.
3631  ProcessInvalidatedCode(&updating_visitor);
3632 
3633  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3634 
3635 #ifdef VERIFY_HEAP
3636  if (FLAG_verify_heap) {
3637  VerifyEvacuation(heap_);
3638  }
3639 #endif
3640 
3641  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3642  ASSERT(migration_slots_buffer_ == NULL);
3643 }
3644 
3645 
3646 void MarkCompactCollector::UnlinkEvacuationCandidates() {
3647  int npages = evacuation_candidates_.length();
3648  for (int i = 0; i < npages; i++) {
3649  Page* p = evacuation_candidates_[i];
3650  if (!p->IsEvacuationCandidate()) continue;
3651  p->Unlink();
3652  p->ClearSweptPrecisely();
3653  p->ClearSweptConservatively();
3654  }
3655 }
3656 
3657 
3658 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3659  int npages = evacuation_candidates_.length();
3660  for (int i = 0; i < npages; i++) {
3661  Page* p = evacuation_candidates_[i];
3662  if (!p->IsEvacuationCandidate()) continue;
3663  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3664  space->Free(p->area_start(), p->area_size());
3665  p->set_scan_on_scavenge(false);
3666  slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3667  p->ResetLiveBytes();
3668  space->ReleasePage(p, false);
3669  }
3670  evacuation_candidates_.Rewind(0);
3671  compacting_ = false;
3672  heap()->FreeQueuedChunks();
3673 }
3674 
3675 
3676 static const int kStartTableEntriesPerLine = 5;
3677 static const int kStartTableLines = 171;
3678 static const int kStartTableInvalidLine = 127;
3679 static const int kStartTableUnusedEntry = 126;
3680 
3681 #define _ kStartTableUnusedEntry
3682 #define X kStartTableInvalidLine
3683 // Mark-bit to object start offset table.
3684 //
3685 // The line is indexed by the mark bits in a byte. The first number on
3686 // the line describes the number of live object starts for the line and the
3687 // other numbers on the line describe the offsets (in words) of the object
3688 // starts.
3689 //
3690 // Since objects are at least 2 words large we don't have entries for two
3691 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
3692 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3693  0, _, _, _, _, // 0
3694  1, 0, _, _, _, // 1
3695  1, 1, _, _, _, // 2
3696  X, _, _, _, _, // 3
3697  1, 2, _, _, _, // 4
3698  2, 0, 2, _, _, // 5
3699  X, _, _, _, _, // 6
3700  X, _, _, _, _, // 7
3701  1, 3, _, _, _, // 8
3702  2, 0, 3, _, _, // 9
3703  2, 1, 3, _, _, // 10
3704  X, _, _, _, _, // 11
3705  X, _, _, _, _, // 12
3706  X, _, _, _, _, // 13
3707  X, _, _, _, _, // 14
3708  X, _, _, _, _, // 15
3709  1, 4, _, _, _, // 16
3710  2, 0, 4, _, _, // 17
3711  2, 1, 4, _, _, // 18
3712  X, _, _, _, _, // 19
3713  2, 2, 4, _, _, // 20
3714  3, 0, 2, 4, _, // 21
3715  X, _, _, _, _, // 22
3716  X, _, _, _, _, // 23
3717  X, _, _, _, _, // 24
3718  X, _, _, _, _, // 25
3719  X, _, _, _, _, // 26
3720  X, _, _, _, _, // 27
3721  X, _, _, _, _, // 28
3722  X, _, _, _, _, // 29
3723  X, _, _, _, _, // 30
3724  X, _, _, _, _, // 31
3725  1, 5, _, _, _, // 32
3726  2, 0, 5, _, _, // 33
3727  2, 1, 5, _, _, // 34
3728  X, _, _, _, _, // 35
3729  2, 2, 5, _, _, // 36
3730  3, 0, 2, 5, _, // 37
3731  X, _, _, _, _, // 38
3732  X, _, _, _, _, // 39
3733  2, 3, 5, _, _, // 40
3734  3, 0, 3, 5, _, // 41
3735  3, 1, 3, 5, _, // 42
3736  X, _, _, _, _, // 43
3737  X, _, _, _, _, // 44
3738  X, _, _, _, _, // 45
3739  X, _, _, _, _, // 46
3740  X, _, _, _, _, // 47
3741  X, _, _, _, _, // 48
3742  X, _, _, _, _, // 49
3743  X, _, _, _, _, // 50
3744  X, _, _, _, _, // 51
3745  X, _, _, _, _, // 52
3746  X, _, _, _, _, // 53
3747  X, _, _, _, _, // 54
3748  X, _, _, _, _, // 55
3749  X, _, _, _, _, // 56
3750  X, _, _, _, _, // 57
3751  X, _, _, _, _, // 58
3752  X, _, _, _, _, // 59
3753  X, _, _, _, _, // 60
3754  X, _, _, _, _, // 61
3755  X, _, _, _, _, // 62
3756  X, _, _, _, _, // 63
3757  1, 6, _, _, _, // 64
3758  2, 0, 6, _, _, // 65
3759  2, 1, 6, _, _, // 66
3760  X, _, _, _, _, // 67
3761  2, 2, 6, _, _, // 68
3762  3, 0, 2, 6, _, // 69
3763  X, _, _, _, _, // 70
3764  X, _, _, _, _, // 71
3765  2, 3, 6, _, _, // 72
3766  3, 0, 3, 6, _, // 73
3767  3, 1, 3, 6, _, // 74
3768  X, _, _, _, _, // 75
3769  X, _, _, _, _, // 76
3770  X, _, _, _, _, // 77
3771  X, _, _, _, _, // 78
3772  X, _, _, _, _, // 79
3773  2, 4, 6, _, _, // 80
3774  3, 0, 4, 6, _, // 81
3775  3, 1, 4, 6, _, // 82
3776  X, _, _, _, _, // 83
3777  3, 2, 4, 6, _, // 84
3778  4, 0, 2, 4, 6, // 85
3779  X, _, _, _, _, // 86
3780  X, _, _, _, _, // 87
3781  X, _, _, _, _, // 88
3782  X, _, _, _, _, // 89
3783  X, _, _, _, _, // 90
3784  X, _, _, _, _, // 91
3785  X, _, _, _, _, // 92
3786  X, _, _, _, _, // 93
3787  X, _, _, _, _, // 94
3788  X, _, _, _, _, // 95
3789  X, _, _, _, _, // 96
3790  X, _, _, _, _, // 97
3791  X, _, _, _, _, // 98
3792  X, _, _, _, _, // 99
3793  X, _, _, _, _, // 100
3794  X, _, _, _, _, // 101
3795  X, _, _, _, _, // 102
3796  X, _, _, _, _, // 103
3797  X, _, _, _, _, // 104
3798  X, _, _, _, _, // 105
3799  X, _, _, _, _, // 106
3800  X, _, _, _, _, // 107
3801  X, _, _, _, _, // 108
3802  X, _, _, _, _, // 109
3803  X, _, _, _, _, // 110
3804  X, _, _, _, _, // 111
3805  X, _, _, _, _, // 112
3806  X, _, _, _, _, // 113
3807  X, _, _, _, _, // 114
3808  X, _, _, _, _, // 115
3809  X, _, _, _, _, // 116
3810  X, _, _, _, _, // 117
3811  X, _, _, _, _, // 118
3812  X, _, _, _, _, // 119
3813  X, _, _, _, _, // 120
3814  X, _, _, _, _, // 121
3815  X, _, _, _, _, // 122
3816  X, _, _, _, _, // 123
3817  X, _, _, _, _, // 124
3818  X, _, _, _, _, // 125
3819  X, _, _, _, _, // 126
3820  X, _, _, _, _, // 127
3821  1, 7, _, _, _, // 128
3822  2, 0, 7, _, _, // 129
3823  2, 1, 7, _, _, // 130
3824  X, _, _, _, _, // 131
3825  2, 2, 7, _, _, // 132
3826  3, 0, 2, 7, _, // 133
3827  X, _, _, _, _, // 134
3828  X, _, _, _, _, // 135
3829  2, 3, 7, _, _, // 136
3830  3, 0, 3, 7, _, // 137
3831  3, 1, 3, 7, _, // 138
3832  X, _, _, _, _, // 139
3833  X, _, _, _, _, // 140
3834  X, _, _, _, _, // 141
3835  X, _, _, _, _, // 142
3836  X, _, _, _, _, // 143
3837  2, 4, 7, _, _, // 144
3838  3, 0, 4, 7, _, // 145
3839  3, 1, 4, 7, _, // 146
3840  X, _, _, _, _, // 147
3841  3, 2, 4, 7, _, // 148
3842  4, 0, 2, 4, 7, // 149
3843  X, _, _, _, _, // 150
3844  X, _, _, _, _, // 151
3845  X, _, _, _, _, // 152
3846  X, _, _, _, _, // 153
3847  X, _, _, _, _, // 154
3848  X, _, _, _, _, // 155
3849  X, _, _, _, _, // 156
3850  X, _, _, _, _, // 157
3851  X, _, _, _, _, // 158
3852  X, _, _, _, _, // 159
3853  2, 5, 7, _, _, // 160
3854  3, 0, 5, 7, _, // 161
3855  3, 1, 5, 7, _, // 162
3856  X, _, _, _, _, // 163
3857  3, 2, 5, 7, _, // 164
3858  4, 0, 2, 5, 7, // 165
3859  X, _, _, _, _, // 166
3860  X, _, _, _, _, // 167
3861  3, 3, 5, 7, _, // 168
3862  4, 0, 3, 5, 7, // 169
3863  4, 1, 3, 5, 7 // 170
3864 };
3865 #undef _
3866 #undef X
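
// Reading the table above (editor's sketch, not in the upstream source):
// take the mark-bit byte 0x12 == 18 == 0b00010010, i.e. bits 1 and 4 set.
// Line 18 of kStartTable reads "2, 1, 4": two live objects start in the
// 8-word region covered by this byte, at word offsets 1 and 4. Lines marked
// X (e.g. 3, 6, 7) correspond to byte values with two consecutive 1 bits,
// which cannot occur because every object is at least two words long.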
3867 
3868 
3869 // Takes a word of mark bits. Returns the number of objects that start in the
3870 // range. Puts the offsets of the words in the supplied array.
3871 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3872  int objects = 0;
3873  int offset = 0;
3874 
3875  // No consecutive 1 bits.
3876  ASSERT((mark_bits & 0x180) != 0x180);
3877  ASSERT((mark_bits & 0x18000) != 0x18000);
3878  ASSERT((mark_bits & 0x1800000) != 0x1800000);
3879 
3880  while (mark_bits != 0) {
3881  int byte = (mark_bits & 0xff);
3882  mark_bits >>= 8;
3883  if (byte != 0) {
3884  ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3885  char* table = kStartTable + byte * kStartTableEntriesPerLine;
3886  int objects_in_these_8_words = table[0];
3887  ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3888  ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3889  for (int i = 0; i < objects_in_these_8_words; i++) {
3890  starts[objects++] = offset + table[1 + i];
3891  }
3892  }
3893  offset += 8;
3894  }
3895  return objects;
3896 }
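
// Worked example (editor's sketch, not in the upstream source): decoding a
// whole cell. For mark_bits == 0x00001201 (bits 0, 9 and 12 set) the loop
// above runs byte by byte:
//   byte 0x01 -> table line 1  = "1, 0"    -> starts[0] = 0
//   byte 0x12 -> table line 18 = "2, 1, 4" -> starts[1] = 8 + 1 = 9
//                                             starts[2] = 8 + 4 = 12
// and the function returns 3: three objects start at word offsets 0, 9 and 12
// within the 32-word region described by the cell.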
3897 
3898 
3899 static inline Address DigestFreeStart(Address approximate_free_start,
3900  uint32_t free_start_cell) {
3901  ASSERT(free_start_cell != 0);
3902 
3903  // No consecutive 1 bits.
3904  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3905 
3906  int offsets[16];
3907  uint32_t cell = free_start_cell;
3908  int offset_of_last_live;
3909  if ((cell & 0x80000000u) != 0) {
3910  // This case would overflow below.
3911  offset_of_last_live = 31;
3912  } else {
3913  // Remove all but one bit, the most significant. This is an optimization
3914  // that may or may not be worthwhile.
3915  cell |= cell >> 16;
3916  cell |= cell >> 8;
3917  cell |= cell >> 4;
3918  cell |= cell >> 2;
3919  cell |= cell >> 1;
3920  cell = (cell + 1) >> 1;
3921  int live_objects = MarkWordToObjectStarts(cell, offsets);
3922  ASSERT(live_objects == 1);
3923  offset_of_last_live = offsets[live_objects - 1];
3924  }
3925  Address last_live_start =
3926  approximate_free_start + offset_of_last_live * kPointerSize;
3927  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3928  Address free_start = last_live_start + last_live->Size();
3929  return free_start;
3930 }
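
// Worked example (editor's sketch, not in the upstream source): the
// shift-and-or cascade above smears the highest 1 bit downwards and then
// keeps only that bit. For free_start_cell == 0x50 (bits 4 and 6 set):
//   after the five ORs: cell == 0x7F
//   (0x7F + 1) >> 1    == 0x40             // only bit 6 survives
// MarkWordToObjectStarts(0x40, ...) then yields the single offset 6, so the
// last live object in this 32-word region starts 6 words past
// approximate_free_start, and the free area begins right after that
// object's end.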
3931 
3932 
3933 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3934  ASSERT(cell != 0);
3935 
3936  // No consecutive 1 bits.
3937  ASSERT((cell & (cell << 1)) == 0);
3938 
3939  int offsets[16];
3940  if (cell == 0x80000000u) { // Avoid overflow below.
3941  return block_address + 31 * kPointerSize;
3942  }
3943  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3944  ASSERT((first_set_bit & cell) == first_set_bit);
3945  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3946  ASSERT(live_objects == 1);
3947  USE(live_objects);
3948  return block_address + offsets[0] * kPointerSize;
3949 }
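
// Worked example (editor's sketch, not in the upstream source): the
// expression ((cell ^ (cell - 1)) + 1) >> 1 isolates the lowest 1 bit of the
// cell. For cell == 0x50 (bits 4 and 6 set):
//   cell - 1          == 0x4F
//   cell ^ (cell - 1) == 0x1F
//   (0x1F + 1) >> 1   == 0x10              // only bit 4 survives
// MarkWordToObjectStarts(0x10, ...) then yields the single offset 4, so the
// first live object in the block starts 4 words past block_address.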
3950 
3951 
3952 template<MarkCompactCollector::SweepingParallelism mode>
3953 static intptr_t Free(PagedSpace* space,
3954  FreeList* free_list,
3955  Address start,
3956  int size) {
3957  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
3958  return space->Free(start, size);
3959  } else {
3960  return size - free_list->Free(start, size);
3961  }
3962 }
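
// Editor's note (sketch, not in the upstream source): both branches are meant
// to report the number of bytes that actually became reusable. In sequential
// sweeping the block goes straight onto the space's own free list via
// space->Free(). In parallel sweeping it goes onto the sweeper thread's
// private free list instead; assuming the usual FreeList::Free() contract of
// returning the bytes it could not accept (pieces too small to link in),
// size - free_list->Free(start, size) again yields the usable bytes.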
3963 
3964 
3965 // Force instantiation of templatized SweepConservatively method for
3966 // SWEEP_SEQUENTIALLY mode.
3967 template intptr_t MarkCompactCollector::
3968  SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
3969  PagedSpace*, FreeList*, Page*);
3970 
3971 
3972 // Force instantiation of templatized SweepConservatively method for
3973 // SWEEP_IN_PARALLEL mode.
3974 template intptr_t MarkCompactCollector::
3975  SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3976  PagedSpace*, FreeList*, Page*);
3977 
3978 
3979 // Sweeps a space conservatively. After this has been done the larger free
3980 // spaces have been put on the free list and the smaller ones have been
3981 // ignored and left untouched. A free space is always either ignored or put
3982 // on the free list, never split up into two parts. This is important
3983 // because it means that any FreeSpace maps left actually describe a region of
3984 // memory that can be ignored when scanning. Dead objects other than free
3985 // spaces will not contain the free space map.
3986 template<MarkCompactCollector::SweepingParallelism mode>
3987 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
3988  FreeList* free_list,
3989  Page* p) {
3990  // TODO(hpayer): This check is just used for debugging purposes and
3991  // should be removed or turned into an assert after investigating the
3992  // crash in concurrent sweeping.
3993  CHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3994  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3995  free_list != NULL) ||
3996  (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
3997  free_list == NULL));
3998 
3999  // When parallel sweeping is active, the page will be marked after
4000  // sweeping by the main thread.
4001  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
4002  p->MarkSweptConservatively();
4003  }
4004 
4005  intptr_t freed_bytes = 0;
4006  size_t size = 0;
4007 
4008  // Skip over all the dead objects at the start of the page and mark them free.
4009  Address cell_base = 0;
4010  MarkBit::CellType* cell = NULL;
4011  MarkBitCellIterator it(p);
4012  for (; !it.Done(); it.Advance()) {
4013  cell_base = it.CurrentCellBase();
4014  cell = it.CurrentCell();
4015  if (*cell != 0) break;
4016  }
4017 
4018  if (it.Done()) {
4019  size = p->area_end() - p->area_start();
4020  freed_bytes += Free<mode>(space, free_list, p->area_start(),
4021  static_cast<int>(size));
4022  ASSERT_EQ(0, p->LiveBytes());
4023  return freed_bytes;
4024  }
4025 
4026  // Grow the size of the start-of-page free space a little to get up to the
4027  // first live object.
4028  Address free_end = StartOfLiveObject(cell_base, *cell);
4029  // Free the first free space.
4030  size = free_end - p->area_start();
4031  freed_bytes += Free<mode>(space, free_list, p->area_start(),
4032  static_cast<int>(size));
4033 
4034  // The start of the current free area is represented in undigested form by
4035  // the address of the last 32-word section that contained a live object and
4036  // the marking bitmap for that cell, which describes where the live object
4037  // started. Unless we find a large free space in the bitmap we will not
4038  // digest this pair into a real address. We start the iteration here at the
4039  // first word in the marking bit map that indicates a live object.
4040  Address free_start = cell_base;
4041  MarkBit::CellType free_start_cell = *cell;
4042 
4043  for (; !it.Done(); it.Advance()) {
4044  cell_base = it.CurrentCellBase();
4045  cell = it.CurrentCell();
4046  if (*cell != 0) {
4047  // We have a live object. Check approximately whether it is more than 32
4048  // words since the last live object.
4049  if (cell_base - free_start > 32 * kPointerSize) {
4050  free_start = DigestFreeStart(free_start, free_start_cell);
4051  if (cell_base - free_start > 32 * kPointerSize) {
4052  // Now that we know the exact start of the free space it still looks
4053  // like we have a large enough free space to be worth bothering with,
4054  // so now we need to find the start of the first live object at the
4055  // end of the free space.
4056  free_end = StartOfLiveObject(cell_base, *cell);
4057  freed_bytes += Free<mode>(space, free_list, free_start,
4058  static_cast<int>(free_end - free_start));
4059  }
4060  }
4061  // Update our undigested record of where the current free area started.
4062  free_start = cell_base;
4063  free_start_cell = *cell;
4064  // Clear marking bits for current cell.
4065  *cell = 0;
4066  }
4067  }
4068 
4069  // Handle the free space at the end of the page.
4070  if (cell_base - free_start > 32 * kPointerSize) {
4071  free_start = DigestFreeStart(free_start, free_start_cell);
4072  freed_bytes += Free<mode>(space, free_list, free_start,
4073  static_cast<int>(p->area_end() - free_start));
4074  }
4075 
4076  p->ResetLiveBytes();
4077  return freed_bytes;
4078 }
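
// Editor's note (sketch, not in the upstream source): the "32 * kPointerSize"
// threshold above corresponds to one full mark-bit cell worth of memory
// (32 words, e.g. 256 bytes if kPointerSize is 8). Smaller gaps between live
// objects are deliberately ignored; only gaps that still look larger than a
// cell after DigestFreeStart() has pinned down their exact start are handed
// to the free list, which keeps the conservative sweep cheap and ensures that
// a free space is either taken whole or skipped, never split.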
4079 
4080 
4081 void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
4082  PageIterator it(space);
4083  FreeList* free_list = space == heap()->old_pointer_space()
4084  ? free_list_old_pointer_space_.get()
4085  : free_list_old_data_space_.get();
4086  FreeList private_free_list(space);
4087  while (it.has_next()) {
4088  Page* p = it.next();
4089 
4090  if (p->TryParallelSweeping()) {
4091  SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
4092  free_list->Concatenate(&private_free_list);
4093  p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
4094  }
4095  }
4096 }
4097 
4098 
4099 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4100  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
4101  sweeper == LAZY_CONSERVATIVE ||
4102  sweeper == PARALLEL_CONSERVATIVE ||
4103  sweeper == CONCURRENT_CONSERVATIVE);
4104  space->ClearStats();
4105 
4106  PageIterator it(space);
4107 
4108  int pages_swept = 0;
4109  bool lazy_sweeping_active = false;
4110  bool unused_page_present = false;
4111  bool parallel_sweeping_active = false;
4112 
4113  while (it.has_next()) {
4114  Page* p = it.next();
4115 
4118 
4119  // Clear sweeping flags indicating that marking bits are still intact.
4120  p->ClearSweptPrecisely();
4121  p->ClearSweptConservatively();
4122 
4123  if (p->IsEvacuationCandidate()) {
4124  // Will be processed in EvacuateNewSpaceAndCandidates.
4125  ASSERT(evacuation_candidates_.length() > 0);
4126  continue;
4127  }
4128 
4129  // One unused page is kept, all further are released before sweeping them.
4130  if (p->LiveBytes() == 0) {
4131  if (unused_page_present) {
4132  if (FLAG_gc_verbose) {
4133  PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4134  reinterpret_cast<intptr_t>(p));
4135  }
4136  // Adjust unswept free bytes because releasing a page expects said
4137  // counter to be accurate for unswept pages.
4138  space->IncreaseUnsweptFreeBytes(p);
4139  space->ReleasePage(p, true);
4140  continue;
4141  }
4142  unused_page_present = true;
4143  }
4144 
4145  switch (sweeper) {
4146  case CONSERVATIVE: {
4147  if (FLAG_gc_verbose) {
4148  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4149  reinterpret_cast<intptr_t>(p));
4150  }
4151  SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4152  pages_swept++;
4153  break;
4154  }
4155  case LAZY_CONSERVATIVE: {
4156  if (lazy_sweeping_active) {
4157  if (FLAG_gc_verbose) {
4158  PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
4159  reinterpret_cast<intptr_t>(p));
4160  }
4161  space->IncreaseUnsweptFreeBytes(p);
4162  } else {
4163  if (FLAG_gc_verbose) {
4164  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4165  reinterpret_cast<intptr_t>(p));
4166  }
4167  SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4168  pages_swept++;
4169  space->SetPagesToSweep(p->next_page());
4170  lazy_sweeping_active = true;
4171  }
4172  break;
4173  }
4174  case CONCURRENT_CONSERVATIVE:
4175  case PARALLEL_CONSERVATIVE: {
4176  if (!parallel_sweeping_active) {
4177  if (FLAG_gc_verbose) {
4178  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4179  reinterpret_cast<intptr_t>(p));
4180  }
4181  SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4182  pages_swept++;
4183  parallel_sweeping_active = true;
4184  } else {
4185  if (FLAG_gc_verbose) {
4186  PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4187  reinterpret_cast<intptr_t>(p));
4188  }
4189  p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
4190  space->IncreaseUnsweptFreeBytes(p);
4191  }
4192  break;
4193  }
4194  case PRECISE: {
4195  if (FLAG_gc_verbose) {
4196  PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4197  reinterpret_cast<intptr_t>(p));
4198  }
4199  if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4200  SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
4201  space, p, NULL);
4202  } else if (space->identity() == CODE_SPACE) {
4203  SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
4204  space, p, NULL);
4205  } else {
4206  SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
4207  space, p, NULL);
4208  }
4209  pages_swept++;
4210  break;
4211  }
4212  default: {
4213  UNREACHABLE();
4214  }
4215  }
4216  }
4217 
4218  if (FLAG_gc_verbose) {
4219  PrintF("SweepSpace: %s (%d pages swept)\n",
4220  AllocationSpaceName(space->identity()),
4221  pages_swept);
4222  }
4223 
4224  // Give pages that are queued to be freed back to the OS.
4225  heap()->FreeQueuedChunks();
4226 }
4227 
4228 
4229 void MarkCompactCollector::SweepSpaces() {
4230  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4231 #ifdef DEBUG
4232  state_ = SWEEP_SPACES;
4233 #endif
4234  SweeperType how_to_sweep =
4235  FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
4236  if (isolate()->num_sweeper_threads() > 0) {
4237  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4238  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4239  }
4240  if (sweep_precisely_) how_to_sweep = PRECISE;
4241 
4242  // Unlink evacuation candidates before sweeper threads access the list of
4243  // pages to avoid race condition.
4244  UnlinkEvacuationCandidates();
4245 
4246  // Noncompacting collections simply sweep the spaces to clear the mark
4247  // bits and free the nonlive blocks (for old and map spaces). We sweep
4248  // the map space last because freeing non-live maps overwrites them and
4249  // the other spaces rely on possibly non-live maps to get the sizes for
4250  // non-live objects.
4251  { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
4252  { SequentialSweepingScope scope(this);
4253  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4254  SweepSpace(heap()->old_data_space(), how_to_sweep);
4255  }
4256 
4257  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
4258  how_to_sweep == CONCURRENT_CONSERVATIVE) {
4259  // TODO(hpayer): fix race with concurrent sweeper
4260  StartSweeperThreads();
4261  }
4262 
4263  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
4264  WaitUntilSweepingCompleted();
4265  }
4266  }
4267  RemoveDeadInvalidatedCode();
4268  SweepSpace(heap()->code_space(), PRECISE);
4269 
4270  SweepSpace(heap()->cell_space(), PRECISE);
4271  SweepSpace(heap()->property_cell_space(), PRECISE);
4272 
4273  EvacuateNewSpaceAndCandidates();
4274 
4275  // ClearNonLiveTransitions depends on precise sweeping of map space to
4276  // detect whether unmarked map became dead in this collection or in one
4277  // of the previous ones.
4278  SweepSpace(heap()->map_space(), PRECISE);
4279 
4280  // Deallocate unmarked objects and clear marked bits for marked objects.
4281  heap_->lo_space()->FreeUnmarkedObjects();
4282 
4283  // Deallocate evacuated candidate pages.
4284  ReleaseEvacuationCandidates();
4285 }
4286 
4287 
4288 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4289  PageIterator it(space);
4290  while (it.has_next()) {
4291  Page* p = it.next();
4292  if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
4293  p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
4294  p->MarkSweptConservatively();
4295  }
4296  ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
4297  }
4298 }
4299 
4300 
4301 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4302  ParallelSweepSpaceComplete(heap()->old_pointer_space());
4303  ParallelSweepSpaceComplete(heap()->old_data_space());
4304 }
4305 
4306 
4307 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4308 #ifdef ENABLE_DEBUGGER_SUPPORT
4309  if (isolate()->debug()->IsLoaded() ||
4310  isolate()->debug()->has_break_points()) {
4311  enable = false;
4312  }
4313 #endif
4314 
4315  if (enable) {
4316  if (code_flusher_ != NULL) return;
4317  code_flusher_ = new CodeFlusher(isolate());
4318  } else {
4319  if (code_flusher_ == NULL) return;
4320  code_flusher_->EvictAllCandidates();
4321  delete code_flusher_;
4322  code_flusher_ = NULL;
4323  }
4324 
4325  if (FLAG_trace_code_flushing) {
4326  PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4327  }
4328 }
4329 
4330 
4331 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4332 // Our profiling tools do not expect intersections between
4333 // code objects. We should either reenable it or change our tools.
4334 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4335  Isolate* isolate) {
4336 #ifdef ENABLE_GDB_JIT_INTERFACE
4337  if (obj->IsCode()) {
4338  GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
4339  }
4340 #endif
4341  if (obj->IsCode()) {
4342  PROFILE(isolate, CodeDeleteEvent(obj->address()));
4343  }
4344 }
4345 
4346 
4347 Isolate* MarkCompactCollector::isolate() {
4348  return heap_->isolate();
4349 }
4350 
4351 
4352 void MarkCompactCollector::Initialize() {
4353  MarkCompactMarkingVisitor::Initialize();
4354  IncrementalMarking::Initialize();
4355 }
4356 
4357 
4358 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4359  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4360 }
4361 
4362 
4363 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4364  SlotsBuffer** buffer_address,
4365  SlotType type,
4366  Address addr,
4367  AdditionMode mode) {
4368  SlotsBuffer* buffer = *buffer_address;
4369  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4370  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4371  allocator->DeallocateChain(buffer_address);
4372  return false;
4373  }
4374  buffer = allocator->AllocateBuffer(buffer);
4375  *buffer_address = buffer;
4376  }
4377  ASSERT(buffer->HasSpaceForTypedSlot());
4378  buffer->Add(reinterpret_cast<ObjectSlot>(type));
4379  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4380  return true;
4381 }
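
// Editor's sketch (not in the upstream source): a typed slot occupies two
// consecutive entries in the buffer. Recording, say, a code-target patch at
// address pc ends up as
//   slots_[i]     == reinterpret_cast<ObjectSlot>(CODE_TARGET_SLOT)
//   slots_[i + 1] == reinterpret_cast<ObjectSlot>(pc)
// UpdateSlots() below recognizes the first entry via IsTypedSlot() (its value
// is smaller than NUMBER_OF_SLOT_TYPES, so it cannot be a real heap address)
// and consumes the following entry as the address to update.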
4382 
4383 
4384 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4385  if (RelocInfo::IsCodeTarget(rmode)) {
4386  return SlotsBuffer::CODE_TARGET_SLOT;
4387  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4388  return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4389  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4390  return SlotsBuffer::DEBUG_TARGET_SLOT;
4391  } else if (RelocInfo::IsJSReturn(rmode)) {
4392  return SlotsBuffer::JS_RETURN_SLOT;
4393  }
4394  UNREACHABLE();
4395  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4396 }
4397 
4398 
4399 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4400  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4401  RelocInfo::Mode rmode = rinfo->rmode();
4402  if (target_page->IsEvacuationCandidate() &&
4403  (rinfo->host() == NULL ||
4404  !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4405  bool success;
4406  if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4407  // This doesn't need to be typed since it is just a normal heap pointer.
4408  Object** target_pointer =
4409  reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4410  success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
4411  target_page->slots_buffer_address(),
4412  target_pointer,
4413  SlotsBuffer::FAIL_ON_OVERFLOW);
4414  } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4415  success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
4416  target_page->slots_buffer_address(),
4417  SlotsBuffer::CODE_ENTRY_SLOT,
4418  rinfo->constant_pool_entry_address(),
4419  SlotsBuffer::FAIL_ON_OVERFLOW);
4420  } else {
4421  success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
4422  target_page->slots_buffer_address(),
4423  SlotTypeForRMode(rmode),
4424  rinfo->pc(),
4425  SlotsBuffer::FAIL_ON_OVERFLOW);
4426  }
4427  if (!success) {
4428  EvictEvacuationCandidate(target_page);
4429  }
4430  }
4431 }
4432 
4433 
4434 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4435  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4436  if (target_page->IsEvacuationCandidate() &&
4437  !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4438  if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4439  target_page->slots_buffer_address(),
4440  SlotsBuffer::CODE_ENTRY_SLOT,
4441  slot,
4442  SlotsBuffer::FAIL_ON_OVERFLOW)) {
4443  EvictEvacuationCandidate(target_page);
4444  }
4445  }
4446 }
4447 
4448 
4449 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4450  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
4451  if (is_compacting()) {
4452  Code* host = isolate()->inner_pointer_to_code_cache()->
4453  GcSafeFindCodeForInnerPointer(pc);
4454  MarkBit mark_bit = Marking::MarkBitFrom(host);
4455  if (Marking::IsBlack(mark_bit)) {
4456  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4457  RecordRelocSlot(&rinfo, target);
4458  }
4459  }
4460 }
4461 
4462 
4463 static inline SlotsBuffer::SlotType DecodeSlotType(
4464  SlotsBuffer::ObjectSlot slot) {
4465  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4466 }
4467 
4468 
4469 void SlotsBuffer::UpdateSlots(Heap* heap) {
4470  PointersUpdatingVisitor v(heap);
4471 
4472  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4473  ObjectSlot slot = slots_[slot_idx];
4474  if (!IsTypedSlot(slot)) {
4475  PointersUpdatingVisitor::UpdateSlot(heap, slot);
4476  } else {
4477  ++slot_idx;
4478  ASSERT(slot_idx < idx_);
4479  UpdateSlot(heap->isolate(),
4480  &v,
4481  DecodeSlotType(slot),
4482  reinterpret_cast<Address>(slots_[slot_idx]));
4483  }
4484  }
4485 }
4486 
4487 
4488 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4489  PointersUpdatingVisitor v(heap);
4490 
4491  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4492  ObjectSlot slot = slots_[slot_idx];
4493  if (!IsTypedSlot(slot)) {
4494  if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4495  PointersUpdatingVisitor::UpdateSlot(heap, slot);
4496  }
4497  } else {
4498  ++slot_idx;
4499  ASSERT(slot_idx < idx_);
4500  Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4501  if (!IsOnInvalidatedCodeObject(pc)) {
4502  UpdateSlot(heap->isolate(),
4503  &v,
4504  DecodeSlotType(slot),
4505  reinterpret_cast<Address>(slots_[slot_idx]));
4506  }
4507  }
4508  }
4509 }
4510 
4511 
4512 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4513  return new SlotsBuffer(next_buffer);
4514 }
4515 
4516 
4517 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4518  delete buffer;
4519 }
4520 
4521 
4522 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4523  SlotsBuffer* buffer = *buffer_address;
4524  while (buffer != NULL) {
4525  SlotsBuffer* next_buffer = buffer->next();
4526  DeallocateBuffer(buffer);
4527  buffer = next_buffer;
4528  }
4529  *buffer_address = NULL;
4530 }
4531 
4532 
4533 } } // namespace v8::internal