MarkCompactCollector::MarkCompactCollector(Heap* heap) :
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      was_marked_incrementally_(false),
      sweeping_pending_(false),
      pending_sweeper_jobs_semaphore_(0),
      sequential_sweeping_(false),
      migration_slots_buffer_(NULL),
      encountered_weak_collections_(NULL),
      have_code_to_deoptimize_(false) { }
class VerifyMarkingVisitor: public ObjectVisitor {
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(heap_->mark_compact_collector()->IsMarked(object));

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();

  void VisitCell(RelocInfo* rinfo) {
    Code* code = rinfo->host();
    ASSERT(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);

  VerifyMarkingVisitor visitor(heap);

    CHECK(current >= next_object_must_be_here_or_later);
    object->Iterate(&visitor);
    next_object_must_be_here_or_later = current + object->Size();

static void VerifyMarking(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), end);

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);

static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    VerifyMarking(space->heap(), p->area_start(), p->area_end());

static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->property_cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {

class VerifyEvacuationVisitor: public ObjectVisitor {

    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));

  VerifyEvacuationVisitor visitor;

    CHECK(current >= next_object_must_be_here_or_later);
    object->Iterate(&visitor);
    next_object_must_be_here_or_later = current + object->Size();

static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      object->Iterate(&visitor);
      current += object->Size();

static void VerifyEvacuation(PagedSpace* space) {
  if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
      space->was_swept_conservatively()) return;
  PageIterator it(space);

  while (it.has_next()) {
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());

static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->property_cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
#endif  // VERIFY_HEAP
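
// Debug-only check that objects reachable from code do not leak across
// native contexts: the visitor remembers the first native context it sees
// and CHECKs that every later context it encounters is the same one.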
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}

    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        if (object->IsString()) continue;
        switch (object->map()->instance_type()) {
            if (object->IsContext()) {
              CheckContext(object);
              int length = array->length();
              array->set_length(0);
              array->set_length(length);
        object->Iterate(this);

  void CheckContext(Object* context) {
    if (!context->IsContext()) return;
    if (current_native_context_ == NULL) {
      current_native_context_ = native_context;
      CHECK_EQ(current_native_context_, native_context);

  Context* current_native_context_;

static void VerifyNativeContextSeparation(Heap* heap) {
  HeapObjectIterator it(heap->code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    VerifyNativeContextSeparationVisitor visitor;

  evacuation_candidates_.Add(p);

static void TraceFragmentation(PagedSpace* space) {
  intptr_t reserved = (number_of_pages * space->AreaSize());

  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);

  ASSERT(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) return false;

  if (FLAG_compact_code_space &&
      FLAG_incremental_code_compaction)) {
  } else if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->code_space());

  if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->map_space());
    TraceFragmentation(heap()->cell_space());
    TraceFragmentation(heap()->property_cell_space());

  compacting_ = evacuation_candidates_.length() > 0;

  ASSERT(state_ == PREPARE_GC);

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ClearWeakCollections();

  if (FLAG_verify_heap) {
    VerifyMarking(heap_);

  if (!FLAG_collect_maps) ReattachInitialMaps();

  if (FLAG_verify_native_context_separation) {
    VerifyNativeContextSeparation(heap_);

  if (heap()->weak_embedded_objects_verification_enabled()) {
    VerifyWeakEmbeddedObjectsInOptimizedCode();
  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    VerifyOmittedMapChecks();

void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));

void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next();
       obj != NULL;
       obj = code_iterator.Next()) {
    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();

void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next();
       obj != NULL;
       obj = iterator.Next()) {
    map->VerifyOmittedMapChecks();
#endif  // VERIFY_HEAP

static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {

static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {

  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

      : heap_(heap), space_(space) {}

void MarkCompactCollector::StartSweeperThreads() {
  CHECK(free_list_old_pointer_space_.get()->IsEmpty());
  CHECK(free_list_old_data_space_.get()->IsEmpty());
  sweeping_pending_ = true;
  if (FLAG_job_based_sweeping) {
        new SweeperTask(heap(), heap()->old_data_space()),
        new SweeperTask(heap(), heap()->old_pointer_space()),

  ASSERT(sweeping_pending_ == true);
  if (FLAG_job_based_sweeping) {
    pending_sweeper_jobs_semaphore_.Wait();
    pending_sweeper_jobs_semaphore_.Wait();
  ParallelSweepSpacesComplete();
  sweeping_pending_ = false;

  if (space == heap()->old_pointer_space()) {
    free_list = free_list_old_pointer_space_.get();
  } else if (space == heap()->old_data_space()) {
    free_list = free_list_old_data_space_.get();

  return sweeping_pending_;

  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

  ObjectColor old_color = Color(old_mark_bit);

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
  } else if (Marking::IsGrey(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));

  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);

      return "PROPERTY_CELL_SPACE";
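
// Estimate how fragmented a swept page is from its free-list statistics.
// The ratio weights medium/large (or small/medium) free chunks against the
// page area; pages above the threshold report a positive fragmentation score.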
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),

  PagedSpace::SizeStats sizes;
  space->ObtainFreeListStatistics(p, &sizes);

  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
    ratio_threshold = 10;
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
    ratio_threshold = 15;

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           (ratio > ratio_threshold) ? "[fragmented]" : "");

  if (FLAG_always_compact && sizes.Total() != area_size) {

  if (ratio <= ratio_threshold) return 0;

  return static_cast<int>(ratio - ratio_threshold);
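
// Candidate selection for compaction: at most roughly sqrt(pages / 2) + 1
// pages are chosen (more under --stress-compaction or when reducing the
// memory footprint), keeping the most fragmented pages seen so far.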
  static const int kMaxMaxEvacuationCandidates = 1000;
  int max_evacuation_candidates =
      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;

    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

    REDUCE_MEMORY_FOOTPRINT

  intptr_t reserved = number_of_pages * space->AreaSize();

  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;

  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
           "evacuation candidate limit: %d\n",
           static_cast<double>(over_reserved) / MB,
           static_cast<double>(reserved) / MB,
           static_cast<int>(kFreenessThreshold),
           max_evacuation_candidates);
  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();

  while (it.has_next()) {
    if (FLAG_stress_compaction) {
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      if (estimated_release >= over_reserved) {

      intptr_t free_bytes = 0;
        free_bytes = sizes.Total();

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += free_bytes;
        fragmentation = free_pct;

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      fragmentation = FreeListFragmentation(space, p);

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
        for (int i = 0; i < max_evacuation_candidates; i++) {
              candidates[i].fragmentation() < least->fragmentation()) {
            least = candidates + i;
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);

  for (int i = 0; i < count; i++) {

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",

  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];

  evacuation_candidates_.Rewind(0);
  invalidated_code_.Rewind(0);
  ASSERT_EQ(0, evacuation_candidates_.length());

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (was_marked_incrementally_ && abort_incremental_marking_) {
    was_marked_incrementally_ = false;

  if (!FLAG_never_compact && !was_marked_incrementally_) {

  PagedSpaces spaces(heap());
       space = spaces.next()) {
    space->PrepareForMarkCompact();

  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();

void MarkCompactCollector::Finish() {
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);

  if (have_code_to_deoptimize_) {
    have_code_to_deoptimize_ = false;
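
// Code flushing: candidates whose compiled code was not marked during GC get
// their code reset to the lazy-compile stub; candidates whose code is still
// live simply keep it.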
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile =
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
      candidate->set_code(code);

    RecordCodeEntrySlot(slot, target);

    Object** shared_code_slot =
    RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;

  jsfunction_candidates_head_ = NULL;

void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile =

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      candidate->set_code(lazy_compile);

    RecordSlot(code_slot, code_slot, *code_slot);

    candidate = next_candidate;

  shared_function_info_candidates_head_ = NULL;

void CodeFlusher::ProcessOptimizedCodeMaps() {
  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;

  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    ClearNextCodeMap(holder);

    int old_length = code_map->length();
      if (!Marking::MarkBitFrom(code).Get()) continue;
      int dst_index = new_length++;
      Object** slot = code_map->RawFieldOfElementAt(dst_index);
      Object* object = code_map->get(i + j);
      code_map->set(dst_index, object);
        RecordSlot(slot, slot, *slot);

    if (new_length < old_length) {
      holder->TrimOptimizedCodeMap(old_length - new_length);

    holder = next_holder;

  optimized_code_map_holder_head_ = NULL;

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");

  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);

      candidate = next_candidate;

  ASSERT(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();

  JSFunction* candidate = jsfunction_candidates_head_;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);

      candidate = next_candidate;

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons code-map: ");

  if (holder == code_map_holder) {
    next_holder = GetNextCodeMap(code_map_holder);
    optimized_code_map_holder_head_ = next_holder;
    ClearNextCodeMap(code_map_holder);
    while (holder != NULL) {
      next_holder = GetNextCodeMap(holder);

      if (next_holder == code_map_holder) {
        next_holder = GetNextCodeMap(code_map_holder);
        SetNextCodeMap(holder, next_holder);
        ClearNextCodeMap(code_map_holder);

      holder = next_holder;

void CodeFlusher::EvictJSFunctionCandidates() {
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    candidate = next_candidate;

void CodeFlusher::EvictSharedFunctionInfoCandidates() {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    candidate = next_candidate;
  ASSERT(shared_function_info_candidates_head_ == NULL);

void CodeFlusher::EvictOptimizedCodeMaps() {
  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    holder = next_holder;
  ASSERT(optimized_code_map_holder_head_ == NULL);

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    v->VisitPointer(reinterpret_cast<Object**>(slot));
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);

MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
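
// If *p is a cons string whose second part is the empty string, marking can
// follow the first part directly instead of keeping the cons wrapper alive
// (skipped when the short-circuit would create an old-to-new pointer).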
static inline HeapObject* ShortCircuitConsString(Object** p) {
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();

  Object* second = reinterpret_cast<ConsString*>(object)->second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {

  Object* first = reinterpret_cast<ConsString*>(object)->first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
template<MarkCompactMarkingVisitor::VisitorId id>

    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;

    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);

    MarkBit mark = Marking::MarkBitFrom(object);

    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (!mark_bit.Get()) {

    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);

    MarkBit mark = Marking::MarkBitFrom(obj);
    MarkBit map_mark = Marking::MarkBitFrom(map);
    IterateBody(map, obj);

    if (check.HasOverflowed()) return false;

    for (Object** p = start; p < end; p++) {
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);

    Object* table_object = weak_collection->table();
    if (!table_object->IsHashTable()) return;

    MarkBit table_mark = Marking::MarkBitFrom(table);
    collector->RecordSlot(table_slot, table_slot, table);
    if (!table_mark.Get()) collector->SetMark(table, table_mark);

    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));

  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
    if (!code->IsSmi() &&
      RecordSlot(slot, slot, code);
    } else if (code->IsSmi()) {

  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExp(map, object);

    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);

    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);

    VisitJSRegExp(map, object);

  static VisitorDispatchTable<Callback> non_count_table_;

    if (fixed_array->map() != heap->fixed_cow_array_map() &&
        fixed_array->map() != heap->fixed_double_array_map() &&
        fixed_array != heap->empty_fixed_array()) {
      if (fixed_array->IsDictionary()) {
                                           fixed_array->Size());
                                           fixed_array->Size());

    int object_size = obj->Size();
    non_count_table_.GetVisitorById(id)(map, obj);
    if (obj->IsJSObject()) {
                                   DICTIONARY_ELEMENTS_SUB_TYPE,
                                   FAST_ELEMENTS_SUB_TYPE);
                                   DICTIONARY_PROPERTIES_SUB_TYPE,
                                   FAST_PROPERTIES_SUB_TYPE);

template<MarkCompactMarkingVisitor::VisitorId id>
    ObjectStatsVisitBase(id, map, obj);

        array != heap->empty_descriptor_array()) {
      int fixed_array_size = array->Size();
      int fixed_array_size = map_obj->transitions()->Size();
                                         cache->default_cache()->Size());
      if (!cache->normal_type_cache()->IsUndefined()) {
            MAP_CODE_CACHE_SUB_TYPE,

    int object_size = obj->Size();

    if (sfi->scope_info() != heap->empty_fixed_array()) {
          SCOPE_INFO_SUB_TYPE,

    if (fixed_array == heap->string_table()) {
          STRING_TABLE_SUB_TYPE,
          fixed_array->Size());

  table_.Register(kVisitJSRegExp,
                  &VisitRegExpAndFlushCode);

  if (FLAG_track_gc_object_stats) {
    non_count_table_.CopyFrom(&table_);
#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
#undef VISITOR_ID_COUNT_FUNCTION

    MarkCompactMarkingVisitor::non_count_table_;

      : collector_(collector) {}

    collector_->PrepareThreadForCodeFlushing(isolate, top);

      : collector_(collector) {}

    if (obj->IsSharedFunctionInfo()) {
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);

void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
          frame->LookupCode());

void MarkCompactCollector::PrepareForCodeFlushing() {
  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {

  HeapObject* descriptor_array = heap()->empty_descriptor_array();
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
  MarkObject(descriptor_array, descriptor_array_mark);

  ASSERT(this == heap()->mark_compact_collector());
                              &code_marking_visitor);

  ProcessMarkingDeque();

      : collector_(heap->mark_compact_collector()) { }

    MarkObjectByPointer(p);

    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* object = ShortCircuitConsString(p);
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) return;

    Map* map = object->map();
    collector_->SetMark(object, mark_bit);

    MarkBit map_mark = Marking::MarkBitFrom(map);
    collector_->MarkObject(map, map_mark);
    MarkCompactMarkingVisitor::IterateBody(map, object);

    collector_->EmptyMarkingDeque();

  MarkCompactCollector* collector_;

template<bool finalize_external_strings>
      : heap_(heap), pointers_removed_(0) { }

    for (Object** p = start; p < end; p++) {
      if (o->IsHeapObject() &&
        if (finalize_external_strings) {
          ASSERT(o->IsExternalString());
          pointers_removed_++;
          *p = heap_->the_hole_value();

    ASSERT(!finalize_external_strings);
    return pointers_removed_;

  int pointers_removed_;

  } else if (object->IsAllocationSite() &&

static void DiscoverGreyObjectsWithIterator(Heap* heap,
                                            MarkingDeque* marking_deque,
  ASSERT(!marking_deque->IsFull());

  Map* filler_map = heap->one_pointer_filler_map();
  for (HeapObject* object = it->Next();
       object != NULL;
       object = it->Next()) {
    MarkBit markbit = Marking::MarkBitFrom(object);
    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
      Marking::GreyToBlack(markbit);
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;

static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);

static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
  ASSERT(!marking_deque->IsFull());

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();

    if (current_cell == 0) continue;

      grey_objects = current_cell &
      grey_objects = current_cell & (current_cell >> 1);

    while (grey_objects != 0) {
      grey_objects >>= trailing_zeros;
      offset += trailing_zeros;
      MarkBit markbit(cell, 1 << offset, false);
      ASSERT(Marking::IsGrey(markbit));
      Marking::GreyToBlack(markbit);
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
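
// Walk the mark bitmap of a new-space page and copy every black (live)
// object out of it, summing the survivors' sizes; the fallback path shown
// here reallocates in new space when promotion to old space is not possible.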
int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
    NewSpace* new_space,
  int survivors_size = 0;

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();

    if (current_cell == 0) continue;

    while (current_cell != 0) {
      current_cell >>= trailing_zeros;
      offset += trailing_zeros;

      int size = object->Size();
      survivors_size += size;

      MaybeObject* allocation = new_space->AllocateRaw(size);
      if (allocation->IsFailure()) {
        if (!new_space->AddFreshPage()) {
        allocation = new_space->AllocateRaw(size);
        ASSERT(!allocation->IsFailure());
      Object* target = allocation->ToObjectUnchecked();

  return survivors_size;

static void DiscoverGreyObjectsInSpace(Heap* heap,
                                       MarkingDeque* marking_deque,
                                       PagedSpace* space) {
  if (!space->was_swept_conservatively()) {
    HeapObjectIterator it(space);
    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
    PageIterator it(space);
    while (it.has_next()) {
      Page* p = it.next();
      DiscoverGreyObjectsOnPage(marking_deque, p);
      if (marking_deque->IsFull()) return;

static void DiscoverGreyObjectsInNewSpace(Heap* heap,
                                          MarkingDeque* marking_deque) {
  NewSpace* space = heap->new_space();
  NewSpacePageIterator it(space->bottom(), space->top());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    DiscoverGreyObjectsOnPage(marking_deque, page);
    if (marking_deque->IsFull()) return;

bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  if (!o->IsHeapObject()) return false;
  MarkBit mark = Marking::MarkBitFrom(heap_object);

bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
  ASSERT(o->IsHeapObject());
  MarkBit mark = Marking::MarkBitFrom(heap_object);

void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
  StringTable* string_table = heap()->string_table();

  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
  SetMark(string_table, string_table_mark);

  string_table->IteratePrefix(visitor);
  ProcessMarkingDeque();

  MarkBit mark_bit = Marking::MarkBitFrom(site);
  SetMark(site, mark_bit);

  MarkStringTable(visitor);

    RefillMarkingDeque();
    EmptyMarkingDeque();

void MarkCompactCollector::MarkImplicitRefGroups() {
  List<ImplicitRefGroup*>* ref_groups =

  for (int i = 0; i < ref_groups->length(); i++) {
    ImplicitRefGroup* entry = ref_groups->at(i);
      (*ref_groups)[last++] = entry;

    Object*** children = entry->children;

    for (size_t j = 0; j < entry->length; ++j) {
      if ((*children[j])->IsHeapObject()) {
        MarkBit mark = Marking::MarkBitFrom(child);
        MarkObject(child, mark);

  ref_groups->Rewind(last);

  if (!IsMarked(weak_object_to_code_table)) {
    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
    SetMark(weak_object_to_code_table, mark);
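
// Drain the marking worklist: pop black objects, mark their maps, and visit
// their bodies so that newly discovered references are pushed in turn.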
void MarkCompactCollector::EmptyMarkingDeque() {
  while (!marking_deque_.IsEmpty()) {
    ASSERT(object->IsHeapObject());
    ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

    Map* map = object->map();
    MarkBit map_mark = Marking::MarkBitFrom(map);
    MarkObject(map, map_mark);

    MarkCompactMarkingVisitor::IterateBody(map, object);

void MarkCompactCollector::RefillMarkingDeque() {
  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             heap()->old_pointer_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             heap()->old_data_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             heap()->code_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             heap()->map_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             heap()->cell_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             heap()->property_cell_space());
  if (marking_deque_.IsFull()) return;

  LargeObjectIterator lo_it(heap()->lo_space());
  DiscoverGreyObjectsWithIterator(heap(),
  if (marking_deque_.IsFull()) return;

void MarkCompactCollector::ProcessMarkingDeque() {
  EmptyMarkingDeque();
    RefillMarkingDeque();
    EmptyMarkingDeque();

void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
  bool work_to_do = true;
  while (work_to_do) {
        visitor, &IsUnmarkedHeapObjectWithHeap);
    MarkImplicitRefGroups();
    ProcessWeakCollections();
    work_to_do = !marking_deque_.IsEmpty();
    ProcessMarkingDeque();

void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code* code = it.frame()->LookupCode();
      if (!code->CanDeoptAt(it.frame()->pc())) {
        code->CodeIterateBody(visitor);
      ProcessMarkingDeque();
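
// Full marking phase: take over (or abort) any incremental marking state,
// set up the marking deque, mark roots, the string table and cells, and
// iterate ephemeral marking until no further work is discovered.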
void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
  PostponeInterruptsScope postpone(isolate());

  bool incremental_marking_overflowed = false;
  if (was_marked_incrementally_) {
    incremental_marking_overflowed =
        incremental_marking->marking_deque()->overflowed();
    incremental_marking->marking_deque()->ClearOverflowed();
    incremental_marking->Abort();

  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;

  if (FLAG_force_marking_deque_overflows) {
    marking_deque_end = marking_deque_start + 64 * kPointerSize;
  marking_deque_.Initialize(marking_deque_start,

  if (incremental_marking_overflowed) {

  PrepareForCodeFlushing();

  if (was_marked_incrementally_) {

  HeapObjectIterator cell_iterator(heap()->cell_space());
  while ((cell = cell_iterator.Next()) != NULL) {
      MarkCompactMarkingVisitor::VisitPointer(
          reinterpret_cast<Object**>(cell->address() + offset));

  HeapObjectIterator js_global_property_cell_iterator(
      heap()->property_cell_space());
  while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
    ASSERT(cell->IsPropertyCell());
      MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);

  MarkRoots(&root_visitor);

  ProcessTopOptimizedFrame(&root_visitor);

    ProcessEphemeralMarking(&root_visitor);
        &IsUnmarkedHeapObject);
      RefillMarkingDeque();
      EmptyMarkingDeque();

    ProcessEphemeralMarking(&root_visitor);

void MarkCompactCollector::AfterMarking() {
  StringTable* string_table = heap()->string_table();
  string_table->IterateElements(&internalized_visitor);
  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());

  heap()->external_string_table_.Iterate(&external_visitor);

  MarkCompactWeakObjectRetainer mark_compact_object_retainer;

  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {

  if (FLAG_track_gc_object_stats) {

void MarkCompactCollector::ProcessMapCaches() {
  Object* raw_context = heap()->native_contexts_list_;
  while (raw_context != heap()->undefined_value()) {
    Context* context = reinterpret_cast<Context*>(raw_context);
      HeapObject* raw_map_cache =
          raw_map_cache != heap()->undefined_value()) {
        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
        int existing_elements = map_cache->NumberOfElements();
        int used_elements = 0;
             i < map_cache->length();
          Object* raw_key = map_cache->get(i);
          if (raw_key == heap()->undefined_value() ||
              raw_key == heap()->the_hole_value()) continue;
          Object* raw_map = map_cache->get(i + 1);
          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
            ASSERT(raw_map->IsMap());
            map_cache->set_the_hole(i);
            map_cache->set_the_hole(i + 1);
        if (used_elements == 0) {
          map_cache->ElementsRemoved(existing_elements - used_elements);
          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
          MarkObject(map_cache, map_cache_markbit);

  ProcessMarkingDeque();

void MarkCompactCollector::ReattachInitialMaps() {
  HeapObjectIterator map_iterator(heap()->map_space());
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL;
       obj = map_iterator.Next()) {
    if (map->attached_to_shared_function_info()) {

void MarkCompactCollector::ClearNonLiveReferences() {
  HeapObjectIterator map_iterator(heap()->map_space());
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL;
       obj = map_iterator.Next()) {
    if (!map->CanTransition()) continue;

    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (map_mark.Get() && map->attached_to_shared_function_info()) {

    ClearNonLivePrototypeTransitions(map);
    ClearNonLiveMapTransitions(map, map_mark);

    if (map_mark.Get()) {
      ClearNonLiveDependentCode(map->dependent_code());
      ClearAndDeoptimizeDependentCode(map->dependent_code());

  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {

  Object* undefined = heap()->undefined_value();
  for (Object* site = heap()->allocation_sites_list();

  WeakHashTable* table =
  uint32_t capacity = table->Capacity();
  for (uint32_t i = 0; i < capacity; i++) {
    uint32_t key_index = table->EntryToIndex(i);
    Object* key = table->get(key_index);
    if (!table->IsKey(key)) continue;
    uint32_t value_index = table->EntryToValueIndex(i);
    Object* value = table->get(value_index);
    if (key->IsCell() && !IsMarked(key)) {
      Object* object = cell->value();
        MarkBit mark = Marking::MarkBitFrom(cell);
        SetMark(cell, mark);
        RecordSlot(value_slot, value_slot, *value_slot);

      MarkBit mark = Marking::MarkBitFrom(obj);
      table->set(key_index, heap_->the_hole_value());
      table->set(value_index, heap_->the_hole_value());
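
// Compact a map's prototype-transition array in place: live entries are
// shifted down, their slots re-recorded, and the now-unused tail is cleared
// to undefined.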
void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
  int number_of_transitions = map->NumberOfProtoTransitions();
  FixedArray* prototype_transitions = map->GetPrototypeTransitions();

  int new_number_of_transitions = 0;
  for (int i = 0; i < number_of_transitions; i++) {
    Object* prototype = prototype_transitions->get(proto_offset + i * step);
    Object* cached_map = prototype_transitions->get(map_offset + i * step);
      ASSERT(!prototype->IsUndefined());
      int proto_index = proto_offset + new_number_of_transitions * step;
      int map_index = map_offset + new_number_of_transitions * step;
      if (new_number_of_transitions != i) {
        prototype_transitions->set(
        prototype_transitions->set(
      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
      RecordSlot(slot, slot, prototype);
      new_number_of_transitions++;

  if (new_number_of_transitions != number_of_transitions) {
    map->SetNumberOfProtoTransitions(new_number_of_transitions);

  for (int i = new_number_of_transitions * step;
       i < number_of_transitions * step;
    prototype_transitions->set_undefined(header + i);

void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
  Object* potential_parent = map->GetBackPointer();
  if (!potential_parent->IsMap()) return;
  Map* parent = Map::cast(potential_parent);

  bool current_is_alive = map_mark.Get();
  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
  if (!current_is_alive && parent_is_alive) {
    parent->ClearNonLiveTransitions(heap());

void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  for (int i = 0; i < number_of_entries; i++) {
    ASSERT(entries->is_code_at(i));
    Code* code = entries->code_at(i);

    if (IsMarked(code) && !code->marked_for_deoptimization()) {
      code->set_marked_for_deoptimization(true);
      code->InvalidateEmbeddedObjects();
      have_code_to_deoptimize_ = true;
    entries->clear_at(i);

void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  int new_number_of_entries = 0;
    int group_number_of_entries = 0;
    for (int i = starts.at(g); i < starts.at(g + 1); i++) {
      Object* obj = entries->object_at(i);
          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
        if (new_number_of_entries + group_number_of_entries != i) {
          entries->set_object_at(
              new_number_of_entries + group_number_of_entries, obj);
        Object** slot = entries->slot_at(new_number_of_entries +
                                         group_number_of_entries);
        RecordSlot(slot, slot, obj);
        group_number_of_entries++;
    entries->set_number_of_entries(
        static_cast<DependentCode::DependencyGroup>(g),
        group_number_of_entries);
    new_number_of_entries += group_number_of_entries;
  for (int i = new_number_of_entries; i < number_of_entries; i++) {
    entries->clear_at(i);

void MarkCompactCollector::ProcessWeakCollections() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    Object** anchor = reinterpret_cast<Object**>(table->address());
    for (int i = 0; i < table->Capacity(); i++) {
        RecordSlot(anchor, key_slot, *key_slot);
            table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
        MarkCompactMarkingVisitor::MarkObjectByPointer(
            this, anchor, value_slot);
    weak_collection_obj = weak_collection->next();

void MarkCompactCollector::ClearWeakCollections() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    for (int i = 0; i < table->Capacity(); i++) {
        table->RemoveEntry(i);
    weak_collection_obj = weak_collection->next();

  ASSERT(heap()->AllowedToBeMigrated(src, dest));
    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
            &migration_slots_buffer_,
            reinterpret_cast<Object**>(dst_slot),
    if (compacting_ && dst->IsJSFunction()) {
          &migration_slots_buffer_,
    } else if (compacting_ && dst->IsConstantPoolArray()) {
          &migration_slots_buffer_,
          &migration_slots_buffer_,

    for (Object** p = start; p < end; p++) UpdatePointer(p);

    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    Object* old_target = target;
    if (target != old_target) {
      rinfo->set_target_object(target);

    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* old_target = target;
    if (target != old_target) {
      rinfo->set_target_address(Code::cast(target)->instruction_start());

    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
    Object* stub = rinfo->code_age_stub();
    if (stub != rinfo->code_age_stub()) {

    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
      rinfo->set_call_address(Code::cast(target)->instruction_start());

    if (!obj->IsHeapObject()) return;

    MapWord map_word = heap_obj->map_word();
    if (map_word.IsForwardingAddress()) {
          MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
      HeapObject* target = map_word.ToForwardingAddress();
          !MarkCompactCollector::IsOnEvacuationCandidate(target));

  inline void UpdatePointer(Object** p) {

static void UpdatePointer(HeapObject** p, HeapObject* object) {
  Address old_addr = object->address();
  if (new_addr != NULL) {
    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));

static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
  if (map_word.IsForwardingAddress()) {

  ASSERT(target_space == heap()->old_pointer_space() ||
         target_space == heap()->old_data_space());
  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
  if (maybe_result->ToObject(&result)) {
    increment_promoted_objects_size(object_size);

void MarkCompactCollector::EvacuateNewSpace() {
  int survivors_size = 0;

  NewSpacePageIterator it(from_bottom, from_top);
  while (it.has_next()) {
    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);

void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
  AlwaysAllocateScope always_allocate(isolate());
  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
  p->MarkSweptPrecisely();

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();

    if (*cell == 0) continue;

    int live_objects = MarkWordToObjectStarts(*cell, offsets);
    for (int i = 0; i < live_objects; i++) {
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

      int size = object->Size();

      MaybeObject* target = space->AllocateRaw(size);
      if (target->IsFailure()) {

      Object* target_object = target->ToObjectUnchecked();

      ASSERT(object->map_word().IsForwardingAddress());

  p->ResetLiveBytes();

void MarkCompactCollector::EvacuatePages() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    CHECK(p->IsEvacuationCandidate() ||
    CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0);
    if (p->IsEvacuationCandidate()) {
      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
        EvacuateLiveObjectsFromPage(p);
        for (int j = i; j < npages; j++) {
          Page* page = evacuation_candidates_[j];
          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
          page->ClearEvacuationCandidate();
          page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());

    if (object->IsHeapObject()) {
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();

static inline void UpdateSlot(Isolate* isolate,
  switch (slot_type) {
      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(isolate, v);
      v->VisitCodeEntry(addr);
      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(isolate, v);
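
// Precise sweeping: walk the mark bitmap of a page, free every gap between
// black objects (zapping it with 0xcc where configured), update the skip
// list where one is present, and reset the page's live-byte count.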
static void SweepPrecisely(PagedSpace* space,
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());

  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {

  p->MarkSweptPrecisely();

  Address free_start = p->area_start();
  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);

  SkipList* skip_list = p->skip_list();
  int curr_region = -1;

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    int live_objects = MarkWordToObjectStarts(*cell, offsets);

    for ( ; live_objects != 0; live_objects--) {
      if (free_end != free_start) {
        memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
        space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
          GDBJITInterface::RemoveCodeRange(free_start, free_end);

      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
      Map* map = live_object->map();
      int size = live_object->SizeFromMap(map);
        live_object->IterateBody(map->instance_type(), size, v);
        int new_region_start =
        int new_region_end =
        if (new_region_start != curr_region ||
            new_region_end != curr_region) {
          skip_list->AddObject(free_end, size);
          curr_region = new_region_end;
      free_start = free_end + size;

  if (free_start != p->area_end()) {
    memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());

  p->ResetLiveBytes();
  if (FLAG_print_cumulative_gc_stat) {

static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
  if (p->IsEvacuationCandidate() ||

  Address code_start = code->address();
  Address code_end = code_start + code->Size();

  uint32_t end_index =

  Bitmap* b = p->markbits();

  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);

  if (start_cell == end_cell) {
    *start_cell |= start_mask & end_mask;
    *start_cell |= start_mask;
    *end_cell |= end_mask;

static bool IsOnInvalidatedCodeObject(Address addr) {
  if (p->owner()->identity() != CODE_SPACE) return false;

  return mark_bit.Get();

      !ShouldSkipEvacuationSlotRecording(code)) {
    MarkBit mark_bit = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(mark_bit)) return;

    invalidated_code_.Add(code);

bool MarkCompactCollector::WillBeDeoptimized(Code* code) {

bool MarkCompactCollector::MarkInvalidatedCode() {
  bool code_marked = false;

  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];

    if (SetMarkBitsUnderInvalidatedCode(code, true)) {

void MarkCompactCollector::RemoveDeadInvalidatedCode() {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;

void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];
    code->Iterate(visitor);
    SetMarkBitsUnderInvalidatedCode(code, false);
  invalidated_code_.Rewind(0);
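
// Evacuation and pointer updating: copy live objects out of new space and
// the evacuation candidates, then rewrite every recorded slot (new space,
// roots, store buffer, slots buffers, cells, string table) to point at the
// forwarded locations, and finally verify the result under --verify-heap.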
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  Heap::RelocationLock relocation_lock(heap());

  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();

  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);

  PointersUpdatingVisitor updating_visitor(heap());

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                            heap()->new_space()->top());
    for (HeapObject* object = to_it.Next();
         object != NULL;
         object = to_it.Next()) {
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
    StoreBufferRebuildScope scope(heap_,
                                  &Heap::ScavengeStoreBufferCallback);

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
                                       migration_slots_buffer_,
                                       code_slots_filtering_required);
    if (FLAG_trace_fragmentation) {
      PrintF("  migration slots buffer: %d\n",

    if (compacting_ && was_marked_incrementally_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        obj->Iterate(&updating_visitor);

  int npages = evacuation_candidates_.length();
  { GCTracer::Scope gc_scope(
      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      ASSERT(p->IsEvacuationCandidate() ||

      if (p->IsEvacuationCandidate()) {
                                         code_slots_filtering_required);
        if (FLAG_trace_fragmentation) {
          PrintF("  page %p slots buffer: %d\n",
                 reinterpret_cast<void*>(p),

        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
        if (FLAG_gc_verbose) {
                 reinterpret_cast<intptr_t>(p));

        PagedSpace* space = static_cast<PagedSpace*>(p->owner());

        switch (space->identity()) {
            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
                space, p, &updating_visitor);
            if (FLAG_zap_code_space) {
                  space, p, &updating_visitor);
                  space, p, &updating_visitor);

  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

  HeapObjectIterator cell_iterator(heap_->cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (cell->IsCell()) {

  HeapObjectIterator js_global_property_cell_iterator(
  for (HeapObject* cell = js_global_property_cell_iterator.Next();
       cell != NULL;
       cell = js_global_property_cell_iterator.Next()) {
    if (cell->IsPropertyCell()) {

  heap_->string_table()->Iterate(&updating_visitor);
  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
    WeakHashTable* table =
    table->Iterate(&updating_visitor);
    table->Rehash(heap_->undefined_value());

      &UpdateReferenceInExternalStringTableEntry);

  EvacuationWeakObjectRetainer evacuation_object_retainer;

  ProcessInvalidatedCode(&updating_visitor);

  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);

void MarkCompactCollector::UnlinkEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

void MarkCompactCollector::ReleaseEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    space->Free(p->area_start(), p->area_size());
    p->set_scan_on_scavenge(false);
    p->ResetLiveBytes();
    space->ReleasePage(p, false);
  evacuation_candidates_.Rewind(0);
  compacting_ = false;

static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
static const int kStartTableUnusedEntry = 126;

#define _ kStartTableUnusedEntry
#define X kStartTableInvalidLine
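
// Decode one 32-bit word of mark bits into object start offsets using the
// precomputed start table: each byte of the word indexes a table line that
// says how many objects start in those 8 words and at which offsets.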
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  ASSERT((mark_bits & 0x180) != 0x180);
  ASSERT((mark_bits & 0x18000) != 0x18000);
  ASSERT((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
      ASSERT(byte < kStartTableLines);
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
3899 static inline Address DigestFreeStart(
Address approximate_free_start,
3900 uint32_t free_start_cell) {
3901 ASSERT(free_start_cell != 0);
3904 ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3907 uint32_t cell = free_start_cell;
3908 int offset_of_last_live;
3909 if ((cell & 0x80000000u) != 0) {
3911 offset_of_last_live = 31;
3920 cell = (cell + 1) >> 1;
3921 int live_objects = MarkWordToObjectStarts(cell, offsets);
3922 ASSERT(live_objects == 1);
3923 offset_of_last_live = offsets[live_objects - 1];
3926 approximate_free_start + offset_of_last_live *
kPointerSize;
3928 Address free_start = last_live_start + last_live->Size();
3933 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3937 ASSERT((cell & (cell << 1)) == 0);
3940 if (cell == 0x80000000u) {
3943 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3944 ASSERT((first_set_bit & cell) == first_set_bit);
3945 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3946 ASSERT(live_objects == 1);
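// Free() hands a dead region either straight to the space (sequential
// sweeping) or to the supplied private free list (parallel sweeping).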
3952 template<MarkCompactCollector::SweepingParallelism mode>
3953 static intptr_t Free(PagedSpace* space,
3954 FreeList* free_list,
3958 return space->Free(start, size);
3960 return size - free_list->Free(start, size);
3967 template intptr_t MarkCompactCollector::
3968 SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
3969 PagedSpace*, FreeList*, Page*);
3974 template intptr_t MarkCompactCollector::
3975 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3976 PagedSpace*, FreeList*, Page*);
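// SweepConservatively puts larger free regions on the free list and leaves
// small gaps (up to 32 words) between live objects untouched; exact object
// boundaries are only computed at the ends of free regions.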
3986 template<MarkCompactCollector::SweepingParallelism mode>
3995 free_list != NULL) ||
3997 free_list == NULL));
4005 intptr_t freed_bytes = 0;
4011 MarkBitCellIterator it(p);
4012 for (; !it.Done(); it.Advance()) {
4013 cell_base = it.CurrentCellBase();
4014 cell = it.CurrentCell();
4015 if (*cell != 0) break;
4020 freed_bytes += Free<mode>(space, free_list, p->area_start(),
4021 static_cast<int>(size));
4028 Address free_end = StartOfLiveObject(cell_base, *cell);
4031 freed_bytes += Free<mode>(space, free_list, p->area_start(),
4032 static_cast<int>(size));
4040 Address free_start = cell_base;
4043 for (; !it.Done(); it.Advance()) {
4044 cell_base = it.CurrentCellBase();
4045 cell = it.CurrentCell();
4049 if (cell_base - free_start > 32 * kPointerSize) {
4050 free_start = DigestFreeStart(free_start, free_start_cell);
4051 if (cell_base - free_start > 32 * kPointerSize) {
4056 free_end = StartOfLiveObject(cell_base, *cell);
4057 freed_bytes += Free<mode>(space, free_list, free_start,
4058 static_cast<int>(free_end - free_start));
4062 free_start = cell_base;
4063 free_start_cell = *cell;
4070 if (cell_base - free_start > 32 * kPointerSize) {
4071 free_start = DigestFreeStart(free_start, free_start_cell);
4072 freed_bytes += Free<mode>(space, free_list, free_start,
4073 static_cast<int>(p->area_end() - free_start));
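// Parallel sweeping (run by the sweeper threads): pages of the old spaces
// are swept conservatively into a thread-private free list.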
4082 PageIterator it(space);
4084 ? free_list_old_pointer_space_.get()
4085 : free_list_old_data_space_.get();
4087 while (it.has_next()) {
4088 Page* p = it.next();
4091 SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
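// SweepSpace walks the pages of a space, sweeping the first pages eagerly
// and leaving the rest to lazy or parallel conservative sweeping; with
// PRECISE every page is swept precisely (optionally rebuilding skip lists
// and zapping free space).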
4099 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4106 PageIterator it(space);
4108 int pages_swept = 0;
4109 bool lazy_sweeping_active = false;
4110 bool unused_page_present = false;
4111 bool parallel_sweeping_active = false;
4113 while (it.has_next()) {
4114 Page* p = it.next();
4125 ASSERT(evacuation_candidates_.length() > 0);
4131 if (unused_page_present) {
4132 if (FLAG_gc_verbose) {
4134 reinterpret_cast<intptr_t>(p));
4142 unused_page_present = true;
4147 if (FLAG_gc_verbose) {
4149 reinterpret_cast<intptr_t>(p));
4151 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4156 if (lazy_sweeping_active) {
4157 if (FLAG_gc_verbose) {
4159 reinterpret_cast<intptr_t>(p));
4163 if (FLAG_gc_verbose) {
4165 reinterpret_cast<intptr_t>(p));
4167 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4170 lazy_sweeping_active = true;
4176 if (!parallel_sweeping_active) {
4177 if (FLAG_gc_verbose) {
4179 reinterpret_cast<intptr_t>(p));
4181 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4183 parallel_sweeping_active = true;
4185 if (FLAG_gc_verbose) {
4187 reinterpret_cast<intptr_t>(p));
4195 if (FLAG_gc_verbose) {
4197 reinterpret_cast<intptr_t>(p));
4200 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
4203 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
4206 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
4218 if (FLAG_gc_verbose) {
4219 PrintF("SweepSpace: %s (%d pages swept)\n",
4229 void MarkCompactCollector::SweepSpaces() {
4230 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4232 state_ = SWEEP_SPACES;
4240 if (sweep_precisely_) how_to_sweep = PRECISE;
4244 UnlinkEvacuationCandidates();
4251 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
4252 { SequentialSweepingScope scope(this);
4253 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4254 SweepSpace(heap()->old_data_space(), how_to_sweep);
4260 StartSweeperThreads();
4267 RemoveDeadInvalidatedCode();
4271 SweepSpace(heap()->property_cell_space(), PRECISE);
4273 EvacuateNewSpaceAndCandidates();
4284 ReleaseEvacuationCandidates();
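// After the sweeper threads have finished, pages that were swept in
// parallel are marked as conservatively swept.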
4288 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4289 PageIterator it(space);
4290 while (it.has_next()) {
4291 Page* p = it.next();
4294 p->MarkSweptConservatively();
4301 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4302 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4303 ParallelSweepSpaceComplete(heap()->old_data_space());
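// EnableCodeFlushing: flushing stays off while the debugger is loaded or
// has break points set; otherwise the code flusher is created or deleted on
// demand, and the transition is traced under --trace-code-flushing.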
4308 #ifdef ENABLE_DEBUGGER_SUPPORT
4309 if (isolate()->debug()->IsLoaded() ||
4310 isolate()->debug()->has_break_points()) {
4316 if (code_flusher_ != NULL) return;
4319 if (code_flusher_ == NULL) return;
4321 delete code_flusher_;
4322 code_flusher_ = NULL;
4325 if (FLAG_trace_code_flushing) {
4326 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4336 #ifdef ENABLE_GDB_JIT_INTERFACE
4337 if (obj->IsCode()) {
4338 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
4341 if (obj->IsCode()) {
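// A typed slot occupies two entries in the slots buffer: the slot type
// followed by the slot's address.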
4375 *buffer_address = buffer;
4378 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4379 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
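// SlotTypeForRMode maps a RelocInfo mode to the corresponding slots-buffer
// slot type.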
4385 if (RelocInfo::IsCodeTarget(rmode)) {
4387 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4389 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4391 } else if (RelocInfo::IsJSReturn(rmode)) {
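// RecordRelocSlot: relocation slots that point into evacuation candidates
// are recorded in a slots buffer; if recording fails, the candidate page is
// evicted instead.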
4401 RelocInfo::Mode rmode = rinfo->rmode();
4403 (rinfo->host() == NULL ||
4404 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4406 if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4408 Object** target_pointer =
4409 reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4414 } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4418 rinfo->constant_pool_entry_address(),
4423 SlotTypeForRMode(rmode),
4428 EvictEvacuationCandidate(target_page);
4437 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4443 EvictEvacuationCandidate(target_page);
4453 GcSafeFindCodeForInnerPointer(pc);
4454 MarkBit mark_bit = Marking::MarkBitFrom(host);
4455 if (Marking::IsBlack(mark_bit)) {
4456 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
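// UpdateSlots walks the recorded slots and updates them; the WithFilter
// variant additionally skips slots that lie on invalidated code objects.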
4472 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4481 DecodeSlotType(slot),
4482 reinterpret_cast<Address>(slots_[slot_idx]));
4491 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4494 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4501 if (!IsOnInvalidatedCodeObject(pc)) {
4504 DecodeSlotType(slot),
4505 reinterpret_cast<Address>(slots_[slot_idx]));
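// DeallocateChain frees every buffer in the chain and clears the head
// pointer.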
4524 while (buffer != NULL) {
4527 buffer = next_buffer;
4529 *buffer_address = NULL;