      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      store_buffer_rebuilding_enabled_(false),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_sets_are_empty_(true) {
}


void StoreBuffer::SetUp() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                    (old_limit_ - old_start_) * kPointerSize,
                                    false));  // Not executable.

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}
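

// Removes adjacent duplicates and entries that no longer point at new space.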
void StoreBuffer::Uniq() {
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}
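

// Grows the committed part of the old buffer while reserved space remains;
// once that is exhausted it falls back to compaction, to filtering out entries
// on scan-on-scavenge pages, and finally to exempting the most popular pages.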
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));  // Not executable.
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample the store buffer at increasingly fine granularity, exempting the
  // pages that hold the most entries, until enough space has been reclaimed.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    // (sample step / threshold pairs elided in this excerpt)
  };
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // The final, finest sample pass exempts every page as a last resort.
    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // The filtering hash sets are inconsistent with the buffer after this.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // The filtering hash sets are inconsistent with the buffer after sorting.
  ClearFilteringHashSets();
}
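

// Compacts and filters the buffer and reports whether any scan-on-scavenge
// pages exist; such pages must be scanned separately during iteration.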
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // The filtering hash sets are inconsistent with the buffer after iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes entries that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;
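

// Slow containment check over both the new and the old store buffer, with a
// one-element cache of the last hit; intended for (slow) debug assertions.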
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer,
        false);
  }
}


void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // Outside of GC the Heap::InNewSpace() predicate also checks that
        // matching pointers point into the active semispace.
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}


void StoreBuffer::Verify() {
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
  if (FLAG_verify_heap) {
    Verify();
  }
}
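

// For each slot in [start, end), if the slot holds a pointer into new space,
// invoke the callback and re-record the slot if it still points into new
// space afterwards.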
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer.  Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Computes the end address of the run of maps preceding the given address.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->area_start();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
    FindPointersToNewSpaceInRegion(pointer_fields_start, pointer_fields_end,
                                   slot_callback, clear_maps);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start, map_aligned_end,
                               slot_callback, clear_maps);
}
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback,
    bool clear_maps) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
  Object* constant_pool_array_map = heap_->constant_pool_array_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Skip fillers and constant pool arrays, but not things that look like
    // fillers in the special garbage section, which can contain anything.
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        o == constant_pool_array_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // After calling this the special garbage section may have moved.
        (this->*region_callback)(visitable_start, visitable_end,
                                 slot_callback, clear_maps);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // Skip the filler, constant pool array, or the object that used to
        // sit at space->top().
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(o != constant_pool_array_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start, visitable_end,
                             slot_callback, clear_maps);
  }
}
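

// Runs the slot callback for every address recorded in the old store buffer
// whose slot still refers to from-space, re-adding slots that point into new
// space after the callback has run.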
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback, bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  bool some_pages_to_scan = PrepareForIteration();

  IteratePointersInStoreBuffer(slot_callback, clear_maps);
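
  // All pointers that were in the store buffer have now been scanned, but
  // pages marked scan_on_scavenge may hold new-space pointers that were never
  // recorded in the buffer; those pages are scanned here.  Surviving pointers
  // into new space are added back to the store buffer as they are found.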
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                 &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback,
              clear_maps);
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}
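

// Copies the pointers held in the per-isolate store buffer into the old
// buffer, attempting to remove duplicates along the way.  In the interest of
// speed this is lossy: two hash sets with different hash functions reduce,
// but do not eliminate, clashes, so some duplicates remain.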
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There is no check of the limit in the loop below, so check the worst case
  // (compaction eliminates no pointers) up front.
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the low bits, including any tag bits.
    int_addr >>= kPointerSizeLog2;
    // Only the in-page bits of the address are hashed, so that hashing is not
    // perturbed by address space randomization.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down, just throw away some entries; this lets a
      // few duplicates remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
}