      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      store_buffer_rebuilding_enabled_(false),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_sets_are_empty_(true) {
}
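// Buffer setup (excerpt, presumably StoreBuffer::SetUp()): the new-space
// store buffer and the old (overflow) buffer are carved out of reserved
// virtual memory, partially committed, and the filtering hash sets cleared.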
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());

  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);

  ASSERT(initial_length > 0);
  old_limit_ = old_start_ + initial_length;

  CHECK(old_virtual_memory_->Commit(
      reinterpret_cast<void*>(old_start_),
      (old_limit_ - old_start_) * kPointerSize,
      false));  // Not executable.

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);

  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.

  hash_sets_are_empty_ = false;
  ClearFilteringHashSets();
}
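// Teardown (excerpt, presumably StoreBuffer::TearDown()): release both buffer
// reservations and the filtering hash sets, and reset all buffer pointers.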
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
}
#if V8_TARGET_ARCH_X64
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  // On 64-bit targets the difference of two addresses may not fit in an int,
  // so do a full three-way comparison.
  if (a == b) return 0;
  if (a < b) return -1;
  return 1;
}
#else
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  ASSERT(sizeof(1) == sizeof(a));
  // Shifting the (pointer-aligned) difference right keeps the sign while
  // avoiding int overflow.
  return (a - b) >> kPointerSizeLog2;
}
#endif
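// Removes adjacent duplicates from the old buffer and drops entries that no
// longer point into new space.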
void StoreBuffer::Uniq() {
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}
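// EnsureSpace(space_needed) grows the old buffer by committing more of its
// reservation (doubling each time); if that is not enough it falls back to
// filtering entries and, finally, to exempting whole pages from the buffer.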
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double the committed size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));  // Not executable.
    old_limit_ += grow;
  }

  if (old_limit_ - old_top_ >= space_needed) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // If filtering freed up more than half of the buffer, that is enough.
  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
  // Otherwise exempt (mark scan-on-scavenge) the pages that contribute the
  // most entries, starting with a coarse sample and refining until enough
  // space has been recovered.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    // (Table of progressively finer sample steps and lower thresholds;
    //  the concrete values are not shown in this excerpt.)
  };
  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // The final (i == 0) pass exempts every remaining page, so the old
    // buffer must be empty afterwards.
    ASSERT(i != 0 || old_top_ == old_start_);
    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
  }
}
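// ExemptPopularPages samples every prime_sample_step-th entry; a page whose
// sampled count reaches the threshold is switched to scan-on-scavenge and its
// entries are dropped from the buffer.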
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter == threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}
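// Presumably StoreBuffer::Filter(int flag): drops every buffered entry that
// lies on a page with the given MemoryChunk flag set (such pages are scanned
// wholesale instead).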
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    previous_chunk = containing_chunk;
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // The filtering hash sets no longer match the buffer contents.
  ClearFilteringHashSets();
}
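// Presumably StoreBuffer::SortUniq(): sorts the old buffer with
// CompareAddresses and then removes duplicates.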
void StoreBuffer::SortUniq() {
  if (old_buffer_is_sorted_) return;
  qsort(reinterpret_cast<void*>(old_start_),
        old_top_ - old_start_,
        sizeof(*old_top_),
        &CompareAddresses);
  Uniq();

  old_buffer_is_sorted_ = true;

  // The filtering hash sets no longer match the buffer contents.
  ClearFilteringHashSets();
}
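// PrepareForIteration() reports whether any page is flagged scan-on-scavenge;
// if so, the corresponding entries are filtered out of the buffer first.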
bool StoreBuffer::PrepareForIteration() {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Iteration will leave the filtering hash sets inconsistent with the buffer.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  CheckForFullBuffer();
}
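// Slow debugging helper: linearly searches both buffers for cell_address,
// caching the most recent hit in a one-element cache.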
static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}
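// From a GC hook whose surrounding code is not part of this excerpt: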
  ClearFilteringHashSets();
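// Verification support: the helpers below re-scan whole spaces looking for
// pointers into new space (used when heap verification is enabled).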
void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer);
  }
}
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // Outside of GC, InNewSpace() also verifies that a new-space pointer
        // points into the active semispace.
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}
// Presumably StoreBuffer::Verify(): checks each space with the matching
// region callback.
void StoreBuffer::Verify() {
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
}
// Presumably the GC-epilogue hook: verify the store buffer when --verify-heap
// is enabled.
void StoreBuffer::GCEpilogue() {
  if (FLAG_verify_heap) {
    Verify();
  }
}
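// FindPointersToNewSpaceInRegion scans [start, end) for slots pointing into
// new space, reports each through slot_callback, and re-enters slots that
// still point to new space afterwards.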
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}
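// Fragment of the map-alignment helpers (MapStartAlign/MapEndAlign) used by
// the maps-region scan below: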
  Address page = Page::FromAllocationTop(addr)->area_start();
void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    // Only the pointer-holding fields of each map need to be scanned.
    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback);
    map_address += Map::kSize;
  }
}
void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback);
}
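// FindPointersToNewSpaceOnPage walks one page of a paged space, applying
// region_callback to each run of live words while skipping free-space
// fillers and the not-yet-swept region around the allocation top.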
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Stop at fillers and at the unswept "special garbage" section that
    // starts at space->top().
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // The callback may allocate, which can move space->top().
        (this->*region_callback)(visitable_start,
                                 visitable_end,
                                 slot_callback);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // Skip over the object (or former top) at the current position.
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start,
                             visitable_end,
                             slot_callback);
  }
}
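// IteratePointersInStoreBuffer empties the old buffer, invoking slot_callback
// for every recorded slot whose target is still in new space; slots that
// still point there after the callback are re-entered into the buffer.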
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      // The callback may add at most the current slot back into the buffer.
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}
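// IteratePointersToNewSpace(slot_callback) is the scavenge-time entry point:
// it drains the store buffer and then scans any pages that were flagged
// scan-on-scavenge.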
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  bool some_pages_to_scan = PrepareForIteration();

  IteratePointersInStoreBuffer(slot_callback);

  // Entries on scan-on-scavenge pages were filtered out of the buffer, so
  // those pages are scanned in full here; surviving pointers to new space are
  // re-entered into the store buffer as they are found.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      // (Start-of-scan GC-event callback invocation not shown in this
      //  excerpt.)
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          // (Per-page GC-event callback invocation not shown in this excerpt.)
        }
        if (chunk->owner() == heap_->lo_space()) {
          // A large-object chunk holds a single object; scan it as one region.
          HeapObject* array = reinterpret_cast<LargePage*>(chunk)->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                  &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                  &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback);
        }
      }
    }
    if (callback_ != NULL) {
      // (End-of-scan GC-event callback invocation not shown in this excerpt.)
    }
  }
}
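// Presumably StoreBuffer::Compact(): folds the hardware-filled new-space
// buffer into the old buffer, using the two filtering hash sets to cheaply
// drop most (but not necessarily all) duplicate entries.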
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // Reset the hardware buffer; its entries are moved into the old buffer.
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);

  hash_sets_are_empty_ = false;  // The hash sets are in use from here on.
  for (Address* current = start_; current < top; current++) {
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // hash1 and hash2 index the two sets; they are derived from int_addr with
    // kHashSetLengthLog2 / kHashSetLength mixing not shown in this excerpt.
    if (hash_set_1_[hash1] == int_addr) continue;
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Both slots are occupied: overwrite rather than probe further, at the
      // cost of letting the occasional duplicate through.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = *current;  // Move the entry into the old buffer.
    ASSERT(old_top_ <= old_limit_);
  }
  CheckForFullBuffer();
}
void StoreBuffer::CheckForFullBuffer() {
  EnsureSpace(kStoreBufferSize * 2);
}