      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      store_buffer_rebuilding_enabled_(false),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_sets_are_empty_(true) {
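
// Set-up: the code below reserves and commits the virtual memory backing the
// new store buffer (start_ .. limit_) and the old store buffer
// (old_start_ .. old_reserved_limit_), then clears the filtering hash sets.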
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());

  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  ASSERT(initial_length > 0);
  old_limit_ = old_start_ + initial_length;

  CHECK(old_virtual_memory_->Commit(
            reinterpret_cast<void*>(old_start_),
            (old_limit_ - old_start_) * kPointerSize,
            false));  // Not executable.

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.

  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
#if V8_TARGET_ARCH_X64
// Comparator for qsort() over store buffer addresses.
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  if (a == b) return 0;
  if (a < b) return -1;
  return 1;
}
#else
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  ASSERT(sizeof(1) == sizeof(a));
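
// Uniq() drops adjacent duplicates from the old buffer as well as entries
// whose slots no longer point into new space.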
void StoreBuffer::Uniq() {
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double the old buffer.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));  // Not executable.
    old_limit_ += grow;
  }

  if (old_limit_ - old_top_ >= space_needed) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // If dropping the entries on scan-on-scavenge pages got the buffer below
  // half full, that is good enough.
  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
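
  // The old buffer is still more than half full: run progressively finer
  // sampling passes over it, exempting the most popular pages, until it is
  // at most half full.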
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {

  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    ASSERT(i != 0 || old_top_ == old_start_);
    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
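

// Samples every prime_sample_step-th entry of the old buffer; once a page's
// sample count reaches threshold it is marked scan-on-scavenge so that its
// entries can be dropped from the buffer.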
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter == threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
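

// Flag-based filtering pass: keep only the old-buffer entries whose
// containing page does not have the given flag (such as scan-on-scavenge)
// set, then clear the filtering hash sets, which no longer match the buffer.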
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {

  ClearFilteringHashSets();
  if (old_buffer_is_sorted_) return;
  qsort(reinterpret_cast<void*>(old_start_),
        old_top_ - old_start_,
        sizeof(*old_top_),
        &CompareAddresses);

  old_buffer_is_sorted_ = true;

  // The filtering hash sets are inconsistent with the store buffer after
  // this operation.
  ClearFilteringHashSets();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
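

// Clean() resets the filtering hash sets and then checks whether the old
// buffer needs more room.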
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  CheckForFullBuffer();
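

// Answers whether a cell address is currently recorded in either the new or
// the old store buffer; a one-element cache speeds up repeated queries for
// the same cell.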
static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
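

// Zeroes both filtering hash sets (used when the new buffer is emptied into
// the old buffer to suppress duplicates) unless they are already empty.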
void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}
  ClearFilteringHashSets();
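
// Heap-verification helpers: walk every page (or large object) of a space
// and visit each slot, looking for pointers into new space.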
void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer);
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  if (FLAG_verify_heap) {
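
// Scans the slots in [start, end) and, for every pointer into new space,
// invokes slot_callback with the slot and the object it points to.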
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
  Address page = Page::FromAllocationTop(addr)->area_start();
void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    FindPointersToNewSpaceInRegion(pointer_fields_start,
void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback);
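

// Walks the pointers on a page looking for references to new space.  Regions
// covered by free-space or filler map words, and the special garbage section
// between space->top() and space->limit(), are skipped; every other region is
// handed to the region callback.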
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space, Page* page,
    RegionCallback region_callback, ObjectSlotCallback slot_callback) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();
  Address visitable_end = visitable_start;
  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        (this->*region_callback)(visitable_start, visitable_end, slot_callback);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // Skip the object at the current spot.
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start, visitable_end, slot_callback);
  }
}
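

// Drains the old buffer: old_top_ is reset and every recorded slot is
// replayed through slot_callback.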
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  for (Address* current = old_start_; current < limit; current++) {
    Address* saved_top = old_top_;  // Used by the assert below.
    Object** slot = reinterpret_cast<Object**>(*current);
    Object* object = *slot;
    if (heap_->InFromSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
    }
    ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
  IteratePointersInStoreBuffer(slot_callback);
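
  // The store buffer itself has now been scanned.  Pages marked
  // scan-on-scavenge may still hold pointers to new space that never reached
  // the buffer, so they are scanned directly below; callback_, when present,
  // is notified as the page scanning proceeds.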
  if (some_pages_to_scan) {
    if (callback_ != NULL) {

    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (callback_ != NULL) {

        ASSERT(array->IsFixedArray());
        FindPointersToNewSpaceInRegion(start, end, slot_callback);

        Page* page = reinterpret_cast<Page*>(chunk);
        FindPointersToNewSpaceOnPage(
            reinterpret_cast<PagedSpace*>(page->owner()),
            page,
            (page->owner() == heap_->map_space() ?
               &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
               &StoreBuffer::FindPointersToNewSpaceInRegion),
            slot_callback);

    if (callback_ != NULL) {
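

// Compaction of the new buffer into the old buffer: read the current top from
// heap_->store_buffer_top(), bail out if the new buffer is empty, and copy
// each address across unless one of the two filtering hash sets says it was
// seen recently.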
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  ASSERT(may_move_store_buffer_entries_);
  // Copy the new buffer into the old buffer, using two hash sets (with
  // different hash functions) to cheaply skip most duplicate entries.
  hash_sets_are_empty_ = false;
  for (Address* current = start_; current < top; current++) {
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    if (hash_set_1_[hash1] == int_addr) continue;
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down, throw away an entry; this merely lets the
      // occasional duplicate through.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    ASSERT(old_top_ <= old_limit_);
  }
  CheckForFullBuffer();
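
// Invoked from Clean() and from the compaction pass above once entries have
// been added to the old buffer.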
void StoreBuffer::CheckForFullBuffer() {