v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
store-buffer.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "store-buffer.h"
29 
30 #include <algorithm>
31 
32 #include "v8.h"
33 #include "store-buffer-inl.h"
34 #include "v8-counters.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 StoreBuffer::StoreBuffer(Heap* heap)
40  : heap_(heap),
41  start_(NULL),
42  limit_(NULL),
43  old_start_(NULL),
44  old_limit_(NULL),
45  old_top_(NULL),
46  old_reserved_limit_(NULL),
47  old_buffer_is_sorted_(false),
48  old_buffer_is_filtered_(false),
49  during_gc_(false),
50  store_buffer_rebuilding_enabled_(false),
51  callback_(NULL),
52  may_move_store_buffer_entries_(true),
53  virtual_memory_(NULL),
54  hash_set_1_(NULL),
55  hash_set_2_(NULL),
56  hash_sets_are_empty_(true) {
57 }
58 
59 
60 void StoreBuffer::SetUp() {
61  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
62  uintptr_t start_as_int =
63  reinterpret_cast<uintptr_t>(virtual_memory_->address());
64  start_ =
65  reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
66  limit_ = start_ + (kStoreBufferSize / kPointerSize);
67 
68  old_virtual_memory_ =
69  new VirtualMemory(kOldStoreBufferLength * kPointerSize);
70  old_top_ = old_start_ =
71  reinterpret_cast<Address*>(old_virtual_memory_->address());
72  // We don't know the OS's alignment requirements, but the alignment is
73  // certainly no less than 0x1000 (hence the 0xfff mask in the ASSERT below).
74  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
75  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
76  ASSERT(initial_length > 0);
77  ASSERT(initial_length <= kOldStoreBufferLength);
78  old_limit_ = old_start_ + initial_length;
79  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
80 
81  CHECK(old_virtual_memory_->Commit(
82  reinterpret_cast<void*>(old_start_),
83  (old_limit_ - old_start_) * kPointerSize,
84  false));
85 
86  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
87  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
88  Address* vm_limit = reinterpret_cast<Address*>(
89  reinterpret_cast<char*>(virtual_memory_->address()) +
90  virtual_memory_->size());
91  ASSERT(start_ <= vm_limit);
92  ASSERT(limit_ <= vm_limit);
93  USE(vm_limit);
94  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
95  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
96  0);
97 
98  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
99  kStoreBufferSize,
100  false)); // Not executable.
101  heap_->public_set_store_buffer_top(start_);
102 
103  hash_set_1_ = new uintptr_t[kHashSetLength];
104  hash_set_2_ = new uintptr_t[kHashSetLength];
105  hash_sets_are_empty_ = false;
106 
107  ClearFilteringHashSets();
108 }
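// A standalone sketch (not part of store-buffer.cc) of why the two
// overflow-bit ASSERTs in SetUp() hold: start_ is rounded up to a multiple of
// 2 * kStoreBufferSize, so the bit corresponding to kStoreBufferSize is clear
// at start_; limit_ lies exactly kStoreBufferSize bytes later, so that bit is
// set at limit_ and still clear one slot before it. The concrete sizes below
// are assumptions for illustration, not the constants from store-buffer.h.
#include <cassert>
#include <cstdint>

int main() {
  const std::uintptr_t kSize = 1 << 17;       // Assumed buffer size in bytes.
  const std::uintptr_t kOverflowBit = kSize;  // The bit the ASSERTs test.
  std::uintptr_t start = 7 * (2 * kSize);     // Any multiple of 2 * kSize.
  std::uintptr_t limit = start + kSize;
  assert((start & kOverflowBit) == 0);                    // Clear at start_.
  assert((limit & kOverflowBit) != 0);                    // Set at limit_.
  assert(((limit - sizeof(void*)) & kOverflowBit) == 0);  // Clear just below.
  return 0;
}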
109 
110 
111 void StoreBuffer::TearDown() {
112  delete virtual_memory_;
113  delete old_virtual_memory_;
114  delete[] hash_set_1_;
115  delete[] hash_set_2_;
116  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
117  start_ = limit_ = NULL;
118  heap_->public_set_store_buffer_top(start_);
119 }
120 
121 
122 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
123  isolate->heap()->store_buffer()->Compact();
124  isolate->counters()->store_buffer_overflows()->Increment();
125 }
126 
127 
128 void StoreBuffer::Uniq() {
129  // Remove adjacent duplicates and cells that do not point at new space.
130  Address previous = NULL;
131  Address* write = old_start_;
132  ASSERT(may_move_store_buffer_entries_);
133  for (Address* read = old_start_; read < old_top_; read++) {
134  Address current = *read;
135  if (current != previous) {
136  if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
137  *write++ = current;
138  }
139  }
140  previous = current;
141  }
142  old_top_ = write;
143 }
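// A minimal standalone sketch (not part of store-buffer.cc) of the compaction
// pattern Uniq() uses: a read index and a write index over the same buffer,
// dropping entries equal to their predecessor and entries that fail a
// predicate, then lowering the end of the buffer. The vector and the 'keep'
// predicate stand in for the old buffer and Heap::InNewSpace(); they are not
// V8 types.
#include <cstddef>
#include <cstdint>
#include <vector>

void CompactInPlace(std::vector<std::uintptr_t>* buffer,
                    bool (*keep)(std::uintptr_t)) {
  std::uintptr_t previous = 0;
  std::size_t write = 0;
  for (std::size_t read = 0; read < buffer->size(); read++) {
    std::uintptr_t current = (*buffer)[read];
    if (current != previous && keep(current)) {
      (*buffer)[write++] = current;  // Keep: not a duplicate, predicate holds.
    }
    previous = current;
  }
  buffer->resize(write);  // Plays the role of lowering old_top_.
}

// Example: keep only even values, dropping adjacent repeats.
static bool IsEven(std::uintptr_t v) { return (v & 1) == 0; }

int main() {
  std::vector<std::uintptr_t> b = {2, 2, 3, 4, 4, 6};
  CompactInPlace(&b, IsEven);  // b becomes {2, 4, 6}.
  return 0;
}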
144 
145 
146 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
147  return old_limit_ - old_top_ >= space_needed;
148 }
149 
150 
151 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
152  while (old_limit_ - old_top_ < space_needed &&
153  old_limit_ < old_reserved_limit_) {
154  size_t grow = old_limit_ - old_start_; // Double size.
155  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
156  grow * kPointerSize,
157  false));
158  old_limit_ += grow;
159  }
160 
161  if (SpaceAvailable(space_needed)) return;
162 
163  if (old_buffer_is_filtered_) return;
164  ASSERT(may_move_store_buffer_entries_);
165  Compact();
166 
167  old_buffer_is_filtered_ = true;
168  bool page_has_scan_on_scavenge_flag = false;
169 
170  PointerChunkIterator it(heap_);
171  MemoryChunk* chunk;
172  while ((chunk = it.next()) != NULL) {
173  if (chunk->scan_on_scavenge()) {
174  page_has_scan_on_scavenge_flag = true;
175  break;
176  }
177  }
178 
179  if (page_has_scan_on_scavenge_flag) {
180  Filter(MemoryChunk::SCAN_ON_SCAVENGE);
181  }
182 
183  if (SpaceAvailable(space_needed)) return;
184 
185  // Sample 1 entry in 97 and filter out the pages where we estimate that more
186  // than 1 in 8 pointers are to new space.
187  static const int kSampleFinenesses = 5;
188  static const struct Samples {
189  int prime_sample_step;
190  int threshold;
191  } samples[kSampleFinenesses] = {
192  { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
193  { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
194  { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
195  { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
196  { 1, 0}
197  };
198  for (int i = 0; i < kSampleFinenesses; i++) {
199  ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
200  // As a last resort we mark all pages as being exempt from the store buffer.
201  ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
202  if (SpaceAvailable(space_needed)) return;
203  }
204  UNREACHABLE();
205 }
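// A standalone sketch (not part of store-buffer.cc) of what the sampling
// table above encodes: each round looks at every prime_sample_step-th entry
// and exempts a page once the sampled hits suggest that roughly 1/divisor of
// the page's slots sit in the store buffer (the final {1, 0} round exempts
// every page it touches). The page and pointer sizes below are assumptions
// for illustration, not values taken from this file.
#include <cstdio>

int main() {
  const int kAssumedPageSize = 1 << 20;  // Assumption: 1 MB pages.
  const int kAssumedPointerSize = 8;     // Assumption: 64-bit build.
  const int slots_per_page = kAssumedPageSize / kAssumedPointerSize;
  const struct { int step; int divisor; } rounds[] = {
      {97, 8}, {23, 16}, {7, 32}, {3, 256}};
  for (const auto& r : rounds) {
    int threshold = (slots_per_page / r.step) / r.divisor;
    // 'threshold' sampled hits represent about threshold * step entries,
    // i.e. roughly slots_per_page / divisor slots on that page.
    std::printf("step %2d: threshold %5d (~%6d entries, ~1/%d of the page)\n",
                r.step, threshold, threshold * r.step, r.divisor);
  }
  return 0;
}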
206 
207 
208 // Sample the store buffer to see if some pages are taking up a lot of space
209 // in the store buffer.
210 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
211  PointerChunkIterator it(heap_);
212  MemoryChunk* chunk;
213  while ((chunk = it.next()) != NULL) {
214  chunk->set_store_buffer_counter(0);
215  }
216  bool created_new_scan_on_scavenge_pages = false;
217  MemoryChunk* previous_chunk = NULL;
218  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
219  Address addr = *p;
220  MemoryChunk* containing_chunk = NULL;
221  if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
222  containing_chunk = previous_chunk;
223  } else {
224  containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
225  }
226  int old_counter = containing_chunk->store_buffer_counter();
227  if (old_counter >= threshold) {
228  containing_chunk->set_scan_on_scavenge(true);
229  created_new_scan_on_scavenge_pages = true;
230  }
231  containing_chunk->set_store_buffer_counter(old_counter + 1);
232  previous_chunk = containing_chunk;
233  }
234  if (created_new_scan_on_scavenge_pages) {
235  Filter(MemoryChunk::SCAN_ON_SCAVENGE);
236  }
237  old_buffer_is_filtered_ = true;
238 }
239 
240 
241 void StoreBuffer::Filter(int flag) {
242  Address* new_top = old_start_;
243  MemoryChunk* previous_chunk = NULL;
244  for (Address* p = old_start_; p < old_top_; p++) {
245  Address addr = *p;
246  MemoryChunk* containing_chunk = NULL;
247  if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
248  containing_chunk = previous_chunk;
249  } else {
250  containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
251  previous_chunk = containing_chunk;
252  }
253  if (!containing_chunk->IsFlagSet(flag)) {
254  *new_top++ = addr;
255  }
256  }
257  old_top_ = new_top;
258 
259  // Filtering hash sets are inconsistent with the store buffer after this
260  // operation.
261  ClearFilteringHashSets();
262 }
263 
264 
265 void StoreBuffer::SortUniq() {
266  Compact();
267  if (old_buffer_is_sorted_) return;
268  std::sort(old_start_, old_top_);
269  Uniq();
270 
271  old_buffer_is_sorted_ = true;
272 
273  // Filtering hash sets are inconsistent with the store buffer after this
274  // operation.
275  ClearFilteringHashSets();
276 }
277 
278 
279 bool StoreBuffer::PrepareForIteration() {
280  Compact();
281  PointerChunkIterator it(heap_);
282  MemoryChunk* chunk;
283  bool page_has_scan_on_scavenge_flag = false;
284  while ((chunk = it.next()) != NULL) {
285  if (chunk->scan_on_scavenge()) {
286  page_has_scan_on_scavenge_flag = true;
287  break;
288  }
289  }
290 
291  if (page_has_scan_on_scavenge_flag) {
292  Filter(MemoryChunk::SCAN_ON_SCAVENGE);
293  }
294 
295  // Filtering hash sets are inconsistent with the store buffer after
296  // iteration.
297  ClearFilteringHashSets();
298 
299  return page_has_scan_on_scavenge_flag;
300 }
301 
302 
303 #ifdef DEBUG
304 void StoreBuffer::Clean() {
305  ClearFilteringHashSets();
306  Uniq(); // Also removes things that no longer point to new space.
307  EnsureSpace(kStoreBufferSize / 2);
308 }
309 
310 
311 static Address* in_store_buffer_1_element_cache = NULL;
312 
313 
314 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
315  if (!FLAG_enable_slow_asserts) return true;
316  if (in_store_buffer_1_element_cache != NULL &&
317  *in_store_buffer_1_element_cache == cell_address) {
318  return true;
319  }
320  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
321  for (Address* current = top - 1; current >= start_; current--) {
322  if (*current == cell_address) {
323  in_store_buffer_1_element_cache = current;
324  return true;
325  }
326  }
327  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
328  if (*current == cell_address) {
329  in_store_buffer_1_element_cache = current;
330  return true;
331  }
332  }
333  return false;
334 }
335 #endif
336 
337 
338 void StoreBuffer::ClearFilteringHashSets() {
339  if (!hash_sets_are_empty_) {
340  memset(reinterpret_cast<void*>(hash_set_1_),
341  0,
342  sizeof(uintptr_t) * kHashSetLength);
343  memset(reinterpret_cast<void*>(hash_set_2_),
344  0,
345  sizeof(uintptr_t) * kHashSetLength);
346  hash_sets_are_empty_ = true;
347  }
348 }
349 
350 
351 void StoreBuffer::GCPrologue() {
352  ClearFilteringHashSets();
353  during_gc_ = true;
354 }
355 
356 
357 #ifdef VERIFY_HEAP
358 static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
359  // Do nothing.
360 }
361 
362 
363 void StoreBuffer::VerifyPointers(PagedSpace* space,
364  RegionCallback region_callback) {
365  PageIterator it(space);
366 
367  while (it.has_next()) {
368  Page* page = it.next();
369  FindPointersToNewSpaceOnPage(
370  reinterpret_cast<PagedSpace*>(page->owner()),
371  page,
372  region_callback,
373  &DummyScavengePointer,
374  false);
375  }
376 }
377 
378 
379 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
380  LargeObjectIterator it(space);
381  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
382  if (object->IsFixedArray()) {
383  Address slot_address = object->address();
384  Address end = object->address() + object->Size();
385 
386  while (slot_address < end) {
387  HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
388  // When we are not in GC, the Heap::InNewSpace() predicate itself
389  // checks that any pointer it reports as being in new space points
390  // into the active semispace.
391  heap_->InNewSpace(*slot);
392  slot_address += kPointerSize;
393  }
394  }
395  }
396 }
397 #endif
398 
399 
400 void StoreBuffer::Verify() {
401 #ifdef VERIFY_HEAP
402  VerifyPointers(heap_->old_pointer_space(),
403  &StoreBuffer::FindPointersToNewSpaceInRegion);
404  VerifyPointers(heap_->map_space(),
405  &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
406  VerifyPointers(heap_->lo_space());
407 #endif
408 }
409 
410 
411 void StoreBuffer::GCEpilogue() {
412  during_gc_ = false;
413 #ifdef VERIFY_HEAP
414  if (FLAG_verify_heap) {
415  Verify();
416  }
417 #endif
418 }
419 
420 
421 void StoreBuffer::FindPointersToNewSpaceInRegion(
422  Address start,
423  Address end,
424  ObjectSlotCallback slot_callback,
425  bool clear_maps) {
426  for (Address slot_address = start;
427  slot_address < end;
428  slot_address += kPointerSize) {
429  Object** slot = reinterpret_cast<Object**>(slot_address);
430  if (heap_->InNewSpace(*slot)) {
431  HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
432  ASSERT(object->IsHeapObject());
433  // The new space object was not promoted if it still contains a map
434  // pointer. Clear the map field now lazily.
435  if (clear_maps) ClearDeadObject(object);
436  slot_callback(reinterpret_cast<HeapObject**>(slot), object);
437  if (heap_->InNewSpace(*slot)) {
438  EnterDirectlyIntoStoreBuffer(slot_address);
439  }
440  }
441  }
442 }
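// A standalone sketch (not part of store-buffer.cc) of the control flow in
// FindPointersToNewSpaceInRegion: walk a range of pointer-sized slots, invoke
// the callback for each slot whose value a predicate accepts, and re-record
// the slot if the (possibly updated) value is still accepted afterwards. The
// names below are invented for the sketch; they are not V8's types.
#include <vector>

typedef void* Value;
typedef Value* Slot;

void ScanSlotRegion(Slot begin, Slot end,
                    bool (*in_new_space)(Value),
                    void (*visit)(Slot),
                    std::vector<Slot>* remembered) {
  for (Slot slot = begin; slot < end; slot++) {
    if (in_new_space(*slot)) {
      visit(slot);                    // May overwrite *slot (e.g. promotion).
      if (in_new_space(*slot)) {
        remembered->push_back(slot);  // Still interesting: re-record it.
      }
    }
  }
}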
443 
444 
445 // Compute start address of the first map following given addr.
446 static inline Address MapStartAlign(Address addr) {
447  Address page = Page::FromAddress(addr)->area_start();
448  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
449 }
450 
451 
452 // Compute end address of the first map preceding given addr.
453 static inline Address MapEndAlign(Address addr) {
454  Address page = Page::FromAllocationTop(addr)->area_start();
455  return page + ((addr - page) / Map::kSize * Map::kSize);
456 }
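// A standalone sketch (not part of store-buffer.cc) of the arithmetic in
// MapStartAlign and MapEndAlign: plain round-up / round-down to a multiple of
// the map size, measured from the page's area_start(). The base address and
// map size below are made-up values for illustration only.
#include <cassert>
#include <cstdint>

std::uintptr_t RoundUpToMultiple(std::uintptr_t addr, std::uintptr_t base,
                                 std::uintptr_t size) {
  return base + (((addr - base) + (size - 1)) / size) * size;
}

std::uintptr_t RoundDownToMultiple(std::uintptr_t addr, std::uintptr_t base,
                                   std::uintptr_t size) {
  return base + ((addr - base) / size) * size;
}

int main() {
  const std::uintptr_t base = 0x10000;  // Plays the role of area_start().
  const std::uintptr_t size = 88;       // Hypothetical Map::kSize.
  assert(RoundUpToMultiple(base + 1, base, size) == base + 88);
  assert(RoundDownToMultiple(base + 100, base, size) == base + 88);
  assert(RoundUpToMultiple(base, base, size) == base);
  return 0;
}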
457 
458 
459 void StoreBuffer::FindPointersToNewSpaceInMaps(
460  Address start,
461  Address end,
462  ObjectSlotCallback slot_callback,
463  bool clear_maps) {
464  ASSERT(MapStartAlign(start) == start);
465  ASSERT(MapEndAlign(end) == end);
466 
467  Address map_address = start;
468  while (map_address < end) {
469  ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
470  ASSERT(Memory::Object_at(map_address)->IsMap());
471 
472  Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
473  Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
474 
475  FindPointersToNewSpaceInRegion(pointer_fields_start,
476  pointer_fields_end,
477  slot_callback,
478  clear_maps);
479  map_address += Map::kSize;
480  }
481 }
482 
483 
484 void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
485  Address start,
486  Address end,
487  ObjectSlotCallback slot_callback,
488  bool clear_maps) {
489  Address map_aligned_start = MapStartAlign(start);
490  Address map_aligned_end = MapEndAlign(end);
491 
492  ASSERT(map_aligned_start == start);
493  ASSERT(map_aligned_end == end);
494 
495  FindPointersToNewSpaceInMaps(map_aligned_start,
496  map_aligned_end,
497  slot_callback,
498  clear_maps);
499 }
500 
501 
502 // This function iterates over all the pointers in a paged space in the heap,
503 // looking for pointers into new space. Within the pages there may be dead
504 // objects that have not been overwritten by free spaces or fillers because of
505 // lazy sweeping. These dead objects may not contain pointers to new space.
506 // The garbage areas that have been swept properly (these will normally be the
507 // large ones) will be marked with free space and filler map words. In
508 // addition any area that has never been used at all for object allocation must
509 // be marked with a free space or filler. Because the free space and filler
510 // maps do not move we can always recognize these even after a compaction.
511 // Normal objects like FixedArrays and JSObjects should not contain references
512 // to these maps. Constant pool array objects may contain references to these
513 // maps, however, constant pool arrays cannot contain pointers to new space
514 // objects, therefore they are skipped. The special garbage section (see
515 // comment in spaces.h) is skipped since it can contain absolutely anything.
516 // Any objects that are allocated during iteration may or may not be visited by
517 // the iteration, but they will not be partially visited.
518 void StoreBuffer::FindPointersToNewSpaceOnPage(
519  PagedSpace* space,
520  Page* page,
521  RegionCallback region_callback,
522  ObjectSlotCallback slot_callback,
523  bool clear_maps) {
524  Address visitable_start = page->area_start();
525  Address end_of_page = page->area_end();
526 
527  Address visitable_end = visitable_start;
528 
529  Object* free_space_map = heap_->free_space_map();
530  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
531  Object* constant_pool_array_map = heap_->constant_pool_array_map();
532 
533  while (visitable_end < end_of_page) {
534  Object* o = *reinterpret_cast<Object**>(visitable_end);
535  // Skip fillers or constant pool arrays (which never contain new-space
536  // pointers but can contain pointers which can be confused for fillers)
537  // but not things that look like fillers in the special garbage section
538  // which can contain anything.
539  if (o == free_space_map ||
540  o == two_pointer_filler_map ||
541  o == constant_pool_array_map ||
542  (visitable_end == space->top() && visitable_end != space->limit())) {
543  if (visitable_start != visitable_end) {
544  // After calling this the special garbage section may have moved.
545  (this->*region_callback)(visitable_start,
546  visitable_end,
547  slot_callback,
548  clear_maps);
549  if (visitable_end >= space->top() && visitable_end < space->limit()) {
550  visitable_end = space->limit();
551  visitable_start = visitable_end;
552  continue;
553  }
554  }
555  if (visitable_end == space->top() && visitable_end != space->limit()) {
556  visitable_start = visitable_end = space->limit();
557  } else {
558  // At this point we are either at the start of a filler, a
559  // constant pool array, or we are at the point where the space->top()
560  // used to be before the visit_pointer_region call above. Either way we
561  // can skip the object at the current spot: We don't promise to visit
562  // objects allocated during heap traversal, and if space->top() moved
563  // then it must be because an object was allocated at this point.
564  visitable_start =
565  visitable_end + HeapObject::FromAddress(visitable_end)->Size();
566  visitable_end = visitable_start;
567  }
568  } else {
569  ASSERT(o != free_space_map);
570  ASSERT(o != two_pointer_filler_map);
571  ASSERT(o != constant_pool_array_map);
572  ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
573  visitable_end += kPointerSize;
574  }
575  }
576  ASSERT(visitable_end == end_of_page);
577  if (visitable_start != visitable_end) {
578  (this->*region_callback)(visitable_start,
579  visitable_end,
580  slot_callback,
581  clear_maps);
582  }
583 }
584 
585 
586 void StoreBuffer::IteratePointersInStoreBuffer(
587  ObjectSlotCallback slot_callback,
588  bool clear_maps) {
589  Address* limit = old_top_;
590  old_top_ = old_start_;
591  {
592  DontMoveStoreBufferEntriesScope scope(this);
593  for (Address* current = old_start_; current < limit; current++) {
594 #ifdef DEBUG
595  Address* saved_top = old_top_;
596 #endif
597  Object** slot = reinterpret_cast<Object**>(*current);
598  Object* object = *slot;
599  if (heap_->InFromSpace(object)) {
600  HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
601  // The new space object was not promoted if it still contains a map
602  // pointer. Clear the map field now lazily.
603  if (clear_maps) ClearDeadObject(heap_object);
604  slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
605  if (heap_->InNewSpace(*slot)) {
606  EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
607  }
608  }
609  ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
610  }
611  }
612 }
613 
614 
615 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
616  IteratePointersToNewSpace(slot_callback, false);
617 }
618 
619 
620 void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
621  ObjectSlotCallback slot_callback) {
622  IteratePointersToNewSpace(slot_callback, true);
623 }
624 
625 
626 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
627  bool clear_maps) {
628  // We do not sort or remove duplicated entries from the store buffer
629  // because we expect the callback to rebuild the store buffer, thereby
630  // removing all duplicates and pointers to old space.
631  bool some_pages_to_scan = PrepareForIteration();
632 
633  // TODO(gc): we want to skip slots on evacuation candidates
634  // but we can't simply figure that out from slot address
635  // because slot can belong to a large object.
636  IteratePointersInStoreBuffer(slot_callback, clear_maps);
637 
638  // We are done scanning all the pointers that were in the store buffer, but
639  // there may be some pages marked scan_on_scavenge that have pointers to new
640  // space that are not in the store buffer. We must scan them now. As we
641  // scan, the surviving pointers to new space will be added to the store
642  // buffer. If there are still a lot of pointers to new space then we will
643  // keep the scan_on_scavenge flag on the page and discard the pointers that
644  // were added to the store buffer. If there are not many pointers to new
645  // space left on the page we will keep the pointers in the store buffer and
646  // remove the flag from the page.
647  if (some_pages_to_scan) {
648  if (callback_ != NULL) {
649  (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
650  }
651  PointerChunkIterator it(heap_);
652  MemoryChunk* chunk;
653  while ((chunk = it.next()) != NULL) {
654  if (chunk->scan_on_scavenge()) {
655  chunk->set_scan_on_scavenge(false);
656  if (callback_ != NULL) {
657  (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
658  }
659  if (chunk->owner() == heap_->lo_space()) {
660  LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
661  HeapObject* array = large_page->GetObject();
662  ASSERT(array->IsFixedArray());
663  Address start = array->address();
664  Address end = start + array->Size();
665  FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
666  } else {
667  Page* page = reinterpret_cast<Page*>(chunk);
668  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
669  FindPointersToNewSpaceOnPage(
670  owner,
671  page,
672  (owner == heap_->map_space() ?
673  &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
674  &StoreBuffer::FindPointersToNewSpaceInRegion),
675  slot_callback,
676  clear_maps);
677  }
678  }
679  }
680  if (callback_ != NULL) {
681  (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
682  }
683  }
684 }
685 
686 
687 void StoreBuffer::Compact() {
688  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
689 
690  if (top == start_) return;
691 
692  // There's no check of the limit in the loop below so we check here for
693  // the worst case (compaction doesn't eliminate any pointers).
694  ASSERT(top <= limit_);
695  heap_->public_set_store_buffer_top(start_);
696  EnsureSpace(top - start_);
697  ASSERT(may_move_store_buffer_entries_);
698  // Goes through the addresses in the store buffer attempting to remove
699  // duplicates. In the interest of speed this is a lossy operation. Some
700  // duplicates will remain. We have two hash sets with different hash
701  // functions to reduce the number of unnecessary clashes.
702  hash_sets_are_empty_ = false; // Hash sets are in use.
703  for (Address* current = start_; current < top; current++) {
704  ASSERT(!heap_->cell_space()->Contains(*current));
705  ASSERT(!heap_->code_space()->Contains(*current));
706  ASSERT(!heap_->old_data_space()->Contains(*current));
707  uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
708  // Shift out the last bits including any tags.
709  int_addr >>= kPointerSizeLog2;
710  // The upper part of an address is basically random because of ASLR and OS
711  // non-determinism, so we use only the bits within a page for hashing to
712  // make v8's behavior (more) deterministic.
713  uintptr_t hash_addr =
714  int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
715  int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
716  (kHashSetLength - 1));
717  if (hash_set_1_[hash1] == int_addr) continue;
718  uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
719  hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
720  hash2 &= (kHashSetLength - 1);
721  if (hash_set_2_[hash2] == int_addr) continue;
722  if (hash_set_1_[hash1] == 0) {
723  hash_set_1_[hash1] = int_addr;
724  } else if (hash_set_2_[hash2] == 0) {
725  hash_set_2_[hash2] = int_addr;
726  } else {
727  // Rather than slowing down we just throw away some entries. This will
728  // cause some duplicates to remain undetected.
729  hash_set_1_[hash1] = int_addr;
730  hash_set_2_[hash2] = 0;
731  }
732  old_buffer_is_sorted_ = false;
733  old_buffer_is_filtered_ = false;
734  *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
735  ASSERT(old_top_ <= old_limit_);
736  }
737  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
738 }
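// A standalone sketch (not part of store-buffer.cc) of the duplicate filter
// Compact() builds from hash_set_1_ and hash_set_2_: two small direct-mapped
// tables with different hash mixes, where collisions simply evict an entry,
// so some duplicates slip through (harmless here, since the old buffer
// tolerates duplicates). The table size and hash mixing below follow the
// shape of the code above but use made-up constants; zero is reserved as the
// "empty slot" marker, as in the code above.
#include <cstdint>
#include <cstring>

class LossyDuplicateFilter {
 public:
  LossyDuplicateFilter() { Clear(); }

  void Clear() {
    std::memset(table1_, 0, sizeof(table1_));
    std::memset(table2_, 0, sizeof(table2_));
  }

  // Returns true if the key should be kept (not recognized as a recent
  // duplicate), false if it is filtered out as a duplicate.
  bool Insert(std::uintptr_t key) {
    std::uintptr_t h1 = (key ^ (key >> kLengthLog2)) & (kLength - 1);
    if (table1_[h1] == key) return false;
    std::uintptr_t h2 = key - (key >> kLengthLog2);
    h2 ^= h2 >> (kLengthLog2 * 2);
    h2 &= (kLength - 1);
    if (table2_[h2] == key) return false;
    if (table1_[h1] == 0) {
      table1_[h1] = key;
    } else if (table2_[h2] == 0) {
      table2_[h2] = key;
    } else {
      // Both slots taken: evict rather than probe, accepting that future
      // duplicates of the evicted key may go undetected.
      table1_[h1] = key;
      table2_[h2] = 0;
    }
    return true;
  }

 private:
  static const int kLengthLog2 = 10;  // Assumed table size: 1024 slots.
  static const int kLength = 1 << kLengthLog2;
  std::uintptr_t table1_[kLength];
  std::uintptr_t table2_[kLength];
};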
739 
740 } } // namespace v8::internal