v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
store-buffer.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "store-buffer.h"
31 #include "store-buffer-inl.h"
32 #include "v8-counters.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 StoreBuffer::StoreBuffer(Heap* heap)
38  : heap_(heap),
39  start_(NULL),
40  limit_(NULL),
41  old_start_(NULL),
42  old_limit_(NULL),
43  old_top_(NULL),
44  old_reserved_limit_(NULL),
45  old_buffer_is_sorted_(false),
46  old_buffer_is_filtered_(false),
47  during_gc_(false),
48  store_buffer_rebuilding_enabled_(false),
49  callback_(NULL),
50  may_move_store_buffer_entries_(true),
51  virtual_memory_(NULL),
52  hash_set_1_(NULL),
53  hash_set_2_(NULL),
54  hash_sets_are_empty_(true) {
55 }
56 
57 
58 void StoreBuffer::SetUp() {
59  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
60  uintptr_t start_as_int =
61  reinterpret_cast<uintptr_t>(virtual_memory_->address());
62  start_ =
63  reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
64  limit_ = start_ + (kStoreBufferSize / kPointerSize);
65 
66  old_virtual_memory_ =
67  new VirtualMemory(kOldStoreBufferLength * kPointerSize);
68  old_top_ = old_start_ =
69  reinterpret_cast<Address*>(old_virtual_memory_->address());
70  // Don't know the alignment requirements of the OS, but it is certainly not
71  // less than 0xfff.
72  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
73  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
74  ASSERT(initial_length > 0);
75  ASSERT(initial_length <= kOldStoreBufferLength);
76  old_limit_ = old_start_ + initial_length;
77  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
78 
79  CHECK(old_virtual_memory_->Commit(
80  reinterpret_cast<void*>(old_start_),
81  (old_limit_ - old_start_) * kPointerSize,
82  false));
83 
84  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
85  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
86  Address* vm_limit = reinterpret_cast<Address*>(
87  reinterpret_cast<char*>(virtual_memory_->address()) +
88  virtual_memory_->size());
89  ASSERT(start_ <= vm_limit);
90  ASSERT(limit_ <= vm_limit);
91  USE(vm_limit);
92  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
93  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
94  0);
95 
96  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
97  kStoreBufferSize,
98  false)); // Not executable.
99  heap_->public_set_store_buffer_top(start_);
100 
101  hash_set_1_ = new uintptr_t[kHashSetLength];
102  hash_set_2_ = new uintptr_t[kHashSetLength];
103  hash_sets_are_empty_ = false;
104 
105  ClearFilteringHashSets();
106 }
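// Illustrative sketch (not part of the original file): why the RoundUp above
// makes the single-bit overflow check work. start_ is aligned to
// 2 * kStoreBufferSize inside a 3 * kStoreBufferSize reservation, so every
// slot address has the overflow bit clear and limit_ is the first address
// with it set; the write barrier can then detect a full buffer with one AND
// on the current top pointer. The constants below are assumptions standing
// in for kStoreBufferSize / kStoreBufferOverflowBit from store-buffer.h.
#include <cassert>
#include <cstdint>
int main() {
  const uintptr_t kSize = 128 * 1024;    // assumed buffer size in bytes
  const uintptr_t kOverflowBit = kSize;  // assumed: overflow bit == size
  uintptr_t reservation = 0x12345000;    // made-up, page-aligned base
  // RoundUp(reservation, 2 * kSize), as in SetUp().
  uintptr_t start = (reservation + 2 * kSize - 1) & ~(2 * kSize - 1);
  uintptr_t limit = start + kSize;
  assert((start & kOverflowBit) == 0);        // clear for every slot...
  assert((limit & kOverflowBit) != 0);        // ...set exactly at limit_,
  assert(((limit - 1) & kOverflowBit) == 0);  // as the ASSERTs above check.
  return 0;
}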
107 
108 
109 void StoreBuffer::TearDown() {
110  delete virtual_memory_;
111  delete old_virtual_memory_;
112  delete[] hash_set_1_;
113  delete[] hash_set_2_;
114  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
115  start_ = limit_ = NULL;
116  heap_->public_set_store_buffer_top(start_);
117 }
118 
119 
120 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
121  isolate->heap()->store_buffer()->Compact();
122 }
123 
124 
125 #if V8_TARGET_ARCH_X64
126 static int CompareAddresses(const void* void_a, const void* void_b) {
127  intptr_t a =
128  reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
129  intptr_t b =
130  reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
131  // Unfortunately if int is smaller than intptr_t there is no branch-free
132  // way to return a number with the same sign as the difference between the
133  // pointers.
134  if (a == b) return 0;
135  if (a < b) return -1;
136  ASSERT(a > b);
137  return 1;
138 }
139 #else
140 static int CompareAddresses(const void* void_a, const void* void_b) {
141  intptr_t a =
142  reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
143  intptr_t b =
144  reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
145  ASSERT(sizeof(1) == sizeof(a));
146  // Shift down to avoid wraparound.
147  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
148 }
149 #endif
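// Illustrative sketch (not from this file): the wraparound the shifted
// comparator avoids. With 32-bit pointers, the raw difference of two
// addresses can exceed INT32_MAX and flip sign, so qsort would misorder
// them; shifting both operands down by kPointerSizeLog2 first keeps the
// difference in range. The addresses below are made up; int32_t stands in
// for a 32-bit intptr_t.
#include <cstdint>
#include <cstdio>
int main() {
  int32_t a = 0x7ffff000;                         // "high" address
  int32_t b = static_cast<int32_t>(0x90000000u);  // address with top bit set
  // Naive difference, computed via uint32_t to avoid signed-overflow UB:
  // mathematically a > b, but the value wraps and comes out negative.
  int32_t naive = static_cast<int32_t>(
      static_cast<uint32_t>(a) - static_cast<uint32_t>(b));
  // Shifted difference (kPointerSizeLog2 == 2 on a 32-bit target): both
  // operands now fit in 30 bits, so the subtraction cannot wrap.
  int32_t shifted = (a >> 2) - (b >> 2);
  printf("naive = %d, shifted = %d\n", naive, shifted);  // negative vs > 0
  return 0;
}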
150 
151 
152 void StoreBuffer::Uniq() {
153  // Remove adjacent duplicates and cells that do not point at new space.
154  Address previous = NULL;
155  Address* write = old_start_;
156  ASSERT(may_move_store_buffer_entries_);
157  for (Address* read = old_start_; read < old_top_; read++) {
158  Address current = *read;
159  if (current != previous) {
160  if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
161  *write++ = current;
162  }
163  }
164  previous = current;
165  }
166  old_top_ = write;
167 }
168 
169 
170 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
171  while (old_limit_ - old_top_ < space_needed &&
172  old_limit_ < old_reserved_limit_) {
173  size_t grow = old_limit_ - old_start_; // Double size.
174  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
175  grow * kPointerSize,
176  false));
177  old_limit_ += grow;
178  }
179 
180  if (old_limit_ - old_top_ >= space_needed) return;
181 
182  if (old_buffer_is_filtered_) return;
183  ASSERT(may_move_store_buffer_entries_);
184  Compact();
185 
186  old_buffer_is_filtered_ = true;
187  bool page_has_scan_on_scavenge_flag = false;
188 
189  PointerChunkIterator it(heap_);
190  MemoryChunk* chunk;
191  while ((chunk = it.next()) != NULL) {
192  if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
193  }
194 
195  if (page_has_scan_on_scavenge_flag) {
196  Filter(MemoryChunk::SCAN_ON_SCAVENGE);
197  }
198 
199  // If filtering out the entries from scan_on_scavenge pages got us down to
200  // less than half full, then we are satisfied with that.
201  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
202 
203  // Sample 1 entry in 97 and filter out the pages where we estimate that more
204  // than 1 in 8 pointers are to new space.
205  static const int kSampleFinenesses = 5;
206  static const struct Samples {
207  int prime_sample_step;
208  int threshold;
209  } samples[kSampleFinenesses] = {
210  { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
211  { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
212  { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
213  { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
214  { 1, 0}
215  };
216  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
217  ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
218  // As a last resort we mark all pages as being exempt from the store buffer.
219  ASSERT(i != 0 || old_top_ == old_start_);
220  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
221  }
222  UNREACHABLE();
223 }
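// A rough worked example (not from this file) of the thresholds in the
// samples table above. Each threshold is approximately the number of
// sampled hits expected from a page whose slots are 1-in-N new-space
// pointers, all sitting in the store buffer. The page and pointer sizes
// below are assumptions; the real values are Page::kPageSize and
// kPointerSize.
#include <cstdio>
int main() {
  const int kPageSize = 1 << 20;                         // assume 1 MB pages
  const int kPointerSize = 8;                            // assume 64-bit
  const int slots_per_page = kPageSize / kPointerSize;   // 131072
  // Row {97, .../8}: sampling every 97th buffer entry, a page with 1 in 8
  // of its slots pointing into new space is expected to get ~168 hits.
  printf("step 97 threshold = %d\n", (slots_per_page / 97) / 8);
  // Row {3, .../256}: a much finer sample with a much stricter cutoff.
  printf("step 3 threshold = %d\n", (slots_per_page / 3) / 256);
  // The final row {1, 0} examines every entry and exempts any page that
  // still has one, which is why the loop in EnsureSpace() must terminate.
  return 0;
}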
224 
225 
226 // Sample the store buffer to see if some pages are taking up a lot of space
227 // in the store buffer.
228 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
229  PointerChunkIterator it(heap_);
230  MemoryChunk* chunk;
231  while ((chunk = it.next()) != NULL) {
232  chunk->set_store_buffer_counter(0);
233  }
234  bool created_new_scan_on_scavenge_pages = false;
235  MemoryChunk* previous_chunk = NULL;
236  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
237  Address addr = *p;
238  MemoryChunk* containing_chunk = NULL;
239  if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
240  containing_chunk = previous_chunk;
241  } else {
242  containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
243  }
244  int old_counter = containing_chunk->store_buffer_counter();
245  if (old_counter == threshold) {
246  containing_chunk->set_scan_on_scavenge(true);
247  created_new_scan_on_scavenge_pages = true;
248  }
249  containing_chunk->set_store_buffer_counter(old_counter + 1);
250  previous_chunk = containing_chunk;
251  }
252  if (created_new_scan_on_scavenge_pages) {
253  Filter(MemoryChunk::SCAN_ON_SCAVENGE);
254  }
255  old_buffer_is_filtered_ = true;
256 }
257 
258 
259 void StoreBuffer::Filter(int flag) {
260  Address* new_top = old_start_;
261  MemoryChunk* previous_chunk = NULL;
262  for (Address* p = old_start_; p < old_top_; p++) {
263  Address addr = *p;
264  MemoryChunk* containing_chunk = NULL;
265  if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
266  containing_chunk = previous_chunk;
267  } else {
268  containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
269  previous_chunk = containing_chunk;
270  }
271  if (!containing_chunk->IsFlagSet(flag)) {
272  *new_top++ = addr;
273  }
274  }
275  old_top_ = new_top;
276 
277  // Filtering hash sets are inconsistent with the store buffer after this
278  // operation.
279  ClearFilteringHashSets();
280 }
281 
282 
283 void StoreBuffer::SortUniq() {
284  Compact();
285  if (old_buffer_is_sorted_) return;
286  qsort(reinterpret_cast<void*>(old_start_),
287  old_top_ - old_start_,
288  sizeof(*old_top_),
289  &CompareAddresses);
290  Uniq();
291 
292  old_buffer_is_sorted_ = true;
293 
294  // Filtering hash sets are inconsistent with the store buffer after this
295  // operation.
296  ClearFilteringHashSets();
297 }
298 
299 
300 bool StoreBuffer::PrepareForIteration() {
301  Compact();
302  PointerChunkIterator it(heap_);
303  MemoryChunk* chunk;
304  bool page_has_scan_on_scavenge_flag = false;
305  while ((chunk = it.next()) != NULL) {
306  if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
307  }
308 
309  if (page_has_scan_on_scavenge_flag) {
310  Filter(MemoryChunk::SCAN_ON_SCAVENGE);
311  }
312 
313  // Filtering hash sets are inconsistent with the store buffer after
314  // iteration.
315  ClearFilteringHashSets();
316 
317  return page_has_scan_on_scavenge_flag;
318 }
319 
320 
321 #ifdef DEBUG
322 void StoreBuffer::Clean() {
323  ClearFilteringHashSets();
324  Uniq(); // Also removes things that no longer point to new space.
325  CheckForFullBuffer();
326 }
327 
328 
329 static Address* in_store_buffer_1_element_cache = NULL;
330 
331 
332 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
333  if (!FLAG_enable_slow_asserts) return true;
334  if (in_store_buffer_1_element_cache != NULL &&
335  *in_store_buffer_1_element_cache == cell_address) {
336  return true;
337  }
338  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
339  for (Address* current = top - 1; current >= start_; current--) {
340  if (*current == cell_address) {
341  in_store_buffer_1_element_cache = current;
342  return true;
343  }
344  }
345  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
346  if (*current == cell_address) {
347  in_store_buffer_1_element_cache = current;
348  return true;
349  }
350  }
351  return false;
352 }
353 #endif
354 
355 
356 void StoreBuffer::ClearFilteringHashSets() {
357  if (!hash_sets_are_empty_) {
358  memset(reinterpret_cast<void*>(hash_set_1_),
359  0,
360  sizeof(uintptr_t) * kHashSetLength);
361  memset(reinterpret_cast<void*>(hash_set_2_),
362  0,
363  sizeof(uintptr_t) * kHashSetLength);
364  hash_sets_are_empty_ = true;
365  }
366 }
367 
368 
369 void StoreBuffer::GCPrologue() {
370  ClearFilteringHashSets();
371  during_gc_ = true;
372 }
373 
374 
375 #ifdef VERIFY_HEAP
376 static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
377  // Do nothing.
378 }
379 
380 
381 void StoreBuffer::VerifyPointers(PagedSpace* space,
382  RegionCallback region_callback) {
383  PageIterator it(space);
384 
385  while (it.has_next()) {
386  Page* page = it.next();
387  FindPointersToNewSpaceOnPage(
388  reinterpret_cast<PagedSpace*>(page->owner()),
389  page,
390  region_callback,
391  &DummyScavengePointer);
392  }
393 }
394 
395 
396 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
397  LargeObjectIterator it(space);
398  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
399  if (object->IsFixedArray()) {
400  Address slot_address = object->address();
401  Address end = object->address() + object->Size();
402 
403  while (slot_address < end) {
404  HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
405  // When we are not in GC the Heap::InNewSpace() predicate
406  // checks that pointers which satisfy predicate point into
407  // the active semispace.
408  heap_->InNewSpace(*slot);
409  slot_address += kPointerSize;
410  }
411  }
412  }
413 }
414 #endif
415 
416 
417 void StoreBuffer::Verify() {
418 #ifdef VERIFY_HEAP
419  VerifyPointers(heap_->old_pointer_space(),
420  &StoreBuffer::FindPointersToNewSpaceInRegion);
421  VerifyPointers(heap_->map_space(),
422  &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
423  VerifyPointers(heap_->lo_space());
424 #endif
425 }
426 
427 
428 void StoreBuffer::GCEpilogue() {
429  during_gc_ = false;
430 #ifdef VERIFY_HEAP
431  if (FLAG_verify_heap) {
432  Verify();
433  }
434 #endif
435 }
436 
437 
438 void StoreBuffer::FindPointersToNewSpaceInRegion(
439  Address start, Address end, ObjectSlotCallback slot_callback) {
440  for (Address slot_address = start;
441  slot_address < end;
442  slot_address += kPointerSize) {
443  Object** slot = reinterpret_cast<Object**>(slot_address);
444  if (heap_->InNewSpace(*slot)) {
445  HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
446  ASSERT(object->IsHeapObject());
447  slot_callback(reinterpret_cast<HeapObject**>(slot), object);
448  if (heap_->InNewSpace(*slot)) {
449  EnterDirectlyIntoStoreBuffer(slot_address);
450  }
451  }
452  }
453 }
454 
455 
456 // Compute start address of the first map following given addr.
457 static inline Address MapStartAlign(Address addr) {
458  Address page = Page::FromAddress(addr)->area_start();
459  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
460 }
461 
462 
463 // Compute end address of the first map preceding given addr.
464 static inline Address MapEndAlign(Address addr) {
465  Address page = Page::FromAllocationTop(addr)->area_start();
466  return page + ((addr - page) / Map::kSize * Map::kSize);
467 }
468 
469 
470 void StoreBuffer::FindPointersToNewSpaceInMaps(
471  Address start,
472  Address end,
473  ObjectSlotCallback slot_callback) {
474  ASSERT(MapStartAlign(start) == start);
475  ASSERT(MapEndAlign(end) == end);
476 
477  Address map_address = start;
478  while (map_address < end) {
479  ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
480  ASSERT(Memory::Object_at(map_address)->IsMap());
481 
482  Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
483  Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
484 
485  FindPointersToNewSpaceInRegion(pointer_fields_start,
486  pointer_fields_end,
487  slot_callback);
488  map_address += Map::kSize;
489  }
490 }
491 
492 
493 void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
494  Address start,
495  Address end,
496  ObjectSlotCallback slot_callback) {
497  Address map_aligned_start = MapStartAlign(start);
498  Address map_aligned_end = MapEndAlign(end);
499 
500  ASSERT(map_aligned_start == start);
501  ASSERT(map_aligned_end == end);
502 
503  FindPointersToNewSpaceInMaps(map_aligned_start,
504  map_aligned_end,
505  slot_callback);
506 }
507 
508 
509 // This function iterates over all the pointers in a paged space in the heap,
510 // looking for pointers into new space. Within the pages there may be dead
511 // objects that have not been overwritten by free spaces or fillers because of
512 // lazy sweeping. These dead objects may not contain pointers to new space.
513 // The garbage areas that have been swept properly (these will normally be the
514 // large ones) will be marked with free space and filler map words. In
515 // addition any area that has never been used at all for object allocation must
516 // be marked with a free space or filler. Because the free space and filler
517 // maps do not move we can always recognize these even after a compaction.
518 // Normal objects like FixedArrays and JSObjects should not contain references
519 // to these maps. The special garbage section (see comment in spaces.h) is
520 // skipped since it can contain absolutely anything. Any objects that are
521 // allocated during iteration may or may not be visited by the iteration, but
522 // they will not be partially visited.
523 void StoreBuffer::FindPointersToNewSpaceOnPage(
524  PagedSpace* space,
525  Page* page,
526  RegionCallback region_callback,
527  ObjectSlotCallback slot_callback) {
528  Address visitable_start = page->area_start();
529  Address end_of_page = page->area_end();
530 
531  Address visitable_end = visitable_start;
532 
533  Object* free_space_map = heap_->free_space_map();
534  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
535 
536  while (visitable_end < end_of_page) {
537  Object* o = *reinterpret_cast<Object**>(visitable_end);
538  // Skip fillers but not things that look like fillers in the special
539  // garbage section which can contain anything.
540  if (o == free_space_map ||
541  o == two_pointer_filler_map ||
542  (visitable_end == space->top() && visitable_end != space->limit())) {
543  if (visitable_start != visitable_end) {
544  // After calling this the special garbage section may have moved.
545  (this->*region_callback)(visitable_start,
546  visitable_end,
547  slot_callback);
548  if (visitable_end >= space->top() && visitable_end < space->limit()) {
549  visitable_end = space->limit();
550  visitable_start = visitable_end;
551  continue;
552  }
553  }
554  if (visitable_end == space->top() && visitable_end != space->limit()) {
555  visitable_start = visitable_end = space->limit();
556  } else {
557  // At this point we are either at the start of a filler or we are at
558  // the point where the space->top() used to be before the
559  // visit_pointer_region call above. Either way we can skip the
560  // object at the current spot: We don't promise to visit objects
561  // allocated during heap traversal, and if space->top() moved then it
562  // must be because an object was allocated at this point.
563  visitable_start =
564  visitable_end + HeapObject::FromAddress(visitable_end)->Size();
565  visitable_end = visitable_start;
566  }
567  } else {
568  ASSERT(o != free_space_map);
569  ASSERT(o != two_pointer_filler_map);
570  ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
571  visitable_end += kPointerSize;
572  }
573  }
574  ASSERT(visitable_end == end_of_page);
575  if (visitable_start != visitable_end) {
576  (this->*region_callback)(visitable_start,
577  visitable_end,
578  slot_callback);
579  }
580 }
581 
582 
583 void StoreBuffer::IteratePointersInStoreBuffer(
584  ObjectSlotCallback slot_callback) {
585  Address* limit = old_top_;
586  old_top_ = old_start_;
587  {
588  DontMoveStoreBufferEntriesScope scope(this);
589  for (Address* current = old_start_; current < limit; current++) {
590 #ifdef DEBUG
591  Address* saved_top = old_top_;
592 #endif
593  Object** slot = reinterpret_cast<Object**>(*current);
594  Object* object = *slot;
595  if (heap_->InFromSpace(object)) {
596  HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
597  slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
598  if (heap_->InNewSpace(*slot)) {
599  EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
600  }
601  }
602  ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
603  }
604  }
605 }
606 
607 
608 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
609  // We do not sort or remove duplicated entries from the store buffer because
610  // we expect that callback will rebuild the store buffer thus removing
611  // all duplicates and pointers to old space.
612  bool some_pages_to_scan = PrepareForIteration();
613 
614  // TODO(gc): we want to skip slots on evacuation candidates
615  // but we can't simply figure that out from slot address
616  // because slot can belong to a large object.
617  IteratePointersInStoreBuffer(slot_callback);
618 
619  // We are done scanning all the pointers that were in the store buffer, but
620  // there may be some pages marked scan_on_scavenge that have pointers to new
621  // space that are not in the store buffer. We must scan them now. As we
622  // scan, the surviving pointers to new space will be added to the store
623  // buffer. If there are still a lot of pointers to new space then we will
624  // keep the scan_on_scavenge flag on the page and discard the pointers that
625  // were added to the store buffer. If there are not many pointers to new
626  // space left on the page we will keep the pointers in the store buffer and
627  // remove the flag from the page.
628  if (some_pages_to_scan) {
629  if (callback_ != NULL) {
630  (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
631  }
632  PointerChunkIterator it(heap_);
633  MemoryChunk* chunk;
634  while ((chunk = it.next()) != NULL) {
635  if (chunk->scan_on_scavenge()) {
636  chunk->set_scan_on_scavenge(false);
637  if (callback_ != NULL) {
638  (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
639  }
640  if (chunk->owner() == heap_->lo_space()) {
641  LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
642  HeapObject* array = large_page->GetObject();
643  ASSERT(array->IsFixedArray());
644  Address start = array->address();
645  Address end = start + array->Size();
646  FindPointersToNewSpaceInRegion(start, end, slot_callback);
647  } else {
648  Page* page = reinterpret_cast<Page*>(chunk);
649  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
650  FindPointersToNewSpaceOnPage(
651  owner,
652  page,
653  (owner == heap_->map_space() ?
654  &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
655  &StoreBuffer::FindPointersToNewSpaceInRegion),
656  slot_callback);
657  }
658  }
659  }
660  if (callback_ != NULL) {
661  (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
662  }
663  }
664 }
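// Illustrative sketch (not from this file): what a slot_callback looks like.
// ObjectSlotCallback is void (*)(HeapObject** from, HeapObject* to) in
// store-buffer.h; the callback below is a hypothetical diagnostic one. The
// real scavenge path passes a callback that evacuates "to" and writes the
// forwarded address back through "from"; any slot that still points into new
// space afterwards is re-entered into the store buffer, as the loops above
// show.
static int g_old_to_new_slots = 0;
static void CountOldToNewSlot(HeapObject** from, HeapObject* to) {
  // "from" lives outside new space and currently points at "to" in new
  // space; a real callback would update *from here.
  g_old_to_new_slots++;
}
// Hypothetical call site, somewhere with access to the heap:
//   heap->store_buffer()->IteratePointersToNewSpace(&CountOldToNewSlot);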
665 
666 
667 void StoreBuffer::Compact() {
668  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
669 
670  if (top == start_) return;
671 
672  // There's no check of the limit in the loop below so we check here for
673  // the worst case (compaction doesn't eliminate any pointers).
674  ASSERT(top <= limit_);
675  heap_->public_set_store_buffer_top(start_);
676  EnsureSpace(top - start_);
677  ASSERT(may_move_store_buffer_entries_);
678  // Goes through the addresses in the store buffer attempting to remove
679  // duplicates. In the interest of speed this is a lossy operation. Some
680  // duplicates will remain. We have two hash sets with different hash
681  // functions to reduce the number of unnecessary clashes.
682  hash_sets_are_empty_ = false; // Hash sets are in use.
683  for (Address* current = start_; current < top; current++) {
684  ASSERT(!heap_->cell_space()->Contains(*current));
685  ASSERT(!heap_->code_space()->Contains(*current));
686  ASSERT(!heap_->old_data_space()->Contains(*current));
687  uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
688  // Shift out the last bits including any tags.
689  int_addr >>= kPointerSizeLog2;
690  int hash1 =
691  ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
692  if (hash_set_1_[hash1] == int_addr) continue;
693  uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
694  hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
695  hash2 &= (kHashSetLength - 1);
696  if (hash_set_2_[hash2] == int_addr) continue;
697  if (hash_set_1_[hash1] == 0) {
698  hash_set_1_[hash1] = int_addr;
699  } else if (hash_set_2_[hash2] == 0) {
700  hash_set_2_[hash2] = int_addr;
701  } else {
702  // Rather than slowing down we just throw away some entries. This will
703  // cause some duplicates to remain undetected.
704  hash_set_1_[hash1] = int_addr;
705  hash_set_2_[hash2] = 0;
706  }
707  old_buffer_is_sorted_ = false;
708  old_buffer_is_filtered_ = false;
709  *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
710  ASSERT(old_top_ <= old_limit_);
711  }
712  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
713  CheckForFullBuffer();
714 }
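// Illustrative sketch (not part of the original file): the two hash indices
// computed above, applied to one made-up slot address. kHashSetLengthLog2
// and the pointer size below are assumptions; the real constants come from
// store-buffer.h. Because a colliding entry simply overwrites hash_set_1_
// and clears hash_set_2_, deduplication is deliberately lossy.
#include <cstdint>
#include <cstdio>
int main() {
  const int kHashSetLengthLog2 = 12;                       // assumed
  const uintptr_t kHashSetLength = uintptr_t(1) << kHashSetLengthLog2;
  const int kPointerSizeLog2 = 3;                          // assume 64-bit
  uintptr_t slot_address = 0x00007f1234567890ull;          // made-up slot
  uintptr_t int_addr = slot_address >> kPointerSizeLog2;
  // Same index computations as the loop in Compact().
  uintptr_t hash1 =
      (int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1);
  uintptr_t hash2 = int_addr - (int_addr >> kHashSetLengthLog2);
  hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
  hash2 &= kHashSetLength - 1;
  printf("hash1 = %lu, hash2 = %lu\n",
         static_cast<unsigned long>(hash1),
         static_cast<unsigned long>(hash2));
  return 0;
}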
715 
716 
717 void StoreBuffer::CheckForFullBuffer() {
718  EnsureSpace(kStoreBufferSize * 2);
719 }
720 
721 } } // namespace v8::internal