v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
spaces.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h"
32 #include "mark-compact.h"
33 #include "platform.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
39 // ----------------------------------------------------------------------------
40 // HeapObjectIterator
41 
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43  // You can't actually iterate over the anchor page. It is not a real page,
44  // just an anchor for the doubly linked page list. Initialize as if we have
45  // reached the end of the anchor page, then the first iteration will move on
46  // to the first page.
47  Initialize(space,
48  NULL,
49  NULL,
50  kAllPagesInSpace,
51  NULL);
52 }
53 
54 
55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56  HeapObjectCallback size_func) {
57  // You can't actually iterate over the anchor page. It is not a real page,
58  // just an anchor for the doubly linked page list. Initialize the current
59  // address and end as NULL, then the first iteration will move on
60  // to the first page.
61  Initialize(space,
62  NULL,
63  NULL,
64  kAllPagesInSpace,
65  size_func);
66 }
67 
68 
69 HeapObjectIterator::HeapObjectIterator(Page* page,
70  HeapObjectCallback size_func) {
71  Space* owner = page->owner();
72  ASSERT(owner == HEAP->old_pointer_space() ||
73  owner == HEAP->old_data_space() ||
74  owner == HEAP->map_space() ||
75  owner == HEAP->cell_space() ||
76  owner == HEAP->code_space());
77  Initialize(reinterpret_cast<PagedSpace*>(owner),
78  page->area_start(),
79  page->area_end(),
80  kOnePageOnly,
81  size_func);
82  ASSERT(page->WasSweptPrecisely());
83 }
84 
85 
86 void HeapObjectIterator::Initialize(PagedSpace* space,
87  Address cur, Address end,
88  HeapObjectIterator::PageMode mode,
89  HeapObjectCallback size_f) {
90  // Check that we actually can iterate this space.
91  ASSERT(!space->was_swept_conservatively());
92 
93  space_ = space;
94  cur_addr_ = cur;
95  cur_end_ = end;
96  page_mode_ = mode;
97  size_func_ = size_f;
98 }
99 
100 
101 // We have hit the end of the page and should advance to the next block of
102 // objects.
103 bool HeapObjectIterator::AdvanceToNextPage() {
104  ASSERT(cur_addr_ == cur_end_);
105  if (page_mode_ == kOnePageOnly) return false;
106  Page* cur_page;
107  if (cur_addr_ == NULL) {
108  cur_page = space_->anchor();
109  } else {
110  cur_page = Page::FromAddress(cur_addr_ - 1);
111  ASSERT(cur_addr_ == cur_page->area_end());
112  }
113  cur_page = cur_page->next_page();
114  if (cur_page == space_->anchor()) return false;
115  cur_addr_ = cur_page->area_start();
116  cur_end_ = cur_page->area_end();
117  ASSERT(cur_page->WasSweptPrecisely());
118  return true;
119 }
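The page lists in this file are circular, doubly linked lists threaded through a sentinel anchor that is not itself a page; iteration starts at anchor->next and stops when it comes back around to the anchor, which is what AdvanceToNextPage() does above. A minimal, self-contained sketch of that pattern (the Node type, InitAnchor, and the payload values are illustrative, not V8 types):

#include <cstdio>

struct Node {
  Node* next;
  Node* prev;
  int value;
};

// The anchor is a sentinel: it closes the ring but holds no payload.
void InitAnchor(Node* anchor) { anchor->next = anchor->prev = anchor; }

void InsertAfter(Node* node, Node* after) {
  node->next = after->next;
  node->prev = after;
  after->next->prev = node;
  after->next = node;
}

int main() {
  Node anchor, a, b;
  InitAnchor(&anchor);
  a.value = 1;
  b.value = 2;
  InsertAfter(&a, &anchor);   // ring: anchor -> a -> anchor
  InsertAfter(&b, &a);        // ring: anchor -> a -> b -> anchor
  // Walk until we are back at the sentinel, like AdvanceToNextPage().
  for (Node* n = anchor.next; n != &anchor; n = n->next) {
    std::printf("%d\n", n->value);
  }
  return 0;
}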
120 
121 
122 // -----------------------------------------------------------------------------
123 // CodeRange
124 
125 
126 CodeRange::CodeRange(Isolate* isolate)
127  : isolate_(isolate),
128  code_range_(NULL),
129  free_list_(0),
130  allocation_list_(0),
131  current_allocation_block_index_(0) {
132 }
133 
134 
135 bool CodeRange::SetUp(const size_t requested) {
136  ASSERT(code_range_ == NULL);
137 
138  code_range_ = new VirtualMemory(requested);
139  CHECK(code_range_ != NULL);
140  if (!code_range_->IsReserved()) {
141  delete code_range_;
142  code_range_ = NULL;
143  return false;
144  }
145 
146  // We are sure that we have mapped a block of requested addresses.
147  ASSERT(code_range_->size() == requested);
148  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
149  Address base = reinterpret_cast<Address>(code_range_->address());
150  Address aligned_base =
151  RoundUp(reinterpret_cast<Address>(code_range_->address()),
152  MemoryChunk::kAlignment);
153  size_t size = code_range_->size() - (aligned_base - base);
154  allocation_list_.Add(FreeBlock(aligned_base, size));
155  current_allocation_block_index_ = 0;
156  return true;
157 }
158 
159 
160 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
161  const FreeBlock* right) {
162  // The entire point of CodeRange is that the difference between two
163  // addresses in the range can be represented as a signed 32-bit int,
164  // so the cast is semantically correct.
165  return static_cast<int>(left->start - right->start);
166 }
167 
168 
169 void CodeRange::GetNextAllocationBlock(size_t requested) {
170  for (current_allocation_block_index_++;
171  current_allocation_block_index_ < allocation_list_.length();
172  current_allocation_block_index_++) {
173  if (requested <= allocation_list_[current_allocation_block_index_].size) {
174  return; // Found a large enough allocation block.
175  }
176  }
177 
178  // Sort and merge the free blocks on the free list and the allocation list.
179  free_list_.AddAll(allocation_list_);
180  allocation_list_.Clear();
181  free_list_.Sort(&CompareFreeBlockAddress);
182  for (int i = 0; i < free_list_.length();) {
183  FreeBlock merged = free_list_[i];
184  i++;
185  // Add adjacent free blocks to the current merged block.
186  while (i < free_list_.length() &&
187  free_list_[i].start == merged.start + merged.size) {
188  merged.size += free_list_[i].size;
189  i++;
190  }
191  if (merged.size > 0) {
192  allocation_list_.Add(merged);
193  }
194  }
195  free_list_.Clear();
196 
197  for (current_allocation_block_index_ = 0;
198  current_allocation_block_index_ < allocation_list_.length();
199  current_allocation_block_index_++) {
200  if (requested <= allocation_list_[current_allocation_block_index_].size) {
201  return; // Found a large enough allocation block.
202  }
203  }
204 
205  // Code range is full or too fragmented.
206  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
207 }
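When the allocation list runs dry, GetNextAllocationBlock() above recovers space by sorting every free block by start address and merging blocks that are exactly adjacent. A simplified, standalone version of that coalescing step (using std::vector instead of V8's List type) might look like this:

#include <algorithm>
#include <cstddef>
#include <vector>

struct FreeBlock {
  size_t start;
  size_t size;
};

std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const FreeBlock& a, const FreeBlock& b) { return a.start < b.start; });
  std::vector<FreeBlock> merged;
  for (size_t i = 0; i < blocks.size();) {
    FreeBlock current = blocks[i++];
    // Fold in every following block that starts exactly where this one ends.
    while (i < blocks.size() && blocks[i].start == current.start + current.size) {
      current.size += blocks[i++].size;
    }
    merged.push_back(current);
  }
  return merged;
}

int main() {
  std::vector<FreeBlock> blocks = {{100, 50}, {0, 100}, {300, 10}};
  std::vector<FreeBlock> merged = Coalesce(blocks);
  // merged now holds {0, 150} and {300, 10}.
  return merged.size() == 2 ? 0 : 1;
}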
208 
209 
210 
211 Address CodeRange::AllocateRawMemory(const size_t requested,
212  size_t* allocated) {
213  ASSERT(current_allocation_block_index_ < allocation_list_.length());
214  if (requested > allocation_list_[current_allocation_block_index_].size) {
215  // Find an allocation block large enough. This function call may
216  // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
217  GetNextAllocationBlock(requested);
218  }
219  // Commit the requested memory at the start of the current allocation block.
220  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
221  FreeBlock current = allocation_list_[current_allocation_block_index_];
222  if (aligned_requested >= (current.size - Page::kPageSize)) {
223  // Don't leave a small free block, useless for a large object or chunk.
224  *allocated = current.size;
225  } else {
226  *allocated = aligned_requested;
227  }
228  ASSERT(*allocated <= current.size);
229  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
230  if (!MemoryAllocator::CommitCodePage(code_range_,
231  current.start,
232  *allocated)) {
233  *allocated = 0;
234  return NULL;
235  }
236  allocation_list_[current_allocation_block_index_].start += *allocated;
237  allocation_list_[current_allocation_block_index_].size -= *allocated;
238  if (*allocated == current.size) {
239  GetNextAllocationBlock(0); // This block is used up, get the next one.
240  }
241  return current.start;
242 }
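The requested size above is first rounded up to MemoryChunk::kAlignment; for a power-of-two alignment this is the usual add-then-mask idiom. A small self-contained sketch of that arithmetic (kAlignment here is just an illustrative constant, not V8's real value):

#include <cassert>
#include <cstdint>

// Round x up to the next multiple of alignment; alignment must be a power of two.
inline uintptr_t RoundUpTo(uintptr_t x, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kAlignment = 1u << 20;  // e.g. a 1 MB chunk alignment
  assert(RoundUpTo(5, 8) == 8);
  assert(RoundUpTo(16, 8) == 16);
  assert(RoundUpTo(kAlignment + 1, kAlignment) == 2 * kAlignment);
  return 0;
}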
243 
244 
245 void CodeRange::FreeRawMemory(Address address, size_t length) {
246  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
247  free_list_.Add(FreeBlock(address, length));
248  code_range_->Uncommit(address, length);
249 }
250 
251 
252 void CodeRange::TearDown() {
253  delete code_range_; // Frees all memory in the virtual memory range.
254  code_range_ = NULL;
255  free_list_.Free();
256  allocation_list_.Free();
257 }
258 
259 
260 // -----------------------------------------------------------------------------
261 // MemoryAllocator
262 //
263 
264 MemoryAllocator::MemoryAllocator(Isolate* isolate)
265  : isolate_(isolate),
266  capacity_(0),
267  capacity_executable_(0),
268  size_(0),
269  size_executable_(0) {
270 }
271 
272 
273 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
274  capacity_ = RoundUp(capacity, Page::kPageSize);
275  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
276  ASSERT_GE(capacity_, capacity_executable_);
277 
278  size_ = 0;
279  size_executable_ = 0;
280 
281  return true;
282 }
283 
284 
285 void MemoryAllocator::TearDown() {
286  // Check that spaces were torn down before MemoryAllocator.
287  ASSERT(size_ == 0);
288  // TODO(gc) this will be true again when we fix FreeMemory.
289  // ASSERT(size_executable_ == 0);
290  capacity_ = 0;
291  capacity_executable_ = 0;
292 }
293 
294 
295 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
296  Executability executable) {
297  // TODO(gc) make code_range part of memory allocator?
298  ASSERT(reservation->IsReserved());
299  size_t size = reservation->size();
300  ASSERT(size_ >= size);
301  size_ -= size;
302 
303  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
304 
305  if (executable == EXECUTABLE) {
306  ASSERT(size_executable_ >= size);
307  size_executable_ -= size;
308  }
309  // Code which is part of the code-range does not have its own VirtualMemory.
310  ASSERT(!isolate_->code_range()->contains(
311  static_cast<Address>(reservation->address())));
312  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
313  reservation->Release();
314 }
315 
316 
317 void MemoryAllocator::FreeMemory(Address base,
318  size_t size,
319  Executability executable) {
320  // TODO(gc) make code_range part of memory allocator?
321  ASSERT(size_ >= size);
322  size_ -= size;
323 
324  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
325 
326  if (executable == EXECUTABLE) {
327  ASSERT(size_executable_ >= size);
328  size_executable_ -= size;
329  }
330  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
331  ASSERT(executable == EXECUTABLE);
332  isolate_->code_range()->FreeRawMemory(base, size);
333  } else {
334  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
335  bool result = VirtualMemory::ReleaseRegion(base, size);
336  USE(result);
337  ASSERT(result);
338  }
339 }
340 
341 
342 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
343  size_t alignment,
344  VirtualMemory* controller) {
345  VirtualMemory reservation(size, alignment);
346 
347  if (!reservation.IsReserved()) return NULL;
348  size_ += reservation.size();
349  Address base = RoundUp(static_cast<Address>(reservation.address()),
350  alignment);
351  controller->TakeControl(&reservation);
352  return base;
353 }
354 
355 
356 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
357  size_t alignment,
358  Executability executable,
359  VirtualMemory* controller) {
360  VirtualMemory reservation;
361  Address base = ReserveAlignedMemory(size, alignment, &reservation);
362  if (base == NULL) return NULL;
363 
364  if (executable == EXECUTABLE) {
365  if (!CommitCodePage(&reservation, base, size)) {
366  base = NULL;
367  }
368  } else {
369  if (!reservation.Commit(base, size, false)) {
370  base = NULL;
371  }
372  }
373 
374  if (base == NULL) {
375  // Failed to commit the body. Release the mapping and any partially
376  // committed regions inside it.
377  reservation.Release();
378  return NULL;
379  }
380 
381  controller->TakeControl(&reservation);
382  return base;
383 }
384 
385 
386 void Page::InitializeAsAnchor(PagedSpace* owner) {
387  set_owner(owner);
388  set_prev_page(this);
389  set_next_page(this);
390 }
391 
392 
393 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
394  Address start,
395  SemiSpace* semi_space) {
396  Address area_start = start + NewSpacePage::kObjectStartOffset;
397  Address area_end = start + Page::kPageSize;
398 
399  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
400  start,
401  Page::kPageSize,
402  area_start,
403  area_end,
404  NOT_EXECUTABLE,
405  semi_space);
406  chunk->set_next_chunk(NULL);
407  chunk->set_prev_chunk(NULL);
408  chunk->initialize_scan_on_scavenge(true);
409  bool in_to_space = (semi_space->id() != kFromSpace);
410  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
411  : MemoryChunk::IN_FROM_SPACE);
412  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
413  : MemoryChunk::IN_TO_SPACE));
414  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
415  heap->incremental_marking()->SetNewSpacePageFlags(page);
416  return page;
417 }
418 
419 
420 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
421  set_owner(semi_space);
422  set_next_chunk(this);
423  set_prev_chunk(this);
424  // Flags mark this invalid page as not being in new-space.
425  // All real new-space pages will be in new-space.
426  SetFlags(0, ~0);
427 }
428 
429 
430 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
431  Address base,
432  size_t size,
433  Address area_start,
434  Address area_end,
435  Executability executable,
436  Space* owner) {
437  MemoryChunk* chunk = FromAddress(base);
438 
439  ASSERT(base == chunk->address());
440 
441  chunk->heap_ = heap;
442  chunk->size_ = size;
443  chunk->area_start_ = area_start;
444  chunk->area_end_ = area_end;
445  chunk->flags_ = 0;
446  chunk->set_owner(owner);
447  chunk->InitializeReservedMemory();
448  chunk->slots_buffer_ = NULL;
449  chunk->skip_list_ = NULL;
450  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
451  chunk->ResetLiveBytes();
452  Bitmap::Clear(chunk);
453  chunk->initialize_scan_on_scavenge(false);
454  chunk->SetFlag(WAS_SWEPT_PRECISELY);
455 
456  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
457  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
458 
459  if (executable == EXECUTABLE) {
460  chunk->SetFlag(IS_EXECUTABLE);
461  }
462 
463  if (owner == heap->old_data_space()) {
464  chunk->SetFlag(CONTAINS_ONLY_DATA);
465  }
466 
467  return chunk;
468 }
469 
470 
471 void MemoryChunk::InsertAfter(MemoryChunk* other) {
472  next_chunk_ = other->next_chunk_;
473  prev_chunk_ = other;
474  other->next_chunk_->prev_chunk_ = this;
475  other->next_chunk_ = this;
476 }
477 
478 
479 void MemoryChunk::Unlink() {
480  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
481  heap_->decrement_scan_on_scavenge_pages();
482  ClearFlag(SCAN_ON_SCAVENGE);
483  }
484  next_chunk_->prev_chunk_ = prev_chunk_;
485  prev_chunk_->next_chunk_ = next_chunk_;
486  prev_chunk_ = NULL;
487  next_chunk_ = NULL;
488 }
489 
490 
491 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
492  Executability executable,
493  Space* owner) {
494  size_t chunk_size;
495  Heap* heap = isolate_->heap();
496  Address base = NULL;
497  VirtualMemory reservation;
498  Address area_start = NULL;
499  Address area_end = NULL;
500 
501  if (executable == EXECUTABLE) {
502  chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
503  OS::CommitPageSize()) + CodePageGuardSize();
504 
505  // Check executable memory limit.
506  if (size_executable_ + chunk_size > capacity_executable_) {
507  LOG(isolate_,
508  StringEvent("MemoryAllocator::AllocateRawMemory",
509  "V8 Executable Allocation capacity exceeded"));
510  return NULL;
511  }
512 
513  // Allocate executable memory either from code range or from the
514  // OS.
515  if (isolate_->code_range()->exists()) {
516  base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
517  ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
518  MemoryChunk::kAlignment));
519  if (base == NULL) return NULL;
520  size_ += chunk_size;
521  // Update executable memory size.
522  size_executable_ += chunk_size;
523  } else {
524  base = AllocateAlignedMemory(chunk_size,
525  MemoryChunk::kAlignment,
526  executable,
527  &reservation);
528  if (base == NULL) return NULL;
529  // Update executable memory size.
530  size_executable_ += reservation.size();
531  }
532 
533  if (Heap::ShouldZapGarbage()) {
534  ZapBlock(base, CodePageGuardStartOffset());
535  ZapBlock(base + CodePageAreaStartOffset(), body_size);
536  }
537 
538  area_start = base + CodePageAreaStartOffset();
539  area_end = area_start + body_size;
540  } else {
541  chunk_size = MemoryChunk::kObjectStartOffset + body_size;
542  base = AllocateAlignedMemory(chunk_size,
543  MemoryChunk::kAlignment,
544  executable,
545  &reservation);
546 
547  if (base == NULL) return NULL;
548 
549  if (Heap::ShouldZapGarbage()) {
550  ZapBlock(base, chunk_size);
551  }
552 
553  area_start = base + Page::kObjectStartOffset;
554  area_end = base + chunk_size;
555  }
556 
557  isolate_->counters()->memory_allocated()->
558  Increment(static_cast<int>(chunk_size));
559 
560  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
561  if (owner != NULL) {
562  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
563  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
564  }
565 
566  MemoryChunk* result = MemoryChunk::Initialize(heap,
567  base,
568  chunk_size,
569  area_start,
570  area_end,
571  executable,
572  owner);
573  result->set_reserved_memory(&reservation);
574  return result;
575 }
576 
577 
578 Page* MemoryAllocator::AllocatePage(intptr_t size,
579  PagedSpace* owner,
580  Executability executable) {
581  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
582 
583  if (chunk == NULL) return NULL;
584 
585  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
586 }
587 
588 
589 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
590  Space* owner,
591  Executability executable) {
592  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
593  if (chunk == NULL) return NULL;
594  return LargePage::Initialize(isolate_->heap(), chunk);
595 }
596 
597 
598 void MemoryAllocator::Free(MemoryChunk* chunk) {
599  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
600  if (chunk->owner() != NULL) {
601  ObjectSpace space =
602  static_cast<ObjectSpace>(1 << chunk->owner()->identity());
603  PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
604  }
605 
606  isolate_->heap()->RememberUnmappedPage(
607  reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
608 
609  delete chunk->slots_buffer();
610  delete chunk->skip_list();
611 
612  VirtualMemory* reservation = chunk->reserved_memory();
613  if (reservation->IsReserved()) {
614  FreeMemory(reservation, chunk->executable());
615  } else {
616  FreeMemory(chunk->address(),
617  chunk->size(),
618  chunk->executable());
619  }
620 }
621 
622 
623 bool MemoryAllocator::CommitBlock(Address start,
624  size_t size,
625  Executability executable) {
626  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
627 
628  if (Heap::ShouldZapGarbage()) {
629  ZapBlock(start, size);
630  }
631 
632  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
633  return true;
634 }
635 
636 
637 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
638  if (!VirtualMemory::UncommitRegion(start, size)) return false;
639  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
640  return true;
641 }
642 
643 
644 void MemoryAllocator::ZapBlock(Address start, size_t size) {
645  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
646  Memory::Address_at(start + s) = kZapValue;
647  }
648 }
649 
650 
651 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
652  AllocationAction action,
653  size_t size) {
654  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
655  MemoryAllocationCallbackRegistration registration =
656  memory_allocation_callbacks_[i];
657  if ((registration.space & space) == space &&
658  (registration.action & action) == action)
659  registration.callback(space, action, static_cast<int>(size));
660  }
661 }
662 
663 
664 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
665  MemoryAllocationCallback callback) {
666  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
667  if (memory_allocation_callbacks_[i].callback == callback) return true;
668  }
669  return false;
670 }
671 
672 
673 void MemoryAllocator::AddMemoryAllocationCallback(
674  MemoryAllocationCallback callback,
675  ObjectSpace space,
676  AllocationAction action) {
677  ASSERT(callback != NULL);
678  MemoryAllocationCallbackRegistration registration(callback, space, action);
679  ASSERT(!MemoryAllocationCallbackRegistered(callback));
680  return memory_allocation_callbacks_.Add(registration);
681 }
682 
683 
684 void MemoryAllocator::RemoveMemoryAllocationCallback(
685  MemoryAllocationCallback callback) {
686  ASSERT(callback != NULL);
687  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
688  if (memory_allocation_callbacks_[i].callback == callback) {
689  memory_allocation_callbacks_.Remove(i);
690  return;
691  }
692  }
693  UNREACHABLE();
694 }
695 
696 
697 #ifdef DEBUG
698 void MemoryAllocator::ReportStatistics() {
699  float pct = static_cast<float>(capacity_ - size_) / capacity_;
700  PrintF(" capacity: %" V8_PTR_PREFIX "d"
701  ", used: %" V8_PTR_PREFIX "d"
702  ", available: %%%d\n\n",
703  capacity_, size_, static_cast<int>(pct*100));
704 }
705 #endif
706 
707 
708 int MemoryAllocator::CodePageGuardStartOffset() {
709  // We are guarding code pages: the first OS page after the header
710  // will be protected as non-writable.
711  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
712 }
713 
714 
715 int MemoryAllocator::CodePageGuardSize() {
716  return static_cast<int>(OS::CommitPageSize());
717 }
718 
719 
720 int MemoryAllocator::CodePageAreaStartOffset() {
721  // We are guarding code pages: the first OS page after the header
722  // will be protected as non-writable.
723  return CodePageGuardStartOffset() + CodePageGuardSize();
724 }
725 
726 
727 int MemoryAllocator::CodePageAreaEndOffset() {
728  // We are guarding code pages: the last OS page will be protected as
729  // non-writable.
730  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
731 }
732 
733 
734 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
735  Address start,
736  size_t size) {
737  // Commit page header (not executable).
738  if (!vm->Commit(start,
739  CodePageGuardStartOffset(),
740  false)) {
741  return false;
742  }
743 
744  // Create guard page after the header.
745  if (!vm->Guard(start + CodePageGuardStartOffset())) {
746  return false;
747  }
748 
749  // Commit page body (executable).
750  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
751  if (!vm->Commit(start + CodePageAreaStartOffset(),
752  area_size,
753  true)) {
754  return false;
755  }
756 
757  // Create guard page after the allocatable area.
758  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
759  return false;
760  }
761 
762  return true;
763 }
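CommitCodePage() lays a code page out as a non-executable header, a guard page, an executable body, and a trailing guard page at the very end. A rough, standalone sketch of how the offsets relate, assuming an illustrative 4 KB OS commit page size and 1 MB page size (the real values come from OS::CommitPageSize() and Page::kPageSize, and the header size here is made up):

#include <cassert>
#include <cstddef>

int main() {
  const size_t kCommitPageSize = 4096;           // stand-in for OS::CommitPageSize()
  const size_t kPageSize = 1024 * 1024;          // stand-in for Page::kPageSize
  const size_t kObjectStartOffset = 256;         // illustrative header size

  // Header is rounded up to whole OS pages, then one guard page, then the
  // executable body, then one trailing guard page at the end of the chunk.
  size_t guard_start = ((kObjectStartOffset + kCommitPageSize - 1) / kCommitPageSize)
                       * kCommitPageSize;        // like CodePageGuardStartOffset()
  size_t guard_size = kCommitPageSize;           // like CodePageGuardSize()
  size_t area_start = guard_start + guard_size;  // like CodePageAreaStartOffset()
  size_t area_end = kPageSize - kCommitPageSize; // like CodePageAreaEndOffset()

  assert(area_start < area_end);
  size_t usable_body = area_end - area_start;    // space available for code objects
  (void)usable_body;
  return 0;
}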
764 
765 
766 // -----------------------------------------------------------------------------
767 // MemoryChunk implementation
768 
769 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
770  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
771  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
772  static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
773  }
774  chunk->IncrementLiveBytes(by);
775 }
776 
777 // -----------------------------------------------------------------------------
778 // PagedSpace implementation
779 
780 PagedSpace::PagedSpace(Heap* heap,
781  intptr_t max_capacity,
782  AllocationSpace id,
783  Executability executable)
784  : Space(heap, id, executable),
785  free_list_(this),
786  was_swept_conservatively_(false),
787  first_unswept_page_(Page::FromAddress(NULL)),
788  unswept_free_bytes_(0) {
789  if (id == CODE_SPACE) {
790  area_size_ = heap->isolate()->memory_allocator()->
791  CodePageAreaSize();
792  } else {
793  area_size_ = Page::kPageSize - Page::kObjectStartOffset;
794  }
795  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
796  * AreaSize();
797  accounting_stats_.Clear();
798 
799  allocation_info_.top = NULL;
800  allocation_info_.limit = NULL;
801 
802  anchor_.InitializeAsAnchor(this);
803 }
804 
805 
806 bool PagedSpace::SetUp() {
807  return true;
808 }
809 
810 
811 bool PagedSpace::HasBeenSetUp() {
812  return true;
813 }
814 
815 
816 void PagedSpace::TearDown() {
817  PageIterator iterator(this);
818  while (iterator.has_next()) {
819  heap()->isolate()->memory_allocator()->Free(iterator.next());
820  }
821  anchor_.set_next_page(&anchor_);
822  anchor_.set_prev_page(&anchor_);
823  accounting_stats_.Clear();
824 }
825 
826 
827 MaybeObject* PagedSpace::FindObject(Address addr) {
828  // Note: this function can only be called on precisely swept spaces.
829  ASSERT(!heap()->mark_compact_collector()->in_use());
830 
831  if (!Contains(addr)) return Failure::Exception();
832 
833  Page* p = Page::FromAddress(addr);
834  HeapObjectIterator it(p, NULL);
835  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
836  Address cur = obj->address();
837  Address next = cur + obj->Size();
838  if ((cur <= addr) && (addr < next)) return obj;
839  }
840 
841  UNREACHABLE();
842  return Failure::Exception();
843 }
844 
845 bool PagedSpace::CanExpand() {
846  ASSERT(max_capacity_ % AreaSize() == 0);
847 
848  if (Capacity() == max_capacity_) return false;
849 
851 
852  // Are we going to exceed capacity for this space?
853  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
854 
855  return true;
856 }
857 
858 bool PagedSpace::Expand() {
859  if (!CanExpand()) return false;
860 
861  intptr_t size = AreaSize();
862 
863  if (anchor_.next_page() == &anchor_) {
864  size = SizeOfFirstPage();
865  }
866 
867  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
868  size, this, executable());
869  if (p == NULL) return false;
870 
871  ASSERT(Capacity() <= max_capacity_);
872 
873  p->InsertAfter(anchor_.prev_page());
874 
875  return true;
876 }
877 
878 
879 int PagedSpace::SizeOfFirstPage() {
880  int size = 0;
881  switch (identity()) {
882  case OLD_POINTER_SPACE:
883  size = 64 * kPointerSize * KB;
884  break;
885  case OLD_DATA_SPACE:
886  size = 192 * KB;
887  break;
888  case MAP_SPACE:
889  size = 16 * kPointerSize * KB;
890  break;
891  case CELL_SPACE:
892  size = 16 * kPointerSize * KB;
893  break;
894  case CODE_SPACE:
895  if (kPointerSize == 8) {
896  // On x64 we allocate code pages in a special way (from the reserved
897  // 2 GB code range). That part of the code is not yet upgraded to handle
898  // small pages.
899  size = AreaSize();
900  } else {
901  size = 384 * KB;
902  }
903  break;
904  default:
905  UNREACHABLE();
906  }
907  return Min(size, AreaSize());
908 }
909 
910 
911 int PagedSpace::CountTotalPages() {
912  PageIterator it(this);
913  int count = 0;
914  while (it.has_next()) {
915  it.next();
916  count++;
917  }
918  return count;
919 }
920 
921 
922 void PagedSpace::ReleasePage(Page* page) {
923  ASSERT(page->LiveBytes() == 0);
924  ASSERT(AreaSize() == page->area_size());
925 
926  // Adjust list of unswept pages if the page is the head of the list.
927  if (first_unswept_page_ == page) {
928  first_unswept_page_ = page->next_page();
929  if (first_unswept_page_ == anchor()) {
930  first_unswept_page_ = Page::FromAddress(NULL);
931  }
932  }
933 
934  if (page->WasSwept()) {
935  intptr_t size = free_list_.EvictFreeListItems(page);
936  accounting_stats_.AllocateBytes(size);
937  ASSERT_EQ(AreaSize(), static_cast<int>(size));
938  } else {
939  DecreaseUnsweptFreeBytes(page);
940  }
941 
942  if (Page::FromAllocationTop(allocation_info_.top) == page) {
943  allocation_info_.top = allocation_info_.limit = NULL;
944  }
945 
946  page->Unlink();
947  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
948  heap()->isolate()->memory_allocator()->Free(page);
949  } else {
950  heap()->QueueMemoryChunkForFree(page);
951  }
952 
953  ASSERT(Capacity() > 0);
954  accounting_stats_.ShrinkSpace(AreaSize());
955 }
956 
957 
958 void PagedSpace::ReleaseAllUnusedPages() {
959  PageIterator it(this);
960  while (it.has_next()) {
961  Page* page = it.next();
962  if (!page->WasSwept()) {
963  if (page->LiveBytes() == 0) ReleasePage(page);
964  } else {
965  HeapObject* obj = HeapObject::FromAddress(page->area_start());
966  if (obj->IsFreeSpace() &&
967  FreeSpace::cast(obj)->size() == AreaSize()) {
968  // Sometimes we allocate memory from free list but don't
969  // immediately initialize it (e.g. see PagedSpace::ReserveSpace
970  // called from Heap::ReserveSpace that can cause GC before
971  // reserved space is actually initialized).
972  // Thus we can't simply assume that obj represents a valid
973  // node still owned by a free list
974  // Instead we should verify that the page is fully covered
975  // by free list items.
976  FreeList::SizeStats sizes;
977  free_list_.CountFreeListItems(page, &sizes);
978  if (sizes.Total() == AreaSize()) {
979  ReleasePage(page);
980  }
981  }
982  }
983  }
984  heap()->FreeQueuedChunks();
985 }
986 
987 
988 #ifdef DEBUG
989 void PagedSpace::Print() { }
990 #endif
991 
992 #ifdef VERIFY_HEAP
993 void PagedSpace::Verify(ObjectVisitor* visitor) {
994  // We can only iterate over the pages if they were swept precisely.
995  if (was_swept_conservatively_) return;
996 
997  bool allocation_pointer_found_in_space =
998  (allocation_info_.top == allocation_info_.limit);
999  PageIterator page_iterator(this);
1000  while (page_iterator.has_next()) {
1001  Page* page = page_iterator.next();
1002  CHECK(page->owner() == this);
1003  if (page == Page::FromAllocationTop(allocation_info_.top)) {
1004  allocation_pointer_found_in_space = true;
1005  }
1006  CHECK(page->WasSweptPrecisely());
1007  HeapObjectIterator it(page, NULL);
1008  Address end_of_previous_object = page->area_start();
1009  Address top = page->area_end();
1010  int black_size = 0;
1011  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1012  CHECK(end_of_previous_object <= object->address());
1013 
1014  // The first word should be a map, and we expect all map pointers to
1015  // be in map space.
1016  Map* map = object->map();
1017  CHECK(map->IsMap());
1018  CHECK(heap()->map_space()->Contains(map));
1019 
1020  // Perform space-specific object verification.
1021  VerifyObject(object);
1022 
1023  // The object itself should look OK.
1024  object->Verify();
1025 
1026  // All the interior pointers should be contained in the heap.
1027  int size = object->Size();
1028  object->IterateBody(map->instance_type(), size, visitor);
1029  if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1030  black_size += size;
1031  }
1032 
1033  CHECK(object->address() + size <= top);
1034  end_of_previous_object = object->address() + size;
1035  }
1036  CHECK_LE(black_size, page->LiveBytes());
1037  }
1038  CHECK(allocation_pointer_found_in_space);
1039 }
1040 #endif // VERIFY_HEAP
1041 
1042 // -----------------------------------------------------------------------------
1043 // NewSpace implementation
1044 
1045 
1046 bool NewSpace::SetUp(int reserved_semispace_capacity,
1047  int maximum_semispace_capacity) {
1048  // Set up new space based on the preallocated memory block defined by
1049  // start and size. The provided space is divided into two semi-spaces.
1050  // To support fast containment testing in the new space, the size of
1051  // this chunk must be a power of two and it must be aligned to its size.
1052  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1053 
1054  size_t size = 2 * reserved_semispace_capacity;
1055  Address base =
1056  heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1057  size, size, &reservation_);
1058  if (base == NULL) return false;
1059 
1060  chunk_base_ = base;
1061  chunk_size_ = static_cast<uintptr_t>(size);
1062  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1063 
1064  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1065  ASSERT(IsPowerOf2(maximum_semispace_capacity));
1066 
1067  // Allocate and set up the histogram arrays if necessary.
1068  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1069  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1070 
1071 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1072  promoted_histogram_[name].set_name(#name);
1073  INSTANCE_TYPE_LIST(SET_NAME)
1074 #undef SET_NAME
1075 
1076  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1077  ASSERT(static_cast<intptr_t>(chunk_size_) >=
1078  2 * heap()->ReservedSemiSpaceSize());
1079  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1080 
1081  to_space_.SetUp(chunk_base_,
1082  initial_semispace_capacity,
1083  maximum_semispace_capacity);
1084  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1085  initial_semispace_capacity,
1086  maximum_semispace_capacity);
1087  if (!to_space_.Commit()) {
1088  return false;
1089  }
1090  ASSERT(!from_space_.is_committed()); // No need to use memory yet.
1091 
1092  start_ = chunk_base_;
1093  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1094  object_mask_ = address_mask_ | kHeapObjectTagMask;
1095  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1096 
1097  ResetAllocationInfo();
1098 
1099  return true;
1100 }
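Because the new-space chunk is a power of two in size and aligned to that size, membership can be tested by masking off the low bits of an address and comparing against the chunk base; that is what address_mask_ above encodes. A standalone sketch of the idea (the chunk size and addresses here are made up):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kChunkSize = 1u << 20;            // power of two
  const uintptr_t base = 64u * kChunkSize;          // base aligned to kChunkSize
  const uintptr_t address_mask = ~(kChunkSize - 1);

  uintptr_t inside = base + 12345;
  uintptr_t outside = base + kChunkSize + 1;

  // An address is in the chunk iff masking off the offset bits yields the base.
  assert((inside & address_mask) == base);
  assert((outside & address_mask) != base);
  return 0;
}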
1101 
1102 
1103 void NewSpace::TearDown() {
1104  if (allocated_histogram_) {
1105  DeleteArray(allocated_histogram_);
1106  allocated_histogram_ = NULL;
1107  }
1108  if (promoted_histogram_) {
1109  DeleteArray(promoted_histogram_);
1110  promoted_histogram_ = NULL;
1111  }
1112 
1113  start_ = NULL;
1114  allocation_info_.top = NULL;
1115  allocation_info_.limit = NULL;
1116 
1117  to_space_.TearDown();
1118  from_space_.TearDown();
1119 
1120  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1121 
1122  ASSERT(reservation_.IsReserved());
1123  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1124  NOT_EXECUTABLE);
1125  chunk_base_ = NULL;
1126  chunk_size_ = 0;
1127 }
1128 
1129 
1130 void NewSpace::Flip() {
1131  SemiSpace::Swap(&from_space_, &to_space_);
1132 }
1133 
1134 
1135 void NewSpace::Grow() {
1136  // Double the semispace size but only up to maximum capacity.
1137  ASSERT(Capacity() < MaximumCapacity());
1138  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1139  if (to_space_.GrowTo(new_capacity)) {
1140  // Only grow from space if we managed to grow to-space.
1141  if (!from_space_.GrowTo(new_capacity)) {
1142  // If we managed to grow to-space but couldn't grow from-space,
1143  // attempt to shrink to-space.
1144  if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1145  // We are in an inconsistent state because we could not
1146  // commit/uncommit memory from new space.
1147  V8::FatalProcessOutOfMemory("Failed to grow new space.");
1148  }
1149  }
1150  }
1151  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1152 }
1153 
1154 
1155 void NewSpace::Shrink() {
1156  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1157  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1158  if (rounded_new_capacity < Capacity() &&
1159  to_space_.ShrinkTo(rounded_new_capacity)) {
1160  // Only shrink from-space if we managed to shrink to-space.
1161  from_space_.Reset();
1162  if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1163  // If we managed to shrink to-space but couldn't shrink from
1164  // space, attempt to grow to-space again.
1165  if (!to_space_.GrowTo(from_space_.Capacity())) {
1166  // We are in an inconsistent state because we could not
1167  // commit/uncommit memory from new space.
1168  V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1169  }
1170  }
1171  }
1172  allocation_info_.limit = to_space_.page_high();
1173  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1174 }
1175 
1176 
1177 void NewSpace::UpdateAllocationInfo() {
1178  allocation_info_.top = to_space_.page_low();
1179  allocation_info_.limit = to_space_.page_high();
1180 
1181  // Lower limit during incremental marking.
1182  if (heap()->incremental_marking()->IsMarking() &&
1183  inline_allocation_limit_step() != 0) {
1184  Address new_limit =
1185  allocation_info_.top + inline_allocation_limit_step();
1186  allocation_info_.limit = Min(new_limit, allocation_info_.limit);
1187  }
1188  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1189 }
1190 
1191 
1192 void NewSpace::ResetAllocationInfo() {
1193  to_space_.Reset();
1194  UpdateAllocationInfo();
1195  pages_used_ = 0;
1196  // Clear all mark-bits in the to-space.
1197  NewSpacePageIterator it(&to_space_);
1198  while (it.has_next()) {
1199  Bitmap::Clear(it.next());
1200  }
1201 }
1202 
1203 
1204 bool NewSpace::AddFreshPage() {
1205  Address top = allocation_info_.top;
1206  if (NewSpacePage::IsAtStart(top)) {
1207  // The current page is already empty. Don't try to make another.
1208 
1209  // We should only get here if someone asks to allocate more
1210  // than what can be stored in a single page.
1211  // TODO(gc): Change the limit on new-space allocation to prevent this
1212  // from happening (all such allocations should go directly to LOSpace).
1213  return false;
1214  }
1215  if (!to_space_.AdvancePage()) {
1216  // Failed to get a new page in to-space.
1217  return false;
1218  }
1219 
1220  // Clear remainder of current page.
1221  Address limit = NewSpacePage::FromLimit(top)->area_end();
1222  if (heap()->gc_state() == Heap::SCAVENGE) {
1223  heap()->promotion_queue()->SetNewLimit(limit);
1224  heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1225  }
1226 
1227  int remaining_in_page = static_cast<int>(limit - top);
1228  heap()->CreateFillerObjectAt(top, remaining_in_page);
1229  pages_used_++;
1230  UpdateAllocationInfo();
1231 
1232  return true;
1233 }
1234 
1235 
1236 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1237  Address old_top = allocation_info_.top;
1238  Address new_top = old_top + size_in_bytes;
1239  Address high = to_space_.page_high();
1240  if (allocation_info_.limit < high) {
1241  // Incremental marking has lowered the limit to get a
1242  // chance to do a step.
1243  allocation_info_.limit = Min(
1244  allocation_info_.limit + inline_allocation_limit_step_,
1245  high);
1246  int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1247  heap()->incremental_marking()->Step(
1248  bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1249  top_on_previous_step_ = new_top;
1250  return AllocateRaw(size_in_bytes);
1251  } else if (AddFreshPage()) {
1252  // Switched to new page. Try allocating again.
1253  int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1254  heap()->incremental_marking()->Step(
1255  bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1256  top_on_previous_step_ = to_space_.page_low();
1257  return AllocateRaw(size_in_bytes);
1258  } else {
1259  return Failure::RetryAfterGC();
1260  }
1261 }
1262 
1263 
1264 #ifdef VERIFY_HEAP
1265 // We do not use the SemiSpaceIterator because verification doesn't assume
1266 // that it works (it depends on the invariants we are checking).
1267 void NewSpace::Verify() {
1268  // The allocation pointer should be in the space or at the very end.
1269  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1270 
1271  // There should be objects packed in from the low address up to the
1272  // allocation pointer.
1273  Address current = to_space_.first_page()->area_start();
1274  CHECK_EQ(current, to_space_.space_start());
1275 
1276  while (current != top()) {
1277  if (!NewSpacePage::IsAtEnd(current)) {
1278  // The allocation pointer should not be in the middle of an object.
1279  CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1280  current < top());
1281 
1282  HeapObject* object = HeapObject::FromAddress(current);
1283 
1284  // The first word should be a map, and we expect all map pointers to
1285  // be in map space.
1286  Map* map = object->map();
1287  CHECK(map->IsMap());
1288  CHECK(heap()->map_space()->Contains(map));
1289 
1290  // The object should not be code or a map.
1291  CHECK(!object->IsMap());
1292  CHECK(!object->IsCode());
1293 
1294  // The object itself should look OK.
1295  object->Verify();
1296 
1297  // All the interior pointers should be contained in the heap.
1298  VerifyPointersVisitor visitor;
1299  int size = object->Size();
1300  object->IterateBody(map->instance_type(), size, &visitor);
1301 
1302  current += size;
1303  } else {
1304  // At end of page, switch to next page.
1305  NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1306  // Next page should be valid.
1307  CHECK(!page->is_anchor());
1308  current = page->area_start();
1309  }
1310  }
1311 
1312  // Check semi-spaces.
1313  CHECK_EQ(from_space_.id(), kFromSpace);
1314  CHECK_EQ(to_space_.id(), kToSpace);
1315  from_space_.Verify();
1316  to_space_.Verify();
1317 }
1318 #endif
1319 
1320 // -----------------------------------------------------------------------------
1321 // SemiSpace implementation
1322 
1323 void SemiSpace::SetUp(Address start,
1324  int initial_capacity,
1325  int maximum_capacity) {
1326  // Creates a space in the young generation. The constructor does not
1327  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1328  // memory of size 'capacity' when set up, and does not grow or shrink
1329  // otherwise. In the mark-compact collector, the memory region of the from
1330  // space is used as the marking stack. It requires contiguous memory
1331  // addresses.
1332  ASSERT(maximum_capacity >= Page::kPageSize);
1333  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1334  capacity_ = initial_capacity;
1335  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1336  committed_ = false;
1337  start_ = start;
1338  address_mask_ = ~(maximum_capacity - 1);
1339  object_mask_ = address_mask_ | kHeapObjectTagMask;
1340  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1341  age_mark_ = start_;
1342 }
1343 
1344 
1345 void SemiSpace::TearDown() {
1346  start_ = NULL;
1347  capacity_ = 0;
1348 }
1349 
1350 
1351 bool SemiSpace::Commit() {
1352  ASSERT(!is_committed());
1353  int pages = capacity_ / Page::kPageSize;
1354  Address end = start_ + maximum_capacity_;
1355  Address start = end - pages * Page::kPageSize;
1356  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1357  capacity_,
1358  executable())) {
1359  return false;
1360  }
1361 
1362  NewSpacePage* page = anchor();
1363  for (int i = 1; i <= pages; i++) {
1364  NewSpacePage* new_page =
1365  NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1366  new_page->InsertAfter(page);
1367  page = new_page;
1368  }
1369 
1370  committed_ = true;
1371  Reset();
1372  return true;
1373 }
1374 
1375 
1376 bool SemiSpace::Uncommit() {
1377  ASSERT(is_committed());
1378  Address start = start_ + maximum_capacity_ - capacity_;
1379  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1380  return false;
1381  }
1382  anchor()->set_next_page(anchor());
1383  anchor()->set_prev_page(anchor());
1384 
1385  committed_ = false;
1386  return true;
1387 }
1388 
1389 
1390 bool SemiSpace::GrowTo(int new_capacity) {
1391  if (!is_committed()) {
1392  if (!Commit()) return false;
1393  }
1394  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1395  ASSERT(new_capacity <= maximum_capacity_);
1396  ASSERT(new_capacity > capacity_);
1397  int pages_before = capacity_ / Page::kPageSize;
1398  int pages_after = new_capacity / Page::kPageSize;
1399 
1400  Address end = start_ + maximum_capacity_;
1401  Address start = end - new_capacity;
1402  size_t delta = new_capacity - capacity_;
1403 
1405  if (!heap()->isolate()->memory_allocator()->CommitBlock(
1406  start, delta, executable())) {
1407  return false;
1408  }
1409  capacity_ = new_capacity;
1410  NewSpacePage* last_page = anchor()->prev_page();
1411  ASSERT(last_page != anchor());
1412  for (int i = pages_before + 1; i <= pages_after; i++) {
1413  Address page_address = end - i * Page::kPageSize;
1414  NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1415  page_address,
1416  this);
1417  new_page->InsertAfter(last_page);
1418  Bitmap::Clear(new_page);
1419  // Duplicate the flags that were set on the old page.
1420  new_page->SetFlags(last_page->GetFlags(),
1421  NewSpacePage::kCopyAllFlags);
1422  last_page = new_page;
1423  }
1424  return true;
1425 }
1426 
1427 
1428 bool SemiSpace::ShrinkTo(int new_capacity) {
1429  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1430  ASSERT(new_capacity >= initial_capacity_);
1431  ASSERT(new_capacity < capacity_);
1432  if (is_committed()) {
1433  // Semispaces grow backwards from the end of their allocated capacity,
1434  // so we find the before and after start addresses relative to the
1435  // end of the space.
1436  Address space_end = start_ + maximum_capacity_;
1437  Address old_start = space_end - capacity_;
1438  size_t delta = capacity_ - new_capacity;
1440 
1441  MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1442  if (!allocator->UncommitBlock(old_start, delta)) {
1443  return false;
1444  }
1445 
1446  int pages_after = new_capacity / Page::kPageSize;
1447  NewSpacePage* new_last_page =
1448  NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1449  new_last_page->set_next_page(anchor());
1450  anchor()->set_prev_page(new_last_page);
1451  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
1452  }
1453 
1454  capacity_ = new_capacity;
1455 
1456  return true;
1457 }
1458 
1459 
1460 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1461  anchor_.set_owner(this);
1462  // Fixup back-pointers to anchor. Address of anchor changes
1463  // when we swap.
1464  anchor_.prev_page()->set_next_page(&anchor_);
1465  anchor_.next_page()->set_prev_page(&anchor_);
1466 
1467  bool becomes_to_space = (id_ == kFromSpace);
1468  id_ = becomes_to_space ? kToSpace : kFromSpace;
1469  NewSpacePage* page = anchor_.next_page();
1470  while (page != &anchor_) {
1471  page->set_owner(this);
1472  page->SetFlags(flags, mask);
1473  if (becomes_to_space) {
1474  page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1475  page->SetFlag(MemoryChunk::IN_TO_SPACE);
1476  page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1477  page->ResetLiveBytes();
1478  } else {
1479  page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1480  page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1481  }
1483  ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1484  page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1485  page = page->next_page();
1486  }
1487 }
1488 
1489 
1490 void SemiSpace::Reset() {
1491  ASSERT(anchor_.next_page() != &anchor_);
1492  current_page_ = anchor_.next_page();
1493 }
1494 
1495 
1496 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1497  // We won't be swapping semispaces without data in them.
1498  ASSERT(from->anchor_.next_page() != &from->anchor_);
1499  ASSERT(to->anchor_.next_page() != &to->anchor_);
1500 
1501  // Swap bits.
1502  SemiSpace tmp = *from;
1503  *from = *to;
1504  *to = tmp;
1505 
1506  // Fixup back-pointers to the page list anchor now that its address
1507  // has changed.
1508  // Swap to/from-space bits on pages.
1509  // Copy GC flags from old active space (from-space) to new (to-space).
1510  intptr_t flags = from->current_page()->GetFlags();
1511  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1512 
1513  from->FlipPages(0, 0);
1514 }
1515 
1516 
1517 void SemiSpace::set_age_mark(Address mark) {
1518  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1519  age_mark_ = mark;
1520  // Mark all pages up to the one containing mark.
1521  NewSpacePageIterator it(space_start(), mark);
1522  while (it.has_next()) {
1523  it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1524  }
1525 }
1526 
1527 
1528 #ifdef DEBUG
1529 void SemiSpace::Print() { }
1530 #endif
1531 
1532 #ifdef VERIFY_HEAP
1533 void SemiSpace::Verify() {
1534  bool is_from_space = (id_ == kFromSpace);
1535  NewSpacePage* page = anchor_.next_page();
1536  CHECK(anchor_.semi_space() == this);
1537  while (page != &anchor_) {
1538  CHECK(page->semi_space() == this);
1539  CHECK(page->InNewSpace());
1540  CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1541  : MemoryChunk::IN_TO_SPACE));
1542  CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1543  : MemoryChunk::IN_FROM_SPACE));
1545  if (!is_from_space) {
1546  // The pointers-from-here-are-interesting flag isn't updated dynamically
1547  // on from-space pages, so it might be out of sync with the marking state.
1548  if (page->heap()->incremental_marking()->IsMarking()) {
1549  CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1550  } else {
1551  CHECK(!page->IsFlagSet(
1552  MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1553  }
1554  // TODO(gc): Check that the live_bytes_count_ field matches the
1555  // black marking on the page (if we make it match in new-space).
1556  }
1557  CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1558  CHECK(page->prev_page()->next_page() == page);
1559  page = page->next_page();
1560  }
1561 }
1562 #endif
1563 
1564 #ifdef DEBUG
1565 void SemiSpace::AssertValidRange(Address start, Address end) {
1566  // Addresses belong to same semi-space
1567  NewSpacePage* page = NewSpacePage::FromLimit(start);
1568  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1569  SemiSpace* space = page->semi_space();
1570  CHECK_EQ(space, end_page->semi_space());
1571  // Start address is before end address, either on same page,
1572  // or end address is on a later page in the linked list of
1573  // semi-space pages.
1574  if (page == end_page) {
1575  CHECK(start <= end);
1576  } else {
1577  while (page != end_page) {
1578  page = page->next_page();
1579  CHECK_NE(page, space->anchor());
1580  }
1581  }
1582 }
1583 #endif
1584 
1585 
1586 // -----------------------------------------------------------------------------
1587 // SemiSpaceIterator implementation.
1588 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1589  Initialize(space->bottom(), space->top(), NULL);
1590 }
1591 
1592 
1593 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1594  HeapObjectCallback size_func) {
1595  Initialize(space->bottom(), space->top(), size_func);
1596 }
1597 
1598 
1599 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1600  Initialize(start, space->top(), NULL);
1601 }
1602 
1603 
1604 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1605  Initialize(from, to, NULL);
1606 }
1607 
1608 
1609 void SemiSpaceIterator::Initialize(Address start,
1610  Address end,
1611  HeapObjectCallback size_func) {
1612  SemiSpace::AssertValidRange(start, end);
1613  current_ = start;
1614  limit_ = end;
1615  size_func_ = size_func;
1616 }
1617 
1618 
1619 #ifdef DEBUG
1620 // heap_histograms is shared, always clear it before using it.
1621 static void ClearHistograms() {
1622  Isolate* isolate = Isolate::Current();
1623  // We reset the name each time, though it hasn't changed.
1624 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1625  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1626 #undef DEF_TYPE_NAME
1627 
1628 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1629  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1630 #undef CLEAR_HISTOGRAM
1631 
1632  isolate->js_spill_information()->Clear();
1633 }
1634 
1635 
1636 static void ClearCodeKindStatistics() {
1637  Isolate* isolate = Isolate::Current();
1638  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1639  isolate->code_kind_statistics()[i] = 0;
1640  }
1641 }
1642 
1643 
1644 static void ReportCodeKindStatistics() {
1645  Isolate* isolate = Isolate::Current();
1646  const char* table[Code::NUMBER_OF_KINDS] = { NULL };
1647 
1648 #define CASE(name) \
1649  case Code::name: table[Code::name] = #name; \
1650  break
1651 
1652  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1653  switch (static_cast<Code::Kind>(i)) {
1654  CASE(FUNCTION);
1655  CASE(OPTIMIZED_FUNCTION);
1656  CASE(STUB);
1657  CASE(BUILTIN);
1658  CASE(LOAD_IC);
1659  CASE(KEYED_LOAD_IC);
1660  CASE(STORE_IC);
1661  CASE(KEYED_STORE_IC);
1662  CASE(CALL_IC);
1663  CASE(KEYED_CALL_IC);
1664  CASE(UNARY_OP_IC);
1665  CASE(BINARY_OP_IC);
1666  CASE(COMPARE_IC);
1667  CASE(TO_BOOLEAN_IC);
1668  }
1669  }
1670 
1671 #undef CASE
1672 
1673  PrintF("\n Code kind histograms: \n");
1674  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1675  if (isolate->code_kind_statistics()[i] > 0) {
1676  PrintF(" %-20s: %10d bytes\n", table[i],
1677  isolate->code_kind_statistics()[i]);
1678  }
1679  }
1680  PrintF("\n");
1681 }
1682 
1683 
1684 static int CollectHistogramInfo(HeapObject* obj) {
1685  Isolate* isolate = Isolate::Current();
1686  InstanceType type = obj->map()->instance_type();
1687  ASSERT(0 <= type && type <= LAST_TYPE);
1688  ASSERT(isolate->heap_histograms()[type].name() != NULL);
1689  isolate->heap_histograms()[type].increment_number(1);
1690  isolate->heap_histograms()[type].increment_bytes(obj->Size());
1691 
1692  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1693  JSObject::cast(obj)->IncrementSpillStatistics(
1694  isolate->js_spill_information());
1695  }
1696 
1697  return obj->Size();
1698 }
1699 
1700 
1701 static void ReportHistogram(bool print_spill) {
1702  Isolate* isolate = Isolate::Current();
1703  PrintF("\n Object Histogram:\n");
1704  for (int i = 0; i <= LAST_TYPE; i++) {
1705  if (isolate->heap_histograms()[i].number() > 0) {
1706  PrintF(" %-34s%10d (%10d bytes)\n",
1707  isolate->heap_histograms()[i].name(),
1708  isolate->heap_histograms()[i].number(),
1709  isolate->heap_histograms()[i].bytes());
1710  }
1711  }
1712  PrintF("\n");
1713 
1714  // Summarize string types.
1715  int string_number = 0;
1716  int string_bytes = 0;
1717 #define INCREMENT(type, size, name, camel_name) \
1718  string_number += isolate->heap_histograms()[type].number(); \
1719  string_bytes += isolate->heap_histograms()[type].bytes();
1720  STRING_TYPE_LIST(INCREMENT)
1721 #undef INCREMENT
1722  if (string_number > 0) {
1723  PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1724  string_bytes);
1725  }
1726 
1727  if (FLAG_collect_heap_spill_statistics && print_spill) {
1728  isolate->js_spill_information()->Print();
1729  }
1730 }
1731 #endif // DEBUG
1732 
1733 
1734 // Support for statistics gathering for --heap-stats and --log-gc.
1735 void NewSpace::ClearHistograms() {
1736  for (int i = 0; i <= LAST_TYPE; i++) {
1737  allocated_histogram_[i].clear();
1738  promoted_histogram_[i].clear();
1739  }
1740 }
1741 
1742 // Because the copying collector does not touch garbage objects, we iterate
1743 // the new space before a collection to get a histogram of allocated objects.
1744 // This only happens when --log-gc flag is set.
1745 void NewSpace::CollectStatistics() {
1746  ClearHistograms();
1747  SemiSpaceIterator it(this);
1748  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1749  RecordAllocation(obj);
1750 }
1751 
1752 
1753 static void DoReportStatistics(Isolate* isolate,
1754  HistogramInfo* info, const char* description) {
1755  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1756  // Lump all the string types together.
1757  int string_number = 0;
1758  int string_bytes = 0;
1759 #define INCREMENT(type, size, name, camel_name) \
1760  string_number += info[type].number(); \
1761  string_bytes += info[type].bytes();
1762  STRING_TYPE_LIST(INCREMENT)
1763 #undef INCREMENT
1764  if (string_number > 0) {
1765  LOG(isolate,
1766  HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1767  }
1768 
1769  // Then do the other types.
1770  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1771  if (info[i].number() > 0) {
1772  LOG(isolate,
1773  HeapSampleItemEvent(info[i].name(), info[i].number(),
1774  info[i].bytes()));
1775  }
1776  }
1777  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1778 }
1779 
1780 
1781 void NewSpace::ReportStatistics() {
1782 #ifdef DEBUG
1783  if (FLAG_heap_stats) {
1784  float pct = static_cast<float>(Available()) / Capacity();
1785  PrintF(" capacity: %" V8_PTR_PREFIX "d"
1786  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1787  Capacity(), Available(), static_cast<int>(pct*100));
1788  PrintF("\n Object Histogram:\n");
1789  for (int i = 0; i <= LAST_TYPE; i++) {
1790  if (allocated_histogram_[i].number() > 0) {
1791  PrintF(" %-34s%10d (%10d bytes)\n",
1792  allocated_histogram_[i].name(),
1793  allocated_histogram_[i].number(),
1794  allocated_histogram_[i].bytes());
1795  }
1796  }
1797  PrintF("\n");
1798  }
1799 #endif // DEBUG
1800 
1801  if (FLAG_log_gc) {
1802  Isolate* isolate = ISOLATE;
1803  DoReportStatistics(isolate, allocated_histogram_, "allocated");
1804  DoReportStatistics(isolate, promoted_histogram_, "promoted");
1805  }
1806 }
1807 
1808 
1809 void NewSpace::RecordAllocation(HeapObject* obj) {
1810  InstanceType type = obj->map()->instance_type();
1811  ASSERT(0 <= type && type <= LAST_TYPE);
1812  allocated_histogram_[type].increment_number(1);
1813  allocated_histogram_[type].increment_bytes(obj->Size());
1814 }
1815 
1816 
1817 void NewSpace::RecordPromotion(HeapObject* obj) {
1818  InstanceType type = obj->map()->instance_type();
1819  ASSERT(0 <= type && type <= LAST_TYPE);
1820  promoted_histogram_[type].increment_number(1);
1821  promoted_histogram_[type].increment_bytes(obj->Size());
1822 }
1823 
1824 // -----------------------------------------------------------------------------
1825 // Free lists for old object spaces implementation
1826 
1827 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1828  ASSERT(size_in_bytes > 0);
1829  ASSERT(IsAligned(size_in_bytes, kPointerSize));
1830 
1831  // We write a map and possibly size information to the block. If the block
1832  // is big enough to be a FreeSpace with at least one extra word (the next
1833  // pointer), we set its map to be the free space map and its size to an
1834  // appropriate array length for the desired size from HeapObject::Size().
1835  // If the block is too small (e.g. one or two words) to hold both a size
1836  // field and a next pointer, we give it a filler map that gives it the
1837  // correct size.
1838  if (size_in_bytes > FreeSpace::kHeaderSize) {
1839  set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1840  // Can't use FreeSpace::cast because it fails during deserialization.
1841  FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1842  this_as_free_space->set_size(size_in_bytes);
1843  } else if (size_in_bytes == kPointerSize) {
1844  set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1845  } else if (size_in_bytes == 2 * kPointerSize) {
1846  set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1847  } else {
1848  UNREACHABLE();
1849  }
1850  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1851  // deserialization because the free space map is not done yet.
1852 }
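set_size() above overlays one of three layouts on a dead block depending on its size: a one-word filler, a two-word filler, or a real FreeSpace with a length field and room for a next pointer. A simplified, self-contained illustration of that size-dependent classification, with hypothetical tag names in place of V8's maps (and the header treated as two words, as in the code above):

#include <cassert>
#include <cstddef>

enum FreeBlockKind {
  kOnePointerFiller,   // exactly one word: no room for a size or next field
  kTwoPointerFiller,   // exactly two words: still too small for FreeSpace
  kFreeSpace           // big enough to carry a length and a next pointer
};

FreeBlockKind ClassifyFreeBlock(size_t size_in_bytes, size_t pointer_size) {
  const size_t kFreeSpaceHeaderSize = 2 * pointer_size;  // map word + length word
  assert(size_in_bytes > 0 && size_in_bytes % pointer_size == 0);
  if (size_in_bytes > kFreeSpaceHeaderSize) return kFreeSpace;
  if (size_in_bytes == pointer_size) return kOnePointerFiller;
  if (size_in_bytes == 2 * pointer_size) return kTwoPointerFiller;
  assert(false);  // mirrors the UNREACHABLE() branch above
  return kFreeSpace;
}

int main() {
  assert(ClassifyFreeBlock(8, 8) == kOnePointerFiller);
  assert(ClassifyFreeBlock(16, 8) == kTwoPointerFiller);
  assert(ClassifyFreeBlock(64, 8) == kFreeSpace);
  return 0;
}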
1853 
1854 
1855 FreeListNode* FreeListNode::next() {
1856  ASSERT(IsFreeListNode(this));
1857  if (map() == HEAP->raw_unchecked_free_space_map()) {
1858  ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1859  return reinterpret_cast<FreeListNode*>(
1860  Memory::Address_at(address() + kNextOffset));
1861  } else {
1862  return reinterpret_cast<FreeListNode*>(
1863  Memory::Address_at(address() + kPointerSize));
1864  }
1865 }
1866 
1867 
1868 FreeListNode** FreeListNode::next_address() {
1869  ASSERT(IsFreeListNode(this));
1870  if (map() == HEAP->raw_unchecked_free_space_map()) {
1871  ASSERT(Size() >= kNextOffset + kPointerSize);
1872  return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
1873  } else {
1874  return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
1875  }
1876 }
1877 
1878 
1879 void FreeListNode::set_next(FreeListNode* next) {
1880  ASSERT(IsFreeListNode(this));
1881  // While we are booting the VM the free space map will actually be null. So
1882  // we have to make sure that we don't try to use it for anything at that
1883  // stage.
1884  if (map() == HEAP->raw_unchecked_free_space_map()) {
1885  ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1886  Memory::Address_at(address() + kNextOffset) =
1887  reinterpret_cast<Address>(next);
1888  } else {
1889  Memory::Address_at(address() + kPointerSize) =
1890  reinterpret_cast<Address>(next);
1891  }
1892 }
1893 
1894 
1895 FreeList::FreeList(PagedSpace* owner)
1896  : owner_(owner), heap_(owner->heap()) {
1897  Reset();
1898 }
1899 
1900 
1901 void FreeList::Reset() {
1902  available_ = 0;
1903  small_list_ = NULL;
1904  medium_list_ = NULL;
1905  large_list_ = NULL;
1906  huge_list_ = NULL;
1907 }
1908 
1909 
1910 int FreeList::Free(Address start, int size_in_bytes) {
1911  if (size_in_bytes == 0) return 0;
1912  FreeListNode* node = FreeListNode::FromAddress(start);
1913  node->set_size(heap_, size_in_bytes);
1914 
1915  // Early return to drop too-small blocks on the floor.
1916  if (size_in_bytes < kSmallListMin) return size_in_bytes;
1917 
1918  // Insert other blocks at the head of a free list of the appropriate
1919  // magnitude.
1920  if (size_in_bytes <= kSmallListMax) {
1921  node->set_next(small_list_);
1922  small_list_ = node;
1923  } else if (size_in_bytes <= kMediumListMax) {
1924  node->set_next(medium_list_);
1925  medium_list_ = node;
1926  } else if (size_in_bytes <= kLargeListMax) {
1927  node->set_next(large_list_);
1928  large_list_ = node;
1929  } else {
1930  node->set_next(huge_list_);
1931  huge_list_ = node;
1932  }
1933  available_ += size_in_bytes;
1934  ASSERT(IsVeryLong() || available_ == SumFreeLists());
1935  return 0;
1936 }
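
Free() above files each block onto one of four lists by size class, and blocks below kSmallListMin are simply dropped (their bytes remain accounted as waste). Here is a hedged sketch of that routing; the boundary values are made up for illustration, since the real kSmallListMin/kSmallListMax/kMediumListMax/kLargeListMax constants live in spaces.h.

// Standalone illustration; boundaries are assumptions, not V8's real values.
#include <cstdio>
#include <initializer_list>

namespace sketch {

const int kWordSize = 8;
const int kSmallListMin = 4 * kWordSize;   // below this: dropped on the floor
const int kSmallListMax = 0x100;           // hypothetical class boundaries
const int kMediumListMax = 0x800;
const int kLargeListMax = 0x4000;

const char* FreeListFor(int size_in_bytes) {
  if (size_in_bytes < kSmallListMin) return "(too small, not tracked)";
  if (size_in_bytes <= kSmallListMax) return "small_list_";
  if (size_in_bytes <= kMediumListMax) return "medium_list_";
  if (size_in_bytes <= kLargeListMax) return "large_list_";
  return "huge_list_";
}

}  // namespace sketch

int main() {
  for (int size : {16, 64, 1024, 8192, 1 << 20}) {
    std::printf("%8d bytes -> %s\n", size, sketch::FreeListFor(size));
  }
  return 0;
}
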
1937 
1938 
1939 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1940  FreeListNode* node = *list;
1941 
1942  if (node == NULL) return NULL;
1943 
1944  while (node != NULL &&
1945  Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1946  available_ -= node->Size();
1947  node = node->next();
1948  }
1949 
1950  if (node != NULL) {
1951  *node_size = node->Size();
1952  *list = node->next();
1953  } else {
1954  *list = NULL;
1955  }
1956 
1957  return node;
1958 }
1959 
1960 
1961 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1962  FreeListNode* node = NULL;
1963 
1964  if (size_in_bytes <= kSmallAllocationMax) {
1965  node = PickNodeFromList(&small_list_, node_size);
1966  if (node != NULL) return node;
1967  }
1968 
1969  if (size_in_bytes <= kMediumAllocationMax) {
1970  node = PickNodeFromList(&medium_list_, node_size);
1971  if (node != NULL) return node;
1972  }
1973 
1974  if (size_in_bytes <= kLargeAllocationMax) {
1975  node = PickNodeFromList(&large_list_, node_size);
1976  if (node != NULL) return node;
1977  }
1978 
1979  for (FreeListNode** cur = &huge_list_;
1980  *cur != NULL;
1981  cur = (*cur)->next_address()) {
1982  FreeListNode* cur_node = *cur;
1983  while (cur_node != NULL &&
1984  Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1985  available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1986  cur_node = cur_node->next();
1987  }
1988 
1989  *cur = cur_node;
1990  if (cur_node == NULL) break;
1991 
1992  ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1993  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1994  int size = cur_as_free_space->Size();
1995  if (size >= size_in_bytes) {
1996  // Large enough node found. Unlink it from the list.
1997  node = *cur;
1998  *node_size = size;
1999  *cur = node->next();
2000  break;
2001  }
2002  }
2003 
2004  return node;
2005 }
2006 
2007 
2008 // Allocation on the old space free list. If it succeeds then a new linear
2009 // allocation space has been set up with the top and limit of the space. If
2010 // the allocation fails then NULL is returned, and the caller can perform a GC
2011 // or allocate a new page before retrying.
2012 HeapObject* FreeList::Allocate(int size_in_bytes) {
2013  ASSERT(0 < size_in_bytes);
2014  ASSERT(size_in_bytes <= kMaxBlockSize);
2015  ASSERT(IsAligned(size_in_bytes, kPointerSize));
2016  // Don't free list allocate if there is linear space available.
2017  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
2018 
2019  int new_node_size = 0;
2020  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2021  if (new_node == NULL) return NULL;
2022 
2023  available_ -= new_node_size;
2024  ASSERT(IsVeryLong() || available_ == SumFreeLists());
2025 
2026  int bytes_left = new_node_size - size_in_bytes;
2027  ASSERT(bytes_left >= 0);
2028 
2029  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2030  // Mark the old linear allocation area with a free space map so it can be
2031  // skipped when scanning the heap. This also puts it back in the free list
2032  // if it is big enough.
2033  owner_->Free(owner_->top(), old_linear_size);
2034 
2035  owner_->heap()->incremental_marking()->OldSpaceStep(
2036  size_in_bytes - old_linear_size);
2037 
2038 #ifdef DEBUG
2039  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2040  reinterpret_cast<Object**>(new_node->address())[i] =
2041  Smi::FromInt(kCodeZapValue);
2042  }
2043 #endif
2044 
2045  // The old-space-step might have finished sweeping and restarted marking.
2046  // Verify that it did not turn the page of the new node into an evacuation
2047  // candidate.
2048  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2049 
2050  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2051 
2052  // Memory in the linear allocation area is counted as allocated. We may free
2053  // a little of this again immediately - see below.
2054  owner_->Allocate(new_node_size);
2055 
2056  if (bytes_left > kThreshold &&
2057  owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2058  FLAG_incremental_marking_steps) {
2059  int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2060  // We don't want to give too large linear areas to the allocator while
2061  // incremental marking is going on, because we won't check again whether
2062  // we want to do another increment until the linear area is used up.
2063  owner_->Free(new_node->address() + size_in_bytes + linear_size,
2064  new_node_size - size_in_bytes - linear_size);
2065  owner_->SetTop(new_node->address() + size_in_bytes,
2066  new_node->address() + size_in_bytes + linear_size);
2067  } else if (bytes_left > 0) {
2068  // Normally we give the rest of the node to the allocator as its new
2069  // linear allocation area.
2070  owner_->SetTop(new_node->address() + size_in_bytes,
2071  new_node->address() + new_node_size);
2072  } else {
2073  // TODO(gc) Try not freeing linear allocation region when bytes_left
2074  // are zero.
2075  owner_->SetTop(NULL, NULL);
2076  }
2077 
2078  return new_node;
2079 }
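
Allocate() above carves the node it finds into three parts: the requested object, a new linear allocation area, and (while incremental marking is incomplete) a tail that is handed straight back to the free list so the allocator re-enters the slow path, and thus the marker, reasonably often. A minimal arithmetic sketch of that split, with an assumed threshold standing in for IncrementalMarking::kAllocatedThreshold:

// Standalone illustration of the split; threshold value is an assumption.
#include <cstdio>

namespace sketch {

struct Split {
  int object_bytes;       // handed back to the caller
  int linear_area_bytes;  // becomes the new top..limit bump region
  int refreed_bytes;      // returned to the free list immediately
};

Split SplitFreeNode(int node_size, int request, int threshold,
                    bool marking_incomplete) {
  int bytes_left = node_size - request;
  int linear = bytes_left;
  if (marking_incomplete && bytes_left > threshold) {
    // Keep the bump area small so allocation keeps polling the marker.
    linear = threshold;
  }
  return Split{request, linear, bytes_left - linear};
}

}  // namespace sketch

int main() {
  sketch::Split s = sketch::SplitFreeNode(64 * 1024, 128, 8 * 1024, true);
  std::printf("object=%d linear=%d refreed=%d\n",
              s.object_bytes, s.linear_area_bytes, s.refreed_bytes);
  return 0;
}
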
2080 
2081 
2082 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2083  intptr_t sum = 0;
2084  while (n != NULL) {
2085  if (Page::FromAddress(n->address()) == p) {
2086  FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
2087  sum += free_space->Size();
2088  }
2089  n = n->next();
2090  }
2091  return sum;
2092 }
2093 
2094 
2095 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2096  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
2097  if (sizes->huge_size_ < p->area_size()) {
2098  sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
2099  sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
2100  sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
2101  } else {
2102  sizes->small_size_ = 0;
2103  sizes->medium_size_ = 0;
2104  sizes->large_size_ = 0;
2105  }
2106 }
2107 
2108 
2109 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2110  intptr_t sum = 0;
2111  while (*n != NULL) {
2112  if (Page::FromAddress((*n)->address()) == p) {
2113  FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2114  sum += free_space->Size();
2115  *n = (*n)->next();
2116  } else {
2117  n = (*n)->next_address();
2118  }
2119  }
2120  return sum;
2121 }
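
EvictFreeListItemsInList above walks the list through a pointer-to-pointer so nodes on the evicted page can be unlinked in place, with no separate "previous" pointer. A generic sketch of the same unlink idiom on a plain singly linked list:

// Standalone illustration of the pointer-to-pointer unlink idiom used above.
#include <cstdio>

namespace sketch {

struct Node {
  int value;
  Node* next;
};

// Removes every node whose value matches `doomed`, returning how many were
// unlinked. `n` always points at the link that may have to be rewritten.
int RemoveMatching(Node** n, int doomed) {
  int removed = 0;
  while (*n != nullptr) {
    if ((*n)->value == doomed) {
      *n = (*n)->next;   // unlink in place through the incoming link
      ++removed;
    } else {
      n = &(*n)->next;   // advance to the next link slot
    }
  }
  return removed;
}

}  // namespace sketch

int main() {
  sketch::Node c = {3, nullptr}, b = {2, &c}, a = {2, &b};
  sketch::Node* head = &a;
  int removed = sketch::RemoveMatching(&head, 2);
  std::printf("removed %d, head now %d\n", removed, head ? head->value : -1);
  return 0;
}
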
2122 
2123 
2124 intptr_t FreeList::EvictFreeListItems(Page* p) {
2125  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
2126 
2127  if (sum < p->area_size()) {
2128  sum += EvictFreeListItemsInList(&small_list_, p) +
2129  EvictFreeListItemsInList(&medium_list_, p) +
2130  EvictFreeListItemsInList(&large_list_, p);
2131  }
2132 
2133  available_ -= static_cast<int>(sum);
2134 
2135  return sum;
2136 }
2137 
2138 
2139 #ifdef DEBUG
2140 intptr_t FreeList::SumFreeList(FreeListNode* cur) {
2141  intptr_t sum = 0;
2142  while (cur != NULL) {
2143  ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
2144  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2145  sum += cur_as_free_space->Size();
2146  cur = cur->next();
2147  }
2148  return sum;
2149 }
2150 
2151 
2152 static const int kVeryLongFreeList = 500;
2153 
2154 
2155 int FreeList::FreeListLength(FreeListNode* cur) {
2156  int length = 0;
2157  while (cur != NULL) {
2158  length++;
2159  cur = cur->next();
2160  if (length == kVeryLongFreeList) return length;
2161  }
2162  return length;
2163 }
2164 
2165 
2166 bool FreeList::IsVeryLong() {
2167  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2168  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2169  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
2170  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
2171  return false;
2172 }
2173 
2174 
2175 // This can take a very long time because it is linear in the number of entries
2176 // on the free list, so it should not be called if FreeListLength returns
2177 // kVeryLongFreeList.
2178 intptr_t FreeList::SumFreeLists() {
2179  intptr_t sum = SumFreeList(small_list_);
2180  sum += SumFreeList(medium_list_);
2181  sum += SumFreeList(large_list_);
2182  sum += SumFreeList(huge_list_);
2183  return sum;
2184 }
2185 #endif
2186 
2187 
2188 // -----------------------------------------------------------------------------
2189 // OldSpace implementation
2190 
2191 bool NewSpace::ReserveSpace(int bytes) {
2192  // We can't reliably unpack a partial snapshot that needs more new space
2193  // than the minimum NewSpace size. The limit can be set lower than
2194  // the end of new space either because there is more space on the next page
2195  // or because we have lowered the limit in order to get periodic incremental
2196  // marking. The most reliable way to ensure that there is linear space is
2197  // to do the allocation, then rewind the limit.
2198  ASSERT(bytes <= InitialCapacity());
2199  MaybeObject* maybe = AllocateRaw(bytes);
2200  Object* object = NULL;
2201  if (!maybe->ToObject(&object)) return false;
2202  HeapObject* allocation = HeapObject::cast(object);
2203  Address top = allocation_info_.top;
2204  if ((top - bytes) == allocation->address()) {
2205  allocation_info_.top = allocation->address();
2206  return true;
2207  }
2208  // There may be a borderline case here where the allocation succeeded, but
2209  // the limit and top have moved on to a new page. In that case we try again.
2210  return ReserveSpace(bytes);
2211 }
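
As the comment above explains, the reliable way to guarantee `bytes` of linear space is to actually allocate them and then rewind the top pointer. The following tiny bump-allocator sketch models that reserve-by-rewinding trick; all names here are illustrative, not V8 API.

// Standalone illustration of "allocate, then rewind the top" reservation.
#include <cstdio>

namespace sketch {

struct BumpSpace {
  char buffer[1024];
  char* top = buffer;
  char* limit = buffer + sizeof(buffer);

  char* AllocateRaw(int bytes) {
    if (limit - top < bytes) return nullptr;   // would need a new page in V8
    char* result = top;
    top += bytes;
    return result;
  }

  // Guarantee `bytes` of contiguous linear space: do the allocation, and if
  // the bytes came straight off the current top, give them back by rewinding.
  bool ReserveSpace(int bytes) {
    char* allocation = AllocateRaw(bytes);
    if (allocation == nullptr) return false;
    if (top - bytes == allocation) {
      top = allocation;                        // rewind; space stays reserved
      return true;
    }
    return ReserveSpace(bytes);                // crossed a boundary; try again
  }
};

}  // namespace sketch

int main() {
  sketch::BumpSpace space;
  bool ok = space.ReserveSpace(256);
  std::printf("reserved: %s, top offset: %ld\n", ok ? "yes" : "no",
              static_cast<long>(space.top - space.buffer));
  return 0;
}
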
2212 
2213 
2214 void PagedSpace::PrepareForMarkCompact() {
2215  // We don't have a linear allocation area while sweeping. It will be restored
2216  // on the first allocation after the sweep.
2217  // Mark the old linear allocation area with a free space map so it can be
2218  // skipped when scanning the heap.
2219  int old_linear_size = static_cast<int>(limit() - top());
2220  Free(top(), old_linear_size);
2221  SetTop(NULL, NULL);
2222 
2223  // Stop lazy sweeping and clear marking bits for unswept pages.
2224  if (first_unswept_page_ != NULL) {
2225  Page* p = first_unswept_page_;
2226  do {
2227  // Do not use ShouldBeSweptLazily predicate here.
2228  // New evacuation candidates were selected but they still have
2229  // to be swept before collection starts.
2230  if (!p->WasSwept()) {
2231  Bitmap::Clear(p);
2232  if (FLAG_gc_verbose) {
2233  PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2234  reinterpret_cast<intptr_t>(p));
2235  }
2236  }
2237  p = p->next_page();
2238  } while (p != anchor());
2239  }
2240  first_unswept_page_ = Page::FromAddress(NULL);
2241  unswept_free_bytes_ = 0;
2242 
2243  // Clear the free list before a full GC---it will be rebuilt afterward.
2244  free_list_.Reset();
2245 }
2246 
2247 
2248 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2249  ASSERT(size_in_bytes <= AreaSize());
2250  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2251  Address current_top = allocation_info_.top;
2252  Address new_top = current_top + size_in_bytes;
2253  if (new_top <= allocation_info_.limit) return true;
2254 
2255  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2256  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2257  if (new_area == NULL) return false;
2258 
2259  int old_linear_size = static_cast<int>(limit() - top());
2260  // Mark the old linear allocation area with a free space so it can be
2261  // skipped when scanning the heap. This also puts it back in the free list
2262  // if it is big enough.
2263  Free(top(), old_linear_size);
2264 
2265  SetTop(new_area->address(), new_area->address() + size_in_bytes);
2266  return true;
2267 }
2268 
2269 
2270 static void RepairFreeList(Heap* heap, FreeListNode* n) {
2271  while (n != NULL) {
2272  Map** map_location = reinterpret_cast<Map**>(n->address());
2273  if (*map_location == NULL) {
2274  *map_location = heap->free_space_map();
2275  } else {
2276  ASSERT(*map_location == heap->free_space_map());
2277  }
2278  n = n->next();
2279  }
2280 }
2281 
2282 
2283 void FreeList::RepairLists(Heap* heap) {
2284  RepairFreeList(heap, small_list_);
2285  RepairFreeList(heap, medium_list_);
2286  RepairFreeList(heap, large_list_);
2287  RepairFreeList(heap, huge_list_);
2288 }
2289 
2290 
2291 // After we have booted, we have created a map which represents free space
2292 // on the heap. If there was already a free list then the elements on it
2293 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2294 // fix them.
2295 void PagedSpace::RepairFreeListsAfterBoot() {
2296  free_list_.RepairLists(heap());
2297 }
2298 
2299 
2300 // You have to call this last, since the implementation from PagedSpace
2301 // doesn't know that memory was 'promised' to large object space.
2302 bool LargeObjectSpace::ReserveSpace(int bytes) {
2303  return heap()->OldGenerationCapacityAvailable() >= bytes &&
2304  (!heap()->incremental_marking()->IsStopped() ||
2305  heap()->OldGenerationSpaceAvailable() >= bytes);
2306 }
2307 
2308 
2309 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2310  if (IsSweepingComplete()) return true;
2311 
2312  intptr_t freed_bytes = 0;
2313  Page* p = first_unswept_page_;
2314  do {
2315  Page* next_page = p->next_page();
2316  if (ShouldBeSweptLazily(p)) {
2317  if (FLAG_gc_verbose) {
2318  PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2319  reinterpret_cast<intptr_t>(p));
2320  }
2321  DecreaseUnsweptFreeBytes(p);
2322  freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
2323  }
2324  p = next_page;
2325  } while (p != anchor() && freed_bytes < bytes_to_sweep);
2326 
2327  if (p == anchor()) {
2328  first_unswept_page_ = Page::FromAddress(NULL);
2329  } else {
2330  first_unswept_page_ = p;
2331  }
2332 
2333  heap()->FreeQueuedChunks();
2334 
2335  return IsSweepingComplete();
2336 }
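
AdvanceSweeper above sweeps lazily: it keeps sweeping pages until it has reclaimed at least bytes_to_sweep bytes or runs out of unswept pages, then remembers where it stopped. A simplified budgeted-sweep loop over a flat page array; FakePage and the return convention are stand-ins for the anchor-terminated page ring and first_unswept_page_.

// Standalone illustration of budgeted lazy sweeping; types are stand-ins.
#include <cstdio>
#include <vector>

namespace sketch {

struct FakePage {
  bool swept;
  int reclaimable_bytes;
};

// Sweeps pages in order until `budget` bytes are reclaimed or no unswept
// pages remain. Returns the index of the first still-unswept page, or -1
// when sweeping is complete.
int AdvanceSweeper(std::vector<FakePage>* pages, int first_unswept,
                   int budget) {
  int freed = 0;
  int i = first_unswept;
  while (i < static_cast<int>(pages->size()) && freed < budget) {
    FakePage& p = (*pages)[i];
    if (!p.swept) {
      freed += p.reclaimable_bytes;
      p.swept = true;
    }
    ++i;
  }
  return i < static_cast<int>(pages->size()) ? i : -1;
}

}  // namespace sketch

int main() {
  std::vector<sketch::FakePage> pages = {
      {false, 4096}, {false, 1024}, {false, 8192}, {false, 512}};
  int next = sketch::AdvanceSweeper(&pages, 0, 5000);
  std::printf("next unswept page index: %d\n", next);  // expect 2
  return 0;
}
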
2337 
2338 
2339 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2340  if (allocation_info_.top >= allocation_info_.limit) return;
2341 
2342  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2343  // Create filler object to keep page iterable if it was iterable.
2344  int remaining =
2345  static_cast<int>(allocation_info_.limit - allocation_info_.top);
2346  heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2347 
2348  allocation_info_.top = NULL;
2349  allocation_info_.limit = NULL;
2350  }
2351 }
2352 
2353 
2354 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2355  // Allocation in this space has failed.
2356 
2357  // If there are unswept pages advance lazy sweeper then sweep one page before
2358  // allocating a new page.
2359  if (first_unswept_page_->is_valid()) {
2360  AdvanceSweeper(size_in_bytes);
2361 
2362  // Retry the free list allocation.
2363  HeapObject* object = free_list_.Allocate(size_in_bytes);
2364  if (object != NULL) return object;
2365  }
2366 
2367  // Free list allocation failed and there is no next page. Fail if we have
2368  // hit the old generation size limit that should cause a garbage
2369  // collection.
2370  if (!heap()->always_allocate() &&
2371  heap()->OldGenerationAllocationLimitReached()) {
2372  return NULL;
2373  }
2374 
2375  // Try to expand the space and allocate in the new next page.
2376  if (Expand()) {
2377  return free_list_.Allocate(size_in_bytes);
2378  }
2379 
2380  // Last ditch, sweep all the remaining pages to try to find space. This may
2381  // cause a pause.
2382  if (!IsSweepingComplete()) {
2383  AdvanceSweeper(kMaxInt);
2384 
2385  // Retry the free list allocation.
2386  HeapObject* object = free_list_.Allocate(size_in_bytes);
2387  if (object != NULL) return object;
2388  }
2389 
2390  // Finally, fail.
2391  return NULL;
2392 }
2393 
2394 
2395 #ifdef DEBUG
2396 void PagedSpace::ReportCodeStatistics() {
2397  Isolate* isolate = Isolate::Current();
2398  CommentStatistic* comments_statistics =
2399  isolate->paged_space_comments_statistics();
2400  ReportCodeKindStatistics();
2401  PrintF("Code comment statistics (\" [ comment-txt : size/ "
2402  "count (average)\"):\n");
2403  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2404  const CommentStatistic& cs = comments_statistics[i];
2405  if (cs.size > 0) {
2406  PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2407  cs.size/cs.count);
2408  }
2409  }
2410  PrintF("\n");
2411 }
2412 
2413 
2414 void PagedSpace::ResetCodeStatistics() {
2415  Isolate* isolate = Isolate::Current();
2416  CommentStatistic* comments_statistics =
2417  isolate->paged_space_comments_statistics();
2418  ClearCodeKindStatistics();
2419  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2420  comments_statistics[i].Clear();
2421  }
2422  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2423  comments_statistics[CommentStatistic::kMaxComments].size = 0;
2424  comments_statistics[CommentStatistic::kMaxComments].count = 0;
2425 }
2426 
2427 
2428 // Adds comment to 'comment_statistics' table. Performance OK as long as
2429 // 'kMaxComments' is small
2430 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2431  CommentStatistic* comments_statistics =
2432  isolate->paged_space_comments_statistics();
2433  // Do not count empty comments
2434  if (delta <= 0) return;
2435  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2436  // Search for a free or matching entry in 'comments_statistics': 'cs'
2437  // points to result.
2438  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2439  if (comments_statistics[i].comment == NULL) {
2440  cs = &comments_statistics[i];
2441  cs->comment = comment;
2442  break;
2443  } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2444  cs = &comments_statistics[i];
2445  break;
2446  }
2447  }
2448  // Update entry for 'comment'
2449  cs->size += delta;
2450  cs->count += 1;
2451 }
2452 
2453 
2454 // Call for each nested comment start (start marked with '[ xxx', end marked
2455 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2456 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2457  ASSERT(!it->done());
2458  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2459  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2460  if (tmp[0] != '[') {
2461  // Not a nested comment; skip
2462  return;
2463  }
2464 
2465  // Search for end of nested comment or a new nested comment
2466  const char* const comment_txt =
2467  reinterpret_cast<const char*>(it->rinfo()->data());
2468  const byte* prev_pc = it->rinfo()->pc();
2469  int flat_delta = 0;
2470  it->next();
2471  while (true) {
2472  // All nested comments must be terminated properly, and therefore exit
2473  // from loop.
2474  ASSERT(!it->done());
2475  if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2476  const char* const txt =
2477  reinterpret_cast<const char*>(it->rinfo()->data());
2478  flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2479  if (txt[0] == ']') break; // End of nested comment
2480  // A new comment
2481  CollectCommentStatistics(isolate, it);
2482  // Skip code that was covered with previous comment
2483  prev_pc = it->rinfo()->pc();
2484  }
2485  it->next();
2486  }
2487  EnterComment(isolate, comment_txt, flat_delta);
2488 }
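
CollectCommentStatistics above attributes instruction bytes to nested '[ ...' / ']' comment pairs: bytes emitted directly under a comment count toward it, while bytes under a nested comment are charged to the inner comment by the recursive call. A simplified sketch over a flat, well-formed event list; the event struct and the map stand in for RelocIterator and the comment-statistics table.

// Standalone illustration of the nested-comment attribution above.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

namespace sketch {

struct CommentEvent {
  std::string text;  // "[ foo" opens a scope, "]" closes it
  int pc;            // code offset at which the comment was emitted
};

typedef std::map<std::string, int> CommentSizes;

// Consumes events starting at *index (which must be an opening comment) and
// charges the bytes between events to that comment, minus any bytes consumed
// by nested comments, which recurse and charge themselves. Input must be
// properly nested, as the real code asserts via RelocIterator.
void Collect(const std::vector<CommentEvent>& events, size_t* index,
             CommentSizes* sizes) {
  const std::string name = events[*index].text;
  int prev_pc = events[*index].pc;
  int flat_delta = 0;
  ++*index;
  while (events[*index].text != "]") {
    flat_delta += events[*index].pc - prev_pc;  // bytes directly under `name`
    Collect(events, index, sizes);              // nested comment eats its span
    prev_pc = events[*index].pc;
    ++*index;
  }
  flat_delta += events[*index].pc - prev_pc;
  (*sizes)[name] += flat_delta;
}

}  // namespace sketch

int main() {
  // [ outer ... [ inner ... ] ... ]
  std::vector<sketch::CommentEvent> events = {
      {"[ outer", 0}, {"[ inner", 10}, {"]", 30}, {"]", 50}};
  sketch::CommentSizes sizes;
  size_t index = 0;
  sketch::Collect(events, &index, &sizes);
  for (const auto& entry : sizes) {
    std::printf("%-8s %d bytes\n", entry.first.c_str(), entry.second);
  }
  return 0;
}
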
2489 
2490 
2491 // Collects code size statistics:
2492 // - by code kind
2493 // - by code comment
2494 void PagedSpace::CollectCodeStatistics() {
2495  Isolate* isolate = heap()->isolate();
2496  HeapObjectIterator obj_it(this);
2497  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2498  if (obj->IsCode()) {
2499  Code* code = Code::cast(obj);
2500  isolate->code_kind_statistics()[code->kind()] += code->Size();
2501  RelocIterator it(code);
2502  int delta = 0;
2503  const byte* prev_pc = code->instruction_start();
2504  while (!it.done()) {
2505  if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2506  delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2507  CollectCommentStatistics(isolate, &it);
2508  prev_pc = it.rinfo()->pc();
2509  }
2510  it.next();
2511  }
2512 
2513  ASSERT(code->instruction_start() <= prev_pc &&
2514  prev_pc <= code->instruction_end());
2515  delta += static_cast<int>(code->instruction_end() - prev_pc);
2516  EnterComment(isolate, "NoComment", delta);
2517  }
2518  }
2519 }
2520 
2521 
2522 void PagedSpace::ReportStatistics() {
2523  int pct = static_cast<int>(Available() * 100 / Capacity());
2524  PrintF(" capacity: %" V8_PTR_PREFIX "d"
2525  ", waste: %" V8_PTR_PREFIX "d"
2526  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2527  Capacity(), Waste(), Available(), pct);
2528 
2529  if (was_swept_conservatively_) return;
2530  ClearHistograms();
2531  HeapObjectIterator obj_it(this);
2532  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2533  CollectHistogramInfo(obj);
2534  ReportHistogram(true);
2535 }
2536 #endif
2537 
2538 // -----------------------------------------------------------------------------
2539 // FixedSpace implementation
2540 
2541 void FixedSpace::PrepareForMarkCompact() {
2542  // Call prepare of the super class.
2543  PagedSpace::PrepareForMarkCompact();
2544 
2545  // During a non-compacting collection, everything below the linear
2546  // allocation pointer except wasted top-of-page blocks is considered
2547  // allocated and we will rediscover available bytes during the
2548  // collection.
2549  accounting_stats_.AllocateBytes(free_list_.available());
2550 
2551  // Clear the free list before a full GC---it will be rebuilt afterward.
2552  free_list_.Reset();
2553 }
2554 
2555 
2556 // -----------------------------------------------------------------------------
2557 // MapSpace implementation
2558 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2559 // there is at least one non-inlined virtual function. I would prefer to hide
2560 // the VerifyObject definition behind VERIFY_HEAP.
2561 
2562 void MapSpace::VerifyObject(HeapObject* object) {
2563  // The object should be a map or a free-list node.
2564  CHECK(object->IsMap() || object->IsFreeSpace());
2565 }
2566 
2567 
2568 // -----------------------------------------------------------------------------
2569 // GlobalPropertyCellSpace implementation
2570 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2571 // there is at least one non-inlined virtual function. I would prefer to hide
2572 // the VerifyObject definition behind VERIFY_HEAP.
2573 
2574 void CellSpace::VerifyObject(HeapObject* object) {
2575  // The object should be a global object property cell or a free-list node.
2576  CHECK(object->IsJSGlobalPropertyCell() ||
2577  object->map() == heap()->two_pointer_filler_map());
2578 }
2579 
2580 
2581 // -----------------------------------------------------------------------------
2582 // LargeObjectIterator
2583 
2584 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2585  current_ = space->first_page_;
2586  size_func_ = NULL;
2587 }
2588 
2589 
2590 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2591  HeapObjectCallback size_func) {
2592  current_ = space->first_page_;
2593  size_func_ = size_func;
2594 }
2595 
2596 
2597 HeapObject* LargeObjectIterator::Next() {
2598  if (current_ == NULL) return NULL;
2599 
2600  HeapObject* object = current_->GetObject();
2601  current_ = current_->next_page();
2602  return object;
2603 }
2604 
2605 
2606 // -----------------------------------------------------------------------------
2607 // LargeObjectSpace
2608 static bool ComparePointers(void* key1, void* key2) {
2609  return key1 == key2;
2610 }
2611 
2612 
2613 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2614  intptr_t max_capacity,
2615  AllocationSpace id)
2616  : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2617  max_capacity_(max_capacity),
2618  first_page_(NULL),
2619  size_(0),
2620  page_count_(0),
2621  objects_size_(0),
2622  chunk_map_(ComparePointers, 1024) {}
2623 
2624 
2625 bool LargeObjectSpace::SetUp() {
2626  first_page_ = NULL;
2627  size_ = 0;
2628  page_count_ = 0;
2629  objects_size_ = 0;
2630  chunk_map_.Clear();
2631  return true;
2632 }
2633 
2634 
2635 void LargeObjectSpace::TearDown() {
2636  while (first_page_ != NULL) {
2637  LargePage* page = first_page_;
2638  first_page_ = first_page_->next_page();
2639  LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2640 
2641  ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2642  heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2643  space, kAllocationActionFree, page->size());
2644  heap()->isolate()->memory_allocator()->Free(page);
2645  }
2646  SetUp();
2647 }
2648 
2649 
2650 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2651  Executability executable) {
2652  // Check if we want to force a GC before growing the old space further.
2653  // If so, fail the allocation.
2654  if (!heap()->always_allocate() &&
2655  heap()->OldGenerationAllocationLimitReached()) {
2656  return Failure::RetryAfterGC(identity());
2657  }
2658 
2659  if (Size() + object_size > max_capacity_) {
2660  return Failure::RetryAfterGC(identity());
2661  }
2662 
2663  LargePage* page = heap()->isolate()->memory_allocator()->
2664  AllocateLargePage(object_size, this, executable);
2665  if (page == NULL) return Failure::RetryAfterGC(identity());
2666  ASSERT(page->area_size() >= object_size);
2667 
2668  size_ += static_cast<int>(page->size());
2669  objects_size_ += object_size;
2670  page_count_++;
2671  page->set_next_page(first_page_);
2672  first_page_ = page;
2673 
2674  // Register all MemoryChunk::kAlignment-aligned chunks covered by
2675  // this large page in the chunk map.
2676  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2677  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2678  for (uintptr_t key = base; key <= limit; key++) {
2679  HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2680  static_cast<uint32_t>(key),
2681  true);
2682  ASSERT(entry != NULL);
2683  entry->value = page;
2684  }
2685 
2686  HeapObject* object = page->GetObject();
2687 
2688  if (Heap::ShouldZapGarbage()) {
2689  // Make the object consistent so the heap can be verified in OldSpaceStep.
2690  // We only need to do this in debug builds or if verify_heap is on.
2691  reinterpret_cast<Object**>(object->address())[0] =
2692  heap()->fixed_array_map();
2693  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2694  }
2695 
2696  heap()->incremental_marking()->OldSpaceStep(object_size);
2697  return object;
2698 }
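
The chunk map registration above is what lets FindPage turn any interior address into its LargePage with a single hash lookup: every MemoryChunk::kAlignment-sized slot overlapped by the page is keyed by address / kAlignment. A small sketch of that key-range computation and lookup; std::map stands in for the internal HashMap and the alignment value is an assumption.

// Standalone illustration of the alignment-keyed chunk map.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>

namespace sketch {

const uintptr_t kAlignment = 1 << 20;  // assumed stand-in for MemoryChunk::kAlignment

struct LargePage {
  uintptr_t start;
  size_t size;
};

typedef std::map<uintptr_t, const LargePage*> ChunkMap;

// Register every kAlignment-sized slot covered by [start, start + size).
void RegisterPage(ChunkMap* map, const LargePage* page) {
  uintptr_t base = page->start / kAlignment;
  uintptr_t limit = base + (page->size - 1) / kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    (*map)[key] = page;
  }
}

// Any address inside the page hashes to one of the registered keys.
const LargePage* FindPage(const ChunkMap& map, uintptr_t address) {
  ChunkMap::const_iterator it = map.find(address / kAlignment);
  if (it == map.end()) return nullptr;
  const LargePage* page = it->second;
  if (address >= page->start && address < page->start + page->size) return page;
  return nullptr;
}

}  // namespace sketch

int main() {
  sketch::LargePage page = {3 * sketch::kAlignment, 5 * sketch::kAlignment / 2};
  sketch::ChunkMap map;
  sketch::RegisterPage(&map, &page);
  uintptr_t probe = page.start + 2 * sketch::kAlignment;  // deep inside the page
  std::printf("found: %s\n", sketch::FindPage(map, probe) ? "yes" : "no");
  return 0;
}
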
2699 
2700 
2701 // GC support
2702 MaybeObject* LargeObjectSpace::FindObject(Address a) {
2703  LargePage* page = FindPage(a);
2704  if (page != NULL) {
2705  return page->GetObject();
2706  }
2707  return Failure::Exception();
2708 }
2709 
2710 
2711 LargePage* LargeObjectSpace::FindPage(Address a) {
2712  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2713  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2714  static_cast<uint32_t>(key),
2715  false);
2716  if (e != NULL) {
2717  ASSERT(e->value != NULL);
2718  LargePage* page = reinterpret_cast<LargePage*>(e->value);
2719  ASSERT(page->is_valid());
2720  if (page->Contains(a)) {
2721  return page;
2722  }
2723  }
2724  return NULL;
2725 }
2726 
2727 
2728 void LargeObjectSpace::FreeUnmarkedObjects() {
2729  LargePage* previous = NULL;
2730  LargePage* current = first_page_;
2731  while (current != NULL) {
2732  HeapObject* object = current->GetObject();
2733  // Can this large page contain pointers to non-trivial objects? No other
2734  // pointer object is this big.
2735  bool is_pointer_object = object->IsFixedArray();
2736  MarkBit mark_bit = Marking::MarkBitFrom(object);
2737  if (mark_bit.Get()) {
2738  mark_bit.Clear();
2739  MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
2740  previous = current;
2741  current = current->next_page();
2742  } else {
2743  LargePage* page = current;
2744  // Cut the chunk out from the chunk list.
2745  current = current->next_page();
2746  if (previous == NULL) {
2747  first_page_ = current;
2748  } else {
2749  previous->set_next_page(current);
2750  }
2751 
2752  // Free the chunk.
2753  MarkCompactCollector::ReportDeleteIfNeeded(
2754  object, heap()->isolate());
2755  size_ -= static_cast<int>(page->size());
2756  objects_size_ -= object->Size();
2757  page_count_--;
2758 
2759  // Remove entries belonging to this page.
2760  // Use variable alignment to help pass length check (<= 80 characters)
2761  // of single line in tools/presubmit.py.
2762  const intptr_t alignment = MemoryChunk::kAlignment;
2763  uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
2764  uintptr_t limit = base + (page->size()-1)/alignment;
2765  for (uintptr_t key = base; key <= limit; key++) {
2766  chunk_map_.Remove(reinterpret_cast<void*>(key),
2767  static_cast<uint32_t>(key));
2768  }
2769 
2770  if (is_pointer_object) {
2771  heap()->QueueMemoryChunkForFree(page);
2772  } else {
2773  heap()->isolate()->memory_allocator()->Free(page);
2774  }
2775  }
2776  }
2777  heap()->FreeQueuedChunks();
2778 }
2779 
2780 
2781 bool LargeObjectSpace::Contains(HeapObject* object) {
2782  Address address = object->address();
2783  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2784 
2785  bool owned = (chunk->owner() == this);
2786 
2787  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2788 
2789  return owned;
2790 }
2791 
2792 
2793 #ifdef VERIFY_HEAP
2794 // We do not assume that the large object iterator works, because it depends
2795 // on the invariants we are checking during verification.
2796 void LargeObjectSpace::Verify() {
2797  for (LargePage* chunk = first_page_;
2798  chunk != NULL;
2799  chunk = chunk->next_page()) {
2800  // Each chunk contains an object that starts at the large object page's
2801  // object area start.
2802  HeapObject* object = chunk->GetObject();
2803  Page* page = Page::FromAddress(object->address());
2804  CHECK(object->address() == page->area_start());
2805 
2806  // The first word should be a map, and we expect all map pointers to be
2807  // in map space.
2808  Map* map = object->map();
2809  CHECK(map->IsMap());
2810  CHECK(heap()->map_space()->Contains(map));
2811 
2812  // We have only code, sequential strings, external strings
2813  // (sequential strings that have been morphed into external
2814  // strings), fixed arrays, and byte arrays in large object space.
2815  CHECK(object->IsCode() || object->IsSeqString() ||
2816  object->IsExternalString() || object->IsFixedArray() ||
2817  object->IsFixedDoubleArray() || object->IsByteArray());
2818 
2819  // The object itself should look OK.
2820  object->Verify();
2821 
2822  // Byte arrays and strings don't have interior pointers.
2823  if (object->IsCode()) {
2824  VerifyPointersVisitor code_visitor;
2825  object->IterateBody(map->instance_type(),
2826  object->Size(),
2827  &code_visitor);
2828  } else if (object->IsFixedArray()) {
2829  FixedArray* array = FixedArray::cast(object);
2830  for (int j = 0; j < array->length(); j++) {
2831  Object* element = array->get(j);
2832  if (element->IsHeapObject()) {
2833  HeapObject* element_object = HeapObject::cast(element);
2834  CHECK(heap()->Contains(element_object));
2835  CHECK(element_object->map()->IsMap());
2836  }
2837  }
2838  }
2839  }
2840 }
2841 #endif
2842 
2843 
2844 #ifdef DEBUG
2845 void LargeObjectSpace::Print() {
2846  LargeObjectIterator it(this);
2847  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2848  obj->Print();
2849  }
2850 }
2851 
2852 
2853 void LargeObjectSpace::ReportStatistics() {
2854  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
2855  int num_objects = 0;
2856  ClearHistograms();
2857  LargeObjectIterator it(this);
2858  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2859  num_objects++;
2860  CollectHistogramInfo(obj);
2861  }
2862 
2863  PrintF(" number of objects %d, "
2864  "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
2865  if (num_objects > 0) ReportHistogram(false);
2866 }
2867 
2868 
2869 void LargeObjectSpace::CollectCodeStatistics() {
2870  Isolate* isolate = heap()->isolate();
2871  LargeObjectIterator obj_it(this);
2872  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2873  if (obj->IsCode()) {
2874  Code* code = Code::cast(obj);
2875  isolate->code_kind_statistics()[code->kind()] += code->Size();
2876  }
2877  }
2878 }
2879 
2880 
2881 void Page::Print() {
2882  // Make a best-effort to print the objects in the page.
2883  PrintF("Page@%p in %s\n",
2884  this->address(),
2885  AllocationSpaceName(this->owner()->identity()));
2886  printf(" --------------------------------------\n");
2887  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
2888  unsigned mark_size = 0;
2889  for (HeapObject* object = objects.Next();
2890  object != NULL;
2891  object = objects.Next()) {
2892  bool is_marked = Marking::MarkBitFrom(object).Get();
2893  PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
2894  if (is_marked) {
2895  mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
2896  }
2897  object->ShortPrint();
2898  PrintF("\n");
2899  }
2900  printf(" --------------------------------------\n");
2901  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2902 }
2903 
2904 #endif // DEBUG
2905 
2906 } } // namespace v8::internal