v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
spaces.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h"
32 #include "mark-compact.h"
33 #include "platform.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
39 // ----------------------------------------------------------------------------
40 // HeapObjectIterator
41 
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43  // You can't actually iterate over the anchor page. It is not a real page,
44  // just an anchor for the doubly linked page list. Initialize as if we have
45  // reached the end of the anchor page, then the first iteration will move on
46  // to the first page.
47  Initialize(space,
48  NULL,
49  NULL,
50  kAllPagesInSpace,
51  NULL);
52 }
53 
54 
55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56  HeapObjectCallback size_func) {
57  // You can't actually iterate over the anchor page. It is not a real page,
58  // just an anchor for the doubly linked page list. Initialize the current
59  // address and end as NULL, then the first iteration will move on
60  // to the first page.
61  Initialize(space,
62  NULL,
63  NULL,
64  kAllPagesInSpace,
65  size_func);
66 }
67 
68 
69 HeapObjectIterator::HeapObjectIterator(Page* page,
70  HeapObjectCallback size_func) {
71  Space* owner = page->owner();
72  ASSERT(owner == HEAP->old_pointer_space() ||
73  owner == HEAP->old_data_space() ||
74  owner == HEAP->map_space() ||
75  owner == HEAP->cell_space() ||
76  owner == HEAP->code_space());
77  Initialize(reinterpret_cast<PagedSpace*>(owner),
78  page->area_start(),
79  page->area_end(),
80  kOnePageOnly,
81  size_func);
82  ASSERT(page->WasSweptPrecisely());
83 }
84 
85 
86 void HeapObjectIterator::Initialize(PagedSpace* space,
87  Address cur, Address end,
88  HeapObjectIterator::PageMode mode,
89  HeapObjectCallback size_f) {
90  // Check that we actually can iterate this space.
92 
93  space_ = space;
94  cur_addr_ = cur;
95  cur_end_ = end;
96  page_mode_ = mode;
97  size_func_ = size_f;
98 }
99 
100 
101 // We have hit the end of the page and should advance to the next block of
102 // objects. This happens at the end of the page.
103 bool HeapObjectIterator::AdvanceToNextPage() {
104  ASSERT(cur_addr_ == cur_end_);
105  if (page_mode_ == kOnePageOnly) return false;
106  Page* cur_page;
107  if (cur_addr_ == NULL) {
108  cur_page = space_->anchor();
109  } else {
110  cur_page = Page::FromAddress(cur_addr_ - 1);
111  ASSERT(cur_addr_ == cur_page->area_end());
112  }
113  cur_page = cur_page->next_page();
114  if (cur_page == space_->anchor()) return false;
115  cur_addr_ = cur_page->area_start();
116  cur_end_ = cur_page->area_end();
117  ASSERT(cur_page->WasSweptPrecisely());
118  return true;
119 }
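The anchor page used above is a sentinel node in a circular doubly linked page list: iteration starts just past the sentinel and stops once it wraps back around to it. A minimal standalone sketch of that pattern (illustrative only, not part of spaces.cc; all names here are invented):

#include <cstdio>

// Sentinel-based circular doubly linked list, mirroring the anchor-page idea.
struct Node {
  Node* prev;
  Node* next;
  int payload;
  explicit Node(int p = 0) : prev(this), next(this), payload(p) {}
  void InsertAfter(Node* other) {   // Same pointer shuffle as MemoryChunk::InsertAfter.
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};

int main() {
  Node anchor;                      // Sentinel: carries no real payload.
  Node a(1), b(2), c(3);
  a.InsertAfter(&anchor);
  b.InsertAfter(&a);
  c.InsertAfter(&b);
  // Advance from the sentinel and stop when we are back at it, exactly like
  // HeapObjectIterator::AdvanceToNextPage does with space_->anchor().
  for (Node* n = anchor.next; n != &anchor; n = n->next) {
    std::printf("%d\n", n->payload);
  }
  return 0;
}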
120 
121 
122 // -----------------------------------------------------------------------------
123 // CodeRange
124 
125 
126 CodeRange::CodeRange(Isolate* isolate)
127  : isolate_(isolate),
128  code_range_(NULL),
129  free_list_(0),
130  allocation_list_(0),
131  current_allocation_block_index_(0) {
132 }
133 
134 
135 bool CodeRange::SetUp(const size_t requested) {
136  ASSERT(code_range_ == NULL);
137 
138  code_range_ = new VirtualMemory(requested);
139  CHECK(code_range_ != NULL);
140  if (!code_range_->IsReserved()) {
141  delete code_range_;
142  code_range_ = NULL;
143  return false;
144  }
145 
146  // We are sure that we have mapped a block of requested addresses.
147  ASSERT(code_range_->size() == requested);
148  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
149  Address base = reinterpret_cast<Address>(code_range_->address());
150  Address aligned_base =
151  RoundUp(reinterpret_cast<Address>(code_range_->address()),
152  MemoryChunk::kAlignment);
153  size_t size = code_range_->size() - (aligned_base - base);
154  allocation_list_.Add(FreeBlock(aligned_base, size));
155  current_allocation_block_index_ = 0;
156  return true;
157 }
158 
159 
160 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
161  const FreeBlock* right) {
162  // The entire point of CodeRange is that the difference between two
163  // addresses in the range can be represented as a signed 32-bit int,
164  // so the cast is semantically correct.
165  return static_cast<int>(left->start - right->start);
166 }
167 
168 
169 void CodeRange::GetNextAllocationBlock(size_t requested) {
170  for (current_allocation_block_index_++;
171  current_allocation_block_index_ < allocation_list_.length();
172  current_allocation_block_index_++) {
173  if (requested <= allocation_list_[current_allocation_block_index_].size) {
174  return; // Found a large enough allocation block.
175  }
176  }
177 
178  // Sort and merge the free blocks on the free list and the allocation list.
179  free_list_.AddAll(allocation_list_);
180  allocation_list_.Clear();
181  free_list_.Sort(&CompareFreeBlockAddress);
182  for (int i = 0; i < free_list_.length();) {
183  FreeBlock merged = free_list_[i];
184  i++;
185  // Add adjacent free blocks to the current merged block.
186  while (i < free_list_.length() &&
187  free_list_[i].start == merged.start + merged.size) {
188  merged.size += free_list_[i].size;
189  i++;
190  }
191  if (merged.size > 0) {
192  allocation_list_.Add(merged);
193  }
194  }
195  free_list_.Clear();
196 
197  for (current_allocation_block_index_ = 0;
198  current_allocation_block_index_ < allocation_list_.length();
199  current_allocation_block_index_++) {
200  if (requested <= allocation_list_[current_allocation_block_index_].size) {
201  return; // Found a large enough allocation block.
202  }
203  }
204 
205  // Code range is full or too fragmented.
206  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
207 }
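When no block on the allocation list is large enough, GetNextAllocationBlock falls back to sorting every free block by address and merging blocks that touch, which undoes fragmentation before declaring the code range full. A standalone sketch of that coalescing step (illustrative only; FreeBlock and Coalesce are assumed names, not V8's API):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

// Returns the blocks sorted by address with touching neighbours merged,
// mirroring the sort-and-merge loop in GetNextAllocationBlock.
std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const FreeBlock& a, const FreeBlock& b) {
              return a.start < b.start;
            });
  std::vector<FreeBlock> merged_list;
  for (size_t i = 0; i < blocks.size();) {
    FreeBlock merged = blocks[i++];
    while (i < blocks.size() &&
           blocks[i].start == merged.start + merged.size) {
      merged.size += blocks[i++].size;   // Absorb the adjacent block.
    }
    merged_list.push_back(merged);
  }
  return merged_list;
}

In the code above the merged result becomes the new allocation_list_ and the free list is cleared, so subsequent requests scan only coalesced blocks.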
208 
209 
210 
211 Address CodeRange::AllocateRawMemory(const size_t requested,
212  size_t* allocated) {
213  ASSERT(current_allocation_block_index_ < allocation_list_.length());
214  if (requested > allocation_list_[current_allocation_block_index_].size) {
215  // Find an allocation block large enough. This function call may
216  // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
217  GetNextAllocationBlock(requested);
218  }
219  // Commit the requested memory at the start of the current allocation block.
220  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
221  FreeBlock current = allocation_list_[current_allocation_block_index_];
222  if (aligned_requested >= (current.size - Page::kPageSize)) {
223  // Don't leave a small free block, useless for a large object or chunk.
224  *allocated = current.size;
225  } else {
226  *allocated = aligned_requested;
227  }
228  ASSERT(*allocated <= current.size);
230  if (!MemoryAllocator::CommitCodePage(code_range_,
231  current.start,
232  *allocated)) {
233  *allocated = 0;
234  return NULL;
235  }
236  allocation_list_[current_allocation_block_index_].start += *allocated;
237  allocation_list_[current_allocation_block_index_].size -= *allocated;
238  if (*allocated == current.size) {
239  GetNextAllocationBlock(0); // This block is used up, get the next one.
240  }
241  return current.start;
242 }
243 
244 
245 void CodeRange::FreeRawMemory(Address address, size_t length) {
247  free_list_.Add(FreeBlock(address, length));
248  code_range_->Uncommit(address, length);
249 }
250 
251 
252 void CodeRange::TearDown() {
253  delete code_range_; // Frees all memory in the virtual memory range.
254  code_range_ = NULL;
255  free_list_.Free();
256  allocation_list_.Free();
257 }
258 
259 
260 // -----------------------------------------------------------------------------
261 // MemoryAllocator
262 //
263 
264 MemoryAllocator::MemoryAllocator(Isolate* isolate)
265  : isolate_(isolate),
266  capacity_(0),
267  capacity_executable_(0),
268  size_(0),
269  size_executable_(0) {
270 }
271 
272 
273 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
274  capacity_ = RoundUp(capacity, Page::kPageSize);
275  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
276  ASSERT_GE(capacity_, capacity_executable_);
277 
278  size_ = 0;
279  size_executable_ = 0;
280 
281  return true;
282 }
283 
284 
285 void MemoryAllocator::TearDown() {
286  // Check that spaces were torn down before MemoryAllocator.
287  ASSERT(size_ == 0);
288  // TODO(gc) this will be true again when we fix FreeMemory.
289  // ASSERT(size_executable_ == 0);
290  capacity_ = 0;
291  capacity_executable_ = 0;
292 }
293 
294 
295 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
296  Executability executable) {
297  // TODO(gc) make code_range part of memory allocator?
298  ASSERT(reservation->IsReserved());
299  size_t size = reservation->size();
300  ASSERT(size_ >= size);
301  size_ -= size;
302 
303  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
304 
305  if (executable == EXECUTABLE) {
306  ASSERT(size_executable_ >= size);
307  size_executable_ -= size;
308  }
309  // Code which is part of the code-range does not have its own VirtualMemory.
310  ASSERT(!isolate_->code_range()->contains(
311  static_cast<Address>(reservation->address())));
312  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
313  reservation->Release();
314 }
315 
316 
317 void MemoryAllocator::FreeMemory(Address base,
318  size_t size,
319  Executability executable) {
320  // TODO(gc) make code_range part of memory allocator?
321  ASSERT(size_ >= size);
322  size_ -= size;
323 
324  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
325 
326  if (executable == EXECUTABLE) {
327  ASSERT(size_executable_ >= size);
328  size_executable_ -= size;
329  }
330  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
331  ASSERT(executable == EXECUTABLE);
332  isolate_->code_range()->FreeRawMemory(base, size);
333  } else {
334  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
335  bool result = VirtualMemory::ReleaseRegion(base, size);
336  USE(result);
337  ASSERT(result);
338  }
339 }
340 
341 
342 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
343  size_t alignment,
344  VirtualMemory* controller) {
345  VirtualMemory reservation(size, alignment);
346 
347  if (!reservation.IsReserved()) return NULL;
348  size_ += reservation.size();
349  Address base = RoundUp(static_cast<Address>(reservation.address()),
350  alignment);
351  controller->TakeControl(&reservation);
352  return base;
353 }
354 
355 
356 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
357  size_t alignment,
358  Executability executable,
359  VirtualMemory* controller) {
360  VirtualMemory reservation;
361  Address base = ReserveAlignedMemory(size, alignment, &reservation);
362  if (base == NULL) return NULL;
363 
364  if (executable == EXECUTABLE) {
365  if (!CommitCodePage(&reservation, base, size)) {
366  base = NULL;
367  }
368  } else {
369  if (!reservation.Commit(base, size, false)) {
370  base = NULL;
371  }
372  }
373 
374  if (base == NULL) {
375  // Failed to commit the body. Release the mapping and any partially
376  // committed regions inside it.
377  reservation.Release();
378  return NULL;
379  }
380 
381  controller->TakeControl(&reservation);
382  return base;
383 }
384 
385 
386 void Page::InitializeAsAnchor(PagedSpace* owner) {
387  set_owner(owner);
388  set_prev_page(this);
389  set_next_page(this);
390 }
391 
392 
393 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
394  Address start,
395  SemiSpace* semi_space) {
396  Address area_start = start + NewSpacePage::kObjectStartOffset;
397  Address area_end = start + Page::kPageSize;
398 
399  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
400  start,
401  Page::kPageSize,
402  area_start,
403  area_end,
404  NOT_EXECUTABLE,
405  semi_space);
406  chunk->set_next_chunk(NULL);
407  chunk->set_prev_chunk(NULL);
408  chunk->initialize_scan_on_scavenge(true);
409  bool in_to_space = (semi_space->id() != kFromSpace);
410  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
412  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
414  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
416  return page;
417 }
418 
419 
420 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
421  set_owner(semi_space);
422  set_next_chunk(this);
423  set_prev_chunk(this);
424  // Flags mark this invalid page as not being in new-space.
425  // All real new-space pages will be in new-space.
426  SetFlags(0, ~0);
427 }
428 
429 
430 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
431  Address base,
432  size_t size,
433  Address area_start,
434  Address area_end,
435  Executability executable,
436  Space* owner) {
437  MemoryChunk* chunk = FromAddress(base);
438 
439  ASSERT(base == chunk->address());
440 
441  chunk->heap_ = heap;
442  chunk->size_ = size;
443  chunk->area_start_ = area_start;
444  chunk->area_end_ = area_end;
445  chunk->flags_ = 0;
446  chunk->set_owner(owner);
447  chunk->InitializeReservedMemory();
448  chunk->slots_buffer_ = NULL;
449  chunk->skip_list_ = NULL;
450  chunk->ResetLiveBytes();
451  Bitmap::Clear(chunk);
452  chunk->initialize_scan_on_scavenge(false);
454 
457 
458  if (executable == EXECUTABLE) {
459  chunk->SetFlag(IS_EXECUTABLE);
460  }
461 
462  if (owner == heap->old_data_space()) {
463  chunk->SetFlag(CONTAINS_ONLY_DATA);
464  }
465 
466  return chunk;
467 }
468 
469 
470 void MemoryChunk::InsertAfter(MemoryChunk* other) {
471  next_chunk_ = other->next_chunk_;
472  prev_chunk_ = other;
473  other->next_chunk_->prev_chunk_ = this;
474  other->next_chunk_ = this;
475 }
476 
477 
482  }
485  prev_chunk_ = NULL;
486  next_chunk_ = NULL;
487 }
488 
489 
490 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
491  Executability executable,
492  Space* owner) {
493  size_t chunk_size;
494  Heap* heap = isolate_->heap();
495  Address base = NULL;
496  VirtualMemory reservation;
497  Address area_start = NULL;
498  Address area_end = NULL;
499  if (executable == EXECUTABLE) {
500  chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
501  OS::CommitPageSize());
502 
503  // Check executable memory limit.
504  if (size_executable_ + chunk_size > capacity_executable_) {
505  LOG(isolate_,
506  StringEvent("MemoryAllocator::AllocateRawMemory",
507  "V8 Executable Allocation capacity exceeded"));
508  return NULL;
509  }
510 
511  // Allocate executable memory either from code range or from the
512  // OS.
513  if (isolate_->code_range()->exists()) {
514  base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
515  ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
517  if (base == NULL) return NULL;
518  size_ += chunk_size;
519  // Update executable memory size.
520  size_executable_ += chunk_size;
521  } else {
522  base = AllocateAlignedMemory(chunk_size,
524  executable,
525  &reservation);
526  if (base == NULL) return NULL;
527  // Update executable memory size.
528  size_executable_ += reservation.size();
529  }
530 
531 #ifdef DEBUG
533  ZapBlock(base + CodePageAreaStartOffset(), body_size);
534 #endif
535  area_start = base + CodePageAreaStartOffset();
536  area_end = area_start + body_size;
537  } else {
538  chunk_size = MemoryChunk::kObjectStartOffset + body_size;
539  base = AllocateAlignedMemory(chunk_size,
541  executable,
542  &reservation);
543 
544  if (base == NULL) return NULL;
545 
546 #ifdef DEBUG
547  ZapBlock(base, chunk_size);
548 #endif
549 
550  area_start = base + Page::kObjectStartOffset;
551  area_end = base + chunk_size;
552  }
553 
554  isolate_->counters()->memory_allocated()->
555  Increment(static_cast<int>(chunk_size));
556 
557  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
558  if (owner != NULL) {
559  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
561  }
562 
563  MemoryChunk* result = MemoryChunk::Initialize(heap,
564  base,
565  chunk_size,
566  area_start,
567  area_end,
568  executable,
569  owner);
570  result->set_reserved_memory(&reservation);
571  return result;
572 }
573 
574 
575 Page* MemoryAllocator::AllocatePage(intptr_t size,
576  PagedSpace* owner,
577  Executability executable) {
578  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
579 
580  if (chunk == NULL) return NULL;
581 
582  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
583 }
584 
585 
586 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
587  Space* owner,
588  Executability executable) {
589  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
590  if (chunk == NULL) return NULL;
591  return LargePage::Initialize(isolate_->heap(), chunk);
592 }
593 
594 
595 void MemoryAllocator::Free(MemoryChunk* chunk) {
596  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
597  if (chunk->owner() != NULL) {
598  ObjectSpace space =
599  static_cast<ObjectSpace>(1 << chunk->owner()->identity());
601  }
602 
603  isolate_->heap()->RememberUnmappedPage(
604  reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
605 
606  delete chunk->slots_buffer();
607  delete chunk->skip_list();
608 
609  VirtualMemory* reservation = chunk->reserved_memory();
610  if (reservation->IsReserved()) {
611  FreeMemory(reservation, chunk->executable());
612  } else {
613  FreeMemory(chunk->address(),
614  chunk->size(),
615  chunk->executable());
616  }
617 }
618 
619 
620 bool MemoryAllocator::CommitBlock(Address start,
621  size_t size,
622  Executability executable) {
623  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
624 #ifdef DEBUG
625  ZapBlock(start, size);
626 #endif
627  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
628  return true;
629 }
630 
631 
632 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
633  if (!VirtualMemory::UncommitRegion(start, size)) return false;
634  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
635  return true;
636 }
637 
638 
639 void MemoryAllocator::ZapBlock(Address start, size_t size) {
640  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
641  Memory::Address_at(start + s) = kZapValue;
642  }
643 }
644 
645 
646 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
647  AllocationAction action,
648  size_t size) {
649  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
650  MemoryAllocationCallbackRegistration registration =
651  memory_allocation_callbacks_[i];
652  if ((registration.space & space) == space &&
653  (registration.action & action) == action)
654  registration.callback(space, action, static_cast<int>(size));
655  }
656 }
657 
658 
660  MemoryAllocationCallback callback) {
661  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
662  if (memory_allocation_callbacks_[i].callback == callback) return true;
663  }
664  return false;
665 }
666 
667 
668 void MemoryAllocator::AddMemoryAllocationCallback(
669  MemoryAllocationCallback callback,
670  ObjectSpace space,
671  AllocationAction action) {
672  ASSERT(callback != NULL);
673  MemoryAllocationCallbackRegistration registration(callback, space, action);
675  return memory_allocation_callbacks_.Add(registration);
676 }
677 
678 
679 void MemoryAllocator::RemoveMemoryAllocationCallback(
680  MemoryAllocationCallback callback) {
681  ASSERT(callback != NULL);
682  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
683  if (memory_allocation_callbacks_[i].callback == callback) {
684  memory_allocation_callbacks_.Remove(i);
685  return;
686  }
687  }
688  UNREACHABLE();
689 }
690 
691 
692 #ifdef DEBUG
693 void MemoryAllocator::ReportStatistics() {
694  float pct = static_cast<float>(capacity_ - size_) / capacity_;
695  PrintF(" capacity: %" V8_PTR_PREFIX "d"
696  ", used: %" V8_PTR_PREFIX "d"
697  ", available: %%%d\n\n",
698  capacity_, size_, static_cast<int>(pct*100));
699 }
700 #endif
701 
702 
703 int MemoryAllocator::CodePageGuardStartOffset() {
704  // We are guarding code pages: the first OS page after the header
705  // will be protected as non-writable.
706  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
707 }
708 
709 
710 int MemoryAllocator::CodePageGuardSize() {
711  return static_cast<int>(OS::CommitPageSize());
712 }
713 
714 
715 int MemoryAllocator::CodePageAreaStartOffset() {
716  // We are guarding code pages: the first OS page after the header
717  // will be protected as non-writable.
718  return CodePageGuardStartOffset() + CodePageGuardSize();
719 }
720 
721 
722 int MemoryAllocator::CodePageAreaEndOffset() {
723  // We are guarding code pages: the last OS page will be protected as
724  // non-writable.
725  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
726 }
727 
728 
729 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
730  Address start,
731  size_t size) {
732  // Commit page header (not executable).
733  if (!vm->Commit(start,
734  CodePageGuardStartOffset(),
735  false)) {
736  return false;
737  }
738 
739  // Create guard page after the header.
740  if (!vm->Guard(start + CodePageGuardStartOffset())) {
741  return false;
742  }
743 
744  // Commit page body (executable).
745  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
746  if (!vm->Commit(start + CodePageAreaStartOffset(),
747  area_size,
748  true)) {
749  return false;
750  }
751 
752  // Create guard page after the allocatable area.
753  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
754  return false;
755  }
756 
757  return true;
758 }
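CommitCodePage lays out an executable chunk as a non-writable header, a guard page, the executable body, and a trailing guard page; the CodePageGuard*/CodePageArea* helpers above only encode that arithmetic. A small sketch of the same layout computation (illustrative only; the constants are assumed for the example and are not V8's actual values):

#include <cassert>
#include <cstddef>
#include <cstdio>

const size_t kPageSize = 1 << 20;       // Assumed chunk size for the example.
const size_t kCommitPageSize = 4096;    // Assumed OS commit page size.
const size_t kObjectStartOffset = 256;  // Assumed header size before rounding.

size_t RoundUpTo(size_t value, size_t granularity) {  // granularity: power of two.
  return (value + granularity - 1) & ~(granularity - 1);
}

int main() {
  size_t guard_start = RoundUpTo(kObjectStartOffset, kCommitPageSize);
  size_t guard_size = kCommitPageSize;
  size_t area_start = guard_start + guard_size;   // First executable byte.
  size_t area_end = kPageSize - kCommitPageSize;  // Start of the trailing guard.
  assert(area_start < area_end);
  std::printf("header [0,%zu) guard [%zu,%zu) body [%zu,%zu) guard [%zu,%zu)\n",
              guard_start, guard_start, area_start, area_start, area_end,
              area_end, kPageSize);
  return 0;
}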
759 
760 
761 // -----------------------------------------------------------------------------
762 // MemoryChunk implementation
763 
764 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
765  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
766  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
767  static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
768  }
769  chunk->IncrementLiveBytes(by);
770 }
771 
772 // -----------------------------------------------------------------------------
773 // PagedSpace implementation
774 
775 PagedSpace::PagedSpace(Heap* heap,
776  intptr_t max_capacity,
777  AllocationSpace id,
778  Executability executable)
779  : Space(heap, id, executable),
780  free_list_(this),
781  was_swept_conservatively_(false),
782  first_unswept_page_(Page::FromAddress(NULL)),
783  unswept_free_bytes_(0) {
784  if (id == CODE_SPACE) {
785  area_size_ = heap->isolate()->memory_allocator()->
786  CodePageAreaSize();
787  } else {
788  area_size_ = Page::kPageSize - Page::kObjectStartOffset;
789  }
790  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
791  * AreaSize();
792  accounting_stats_.Clear();
793 
796 
798 }
799 
800 
801 bool PagedSpace::SetUp() {
802  return true;
803 }
804 
805 
806 bool PagedSpace::HasBeenSetUp() {
807  return true;
808 }
809 
810 
811 void PagedSpace::TearDown() {
812  PageIterator iterator(this);
813  while (iterator.has_next()) {
814  heap()->isolate()->memory_allocator()->Free(iterator.next());
815  }
818  accounting_stats_.Clear();
819 }
820 
821 
822 MaybeObject* PagedSpace::FindObject(Address addr) {
823  // Note: this function can only be called on precisely swept spaces.
824  ASSERT(!heap()->mark_compact_collector()->in_use());
825 
826  if (!Contains(addr)) return Failure::Exception();
827 
828  Page* p = Page::FromAddress(addr);
829  HeapObjectIterator it(p, NULL);
830  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
831  Address cur = obj->address();
832  Address next = cur + obj->Size();
833  if ((cur <= addr) && (addr < next)) return obj;
834  }
835 
836  UNREACHABLE();
837  return Failure::Exception();
838 }
839 
840 bool PagedSpace::CanExpand() {
841  ASSERT(max_capacity_ % AreaSize() == 0);
842 
843  if (Capacity() == max_capacity_) return false;
844 
846 
847  // Are we going to exceed capacity for this space?
848  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
849 
850  return true;
851 }
852 
853 bool PagedSpace::Expand() {
854  if (!CanExpand()) return false;
855 
856  intptr_t size = AreaSize();
857 
858  if (anchor_.next_page() == &anchor_) {
859  size = SizeOfFirstPage();
860  }
861 
863  size, this, executable());
864  if (p == NULL) return false;
865 
867 
869 
870  return true;
871 }
872 
873 
874 int PagedSpace::SizeOfFirstPage() {
875  int size = 0;
876  switch (identity()) {
877  case OLD_POINTER_SPACE:
878  size = 64 * kPointerSize * KB;
879  break;
880  case OLD_DATA_SPACE:
881  size = 192 * KB;
882  break;
883  case MAP_SPACE:
884  size = 128 * KB;
885  break;
886  case CELL_SPACE:
887  size = 96 * KB;
888  break;
889  case CODE_SPACE:
890  if (kPointerSize == 8) {
891  // On x64 we allocate code pages in a special way (from the reserved
892  // 2GB area). That part of the code is not yet upgraded to handle
893  // small pages.
894  size = AreaSize();
895  } else {
896  size = 384 * KB;
897  }
898  break;
899  default:
900  UNREACHABLE();
901  }
902  return Min(size, AreaSize());
903 }
904 
905 
906 int PagedSpace::CountTotalPages() {
907  PageIterator it(this);
908  int count = 0;
909  while (it.has_next()) {
910  it.next();
911  count++;
912  }
913  return count;
914 }
915 
916 
917 void PagedSpace::ReleasePage(Page* page) {
918  ASSERT(page->LiveBytes() == 0);
919  ASSERT(AreaSize() == page->area_size());
920 
921  // Adjust list of unswept pages if the page is the head of the list.
922  if (first_unswept_page_ == page) {
923  first_unswept_page_ = page->next_page();
924  if (first_unswept_page_ == anchor()) {
926  }
927  }
928 
929  if (page->WasSwept()) {
930  intptr_t size = free_list_.EvictFreeListItems(page);
931  accounting_stats_.AllocateBytes(size);
932  ASSERT_EQ(AreaSize(), static_cast<int>(size));
933  } else {
935  }
936 
937  if (Page::FromAllocationTop(allocation_info_.top) == page) {
939  }
940 
941  page->Unlink();
943  heap()->isolate()->memory_allocator()->Free(page);
944  } else {
945  heap()->QueueMemoryChunkForFree(page);
946  }
947 
948  ASSERT(Capacity() > 0);
949  accounting_stats_.ShrinkSpace(AreaSize());
950 }
951 
952 
953 void PagedSpace::ReleaseAllUnusedPages() {
954  PageIterator it(this);
955  while (it.has_next()) {
956  Page* page = it.next();
957  if (!page->WasSwept()) {
958  if (page->LiveBytes() == 0) ReleasePage(page);
959  } else {
961  if (obj->IsFreeSpace() &&
962  FreeSpace::cast(obj)->size() == AreaSize()) {
963  // Sometimes we allocate memory from free list but don't
964  // immediately initialize it (e.g. see PagedSpace::ReserveSpace
965  // called from Heap::ReserveSpace that can cause GC before
966  // reserved space is actually initialized).
967  // Thus we can't simply assume that obj represents a valid
968  // node still owned by a free list
969  // Instead we should verify that the page is fully covered
970  // by free list items.
971  FreeList::SizeStats sizes;
972  free_list_.CountFreeListItems(page, &sizes);
973  if (sizes.Total() == AreaSize()) {
974  ReleasePage(page);
975  }
976  }
977  }
978  }
979  heap()->FreeQueuedChunks();
980 }
981 
982 
983 #ifdef DEBUG
984 void PagedSpace::Print() { }
985 #endif
986 
987 
988 #ifdef DEBUG
989 void PagedSpace::Verify(ObjectVisitor* visitor) {
990  // We can only iterate over the pages if they were swept precisely.
991  if (was_swept_conservatively_) return;
992 
993  bool allocation_pointer_found_in_space =
995  PageIterator page_iterator(this);
996  while (page_iterator.has_next()) {
997  Page* page = page_iterator.next();
998  ASSERT(page->owner() == this);
999  if (page == Page::FromAllocationTop(allocation_info_.top)) {
1000  allocation_pointer_found_in_space = true;
1001  }
1002  ASSERT(page->WasSweptPrecisely());
1003  HeapObjectIterator it(page, NULL);
1004  Address end_of_previous_object = page->area_start();
1005  Address top = page->area_end();
1006  int black_size = 0;
1007  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1008  ASSERT(end_of_previous_object <= object->address());
1009 
1010  // The first word should be a map, and we expect all map pointers to
1011  // be in map space.
1012  Map* map = object->map();
1013  ASSERT(map->IsMap());
1014  ASSERT(heap()->map_space()->Contains(map));
1015 
1016  // Perform space-specific object verification.
1017  VerifyObject(object);
1018 
1019  // The object itself should look OK.
1020  object->Verify();
1021 
1022  // All the interior pointers should be contained in the heap.
1023  int size = object->Size();
1024  object->IterateBody(map->instance_type(), size, visitor);
1025  if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1026  black_size += size;
1027  }
1028 
1029  ASSERT(object->address() + size <= top);
1030  end_of_previous_object = object->address() + size;
1031  }
1032  ASSERT_LE(black_size, page->LiveBytes());
1033  }
1034  ASSERT(allocation_pointer_found_in_space);
1035 }
1036 #endif
1037 
1038 
1039 // -----------------------------------------------------------------------------
1040 // NewSpace implementation
1041 
1042 
1043 bool NewSpace::SetUp(int reserved_semispace_capacity,
1044  int maximum_semispace_capacity) {
1045  // Set up new space based on the preallocated memory block defined by
1046  // start and size. The provided space is divided into two semi-spaces.
1047  // To support fast containment testing in the new space, the size of
1048  // this chunk must be a power of two and it must be aligned to its size.
1049  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1050 
1051  size_t size = 2 * reserved_semispace_capacity;
1052  Address base =
1054  size, size, &reservation_);
1055  if (base == NULL) return false;
1056 
1057  chunk_base_ = base;
1058  chunk_size_ = static_cast<uintptr_t>(size);
1059  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1060 
1061  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1062  ASSERT(IsPowerOf2(maximum_semispace_capacity));
1063 
1064  // Allocate and set up the histogram arrays if necessary.
1065  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1066  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1067 
1068 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1069  promoted_histogram_[name].set_name(#name);
1071 #undef SET_NAME
1072 
1073  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1074  ASSERT(static_cast<intptr_t>(chunk_size_) >=
1075  2 * heap()->ReservedSemiSpaceSize());
1076  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1077 
1078  to_space_.SetUp(chunk_base_,
1079  initial_semispace_capacity,
1080  maximum_semispace_capacity);
1081  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1082  initial_semispace_capacity,
1083  maximum_semispace_capacity);
1084  if (!to_space_.Commit()) {
1085  return false;
1086  }
1087  ASSERT(!from_space_.is_committed()); // No need to use memory yet.
1088 
1089  start_ = chunk_base_;
1090  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1091  object_mask_ = address_mask_ | kHeapObjectTagMask;
1092  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1093 
1095 
1096  return true;
1097 }
1098 
1099 
1100 void NewSpace::TearDown() {
1101  if (allocated_histogram_) {
1102  DeleteArray(allocated_histogram_);
1103  allocated_histogram_ = NULL;
1104  }
1105  if (promoted_histogram_) {
1106  DeleteArray(promoted_histogram_);
1107  promoted_histogram_ = NULL;
1108  }
1109 
1110  start_ = NULL;
1111  allocation_info_.top = NULL;
1112  allocation_info_.limit = NULL;
1113 
1114  to_space_.TearDown();
1115  from_space_.TearDown();
1116 
1117  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1118 
1119  ASSERT(reservation_.IsReserved());
1120  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1121  NOT_EXECUTABLE);
1122  chunk_base_ = NULL;
1123  chunk_size_ = 0;
1124 }
1125 
1126 
1127 void NewSpace::Flip() {
1128  SemiSpace::Swap(&from_space_, &to_space_);
1129 }
1130 
1131 
1133  // Double the semispace size but only up to maximum capacity.
1135  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1136  if (to_space_.GrowTo(new_capacity)) {
1137  // Only grow from space if we managed to grow to-space.
1138  if (!from_space_.GrowTo(new_capacity)) {
1139  // If we managed to grow to-space but couldn't grow from-space,
1140  // attempt to shrink to-space.
1141  if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1142  // We are in an inconsistent state because we could not
1143  // commit/uncommit memory from new space.
1144  V8::FatalProcessOutOfMemory("Failed to grow new space.");
1145  }
1146  }
1147  }
1148  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1149 }
1150 
1151 
1152 void NewSpace::Shrink() {
1153  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1154  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1155  if (rounded_new_capacity < Capacity() &&
1156  to_space_.ShrinkTo(rounded_new_capacity)) {
1157  // Only shrink from-space if we managed to shrink to-space.
1158  from_space_.Reset();
1159  if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1160  // If we managed to shrink to-space but couldn't shrink from
1161  // space, attempt to grow to-space again.
1162  if (!to_space_.GrowTo(from_space_.Capacity())) {
1163  // We are in an inconsistent state because we could not
1164  // commit/uncommit memory from new space.
1165  V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1166  }
1167  }
1168  }
1169  allocation_info_.limit = to_space_.page_high();
1170  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1171 }
1172 
1173 
1174 void NewSpace::UpdateAllocationInfo() {
1175  allocation_info_.top = to_space_.page_low();
1176  allocation_info_.limit = to_space_.page_high();
1177 
1178  // Lower limit during incremental marking.
1179  if (heap()->incremental_marking()->IsMarking() &&
1181  Address new_limit =
1182  allocation_info_.top + inline_allocation_limit_step();
1183  allocation_info_.limit = Min(new_limit, allocation_info_.limit);
1184  }
1185  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1186 }
1187 
1188 
1189 void NewSpace::ResetAllocationInfo() {
1190  to_space_.Reset();
1191  UpdateAllocationInfo();
1192  pages_used_ = 0;
1193  // Clear all mark-bits in the to-space.
1194  NewSpacePageIterator it(&to_space_);
1195  while (it.has_next()) {
1196  Bitmap::Clear(it.next());
1197  }
1198 }
1199 
1200 
1201 bool NewSpace::AddFreshPage() {
1202  Address top = allocation_info_.top;
1203  if (NewSpacePage::IsAtStart(top)) {
1204  // The current page is already empty. Don't try to make another.
1205 
1206  // We should only get here if someone asks to allocate more
1207  // than what can be stored in a single page.
1208  // TODO(gc): Change the limit on new-space allocation to prevent this
1209  // from happening (all such allocations should go directly to LOSpace).
1210  return false;
1211  }
1212  if (!to_space_.AdvancePage()) {
1213  // Failed to get a new page in to-space.
1214  return false;
1215  }
1216 
1217  // Clear remainder of current page.
1218  Address limit = NewSpacePage::FromLimit(top)->area_end();
1219  if (heap()->gc_state() == Heap::SCAVENGE) {
1220  heap()->promotion_queue()->SetNewLimit(limit);
1222  }
1223 
1224  int remaining_in_page = static_cast<int>(limit - top);
1225  heap()->CreateFillerObjectAt(top, remaining_in_page);
1226  pages_used_++;
1227  UpdateAllocationInfo();
1228 
1229  return true;
1230 }
1231 
1232 
1233 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1234  Address old_top = allocation_info_.top;
1235  Address new_top = old_top + size_in_bytes;
1236  Address high = to_space_.page_high();
1237  if (allocation_info_.limit < high) {
1238  // Incremental marking has lowered the limit to get a
1239  // chance to do a step.
1240  allocation_info_.limit = Min(
1241  allocation_info_.limit + inline_allocation_limit_step_,
1242  high);
1243  int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1245  bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1246  top_on_previous_step_ = new_top;
1247  return AllocateRaw(size_in_bytes);
1248  } else if (AddFreshPage()) {
1249  // Switched to new page. Try allocating again.
1250  int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1252  bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1253  top_on_previous_step_ = to_space_.page_low();
1254  return AllocateRaw(size_in_bytes);
1255  } else {
1256  return Failure::RetryAfterGC();
1257  }
1258 }
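SlowAllocateRaw works because the fast path is plain bump-pointer allocation against a limit that is deliberately kept below the real end of the page: hitting the limit funnels the allocator into this slow path, which raises the limit (and, in V8, performs an incremental-marking step) and retries. A simplified standalone sketch of that shape (illustrative only; the slow path here just prints instead of marking):

#include <cstddef>
#include <cstdio>

struct BumpAllocator {
  size_t top;     // Next free offset.
  size_t limit;   // Artificially lowered limit.
  size_t high;    // Real end of the current page.
  size_t step;    // How far the limit is raised on each slow path.

  size_t Allocate(size_t size) {
    if (top + size <= limit) {          // Fast path: bump the pointer.
      size_t result = top;
      top += size;
      return result;
    }
    if (limit < high) {                 // Slow path: raise the limit a bit.
      limit = (limit + step < high) ? limit + step : high;
      std::printf("slow path: limit raised to %zu\n", limit);
      return Allocate(size);            // Retry, as SlowAllocateRaw does.
    }
    return static_cast<size_t>(-1);     // Page exhausted; caller must refill.
  }
};

int main() {
  BumpAllocator allocator = {0, 64, 256, 64};
  for (int i = 0; i < 6; i++) {
    std::printf("alloc -> %zu\n", allocator.Allocate(48));
  }
  return 0;
}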
1259 
1260 
1261 #ifdef DEBUG
1262 // We do not use the SemiSpaceIterator because verification doesn't assume
1263 // that it works (it depends on the invariants we are checking).
1264 void NewSpace::Verify() {
1265  // The allocation pointer should be in the space or at the very end.
1266  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1267 
1268  // There should be objects packed in from the low address up to the
1269  // allocation pointer.
1270  Address current = to_space_.first_page()->area_start();
1271  CHECK_EQ(current, to_space_.space_start());
1272 
1273  while (current != top()) {
1274  if (!NewSpacePage::IsAtEnd(current)) {
1275  // The allocation pointer should not be in the middle of an object.
1276  CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1277  current < top());
1278 
1279  HeapObject* object = HeapObject::FromAddress(current);
1280 
1281  // The first word should be a map, and we expect all map pointers to
1282  // be in map space.
1283  Map* map = object->map();
1284  CHECK(map->IsMap());
1285  CHECK(heap()->map_space()->Contains(map));
1286 
1287  // The object should not be code or a map.
1288  CHECK(!object->IsMap());
1289  CHECK(!object->IsCode());
1290 
1291  // The object itself should look OK.
1292  object->Verify();
1293 
1294  // All the interior pointers should be contained in the heap.
1295  VerifyPointersVisitor visitor;
1296  int size = object->Size();
1297  object->IterateBody(map->instance_type(), size, &visitor);
1298 
1299  current += size;
1300  } else {
1301  // At end of page, switch to next page.
1302  NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1303  // Next page should be valid.
1304  CHECK(!page->is_anchor());
1305  current = page->area_start();
1306  }
1307  }
1308 
1309  // Check semi-spaces.
1310  ASSERT_EQ(from_space_.id(), kFromSpace);
1311  ASSERT_EQ(to_space_.id(), kToSpace);
1312  from_space_.Verify();
1313  to_space_.Verify();
1314 }
1315 #endif
1316 
1317 // -----------------------------------------------------------------------------
1318 // SemiSpace implementation
1319 
1320 void SemiSpace::SetUp(Address start,
1321  int initial_capacity,
1322  int maximum_capacity) {
1323  // Creates a space in the young generation. The constructor does not
1324  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1325  // memory of size 'capacity' when set up, and does not grow or shrink
1326  // otherwise. In the mark-compact collector, the memory region of the from
1327  // space is used as the marking stack. It requires contiguous memory
1328  // addresses.
1329  ASSERT(maximum_capacity >= Page::kPageSize);
1330  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1331  capacity_ = initial_capacity;
1332  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1333  committed_ = false;
1334  start_ = start;
1335  address_mask_ = ~(maximum_capacity - 1);
1336  object_mask_ = address_mask_ | kHeapObjectTagMask;
1337  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1338  age_mark_ = start_;
1339 }
1340 
1341 
1342 void SemiSpace::TearDown() {
1343  start_ = NULL;
1344  capacity_ = 0;
1345 }
1346 
1347 
1348 bool SemiSpace::Commit() {
1349  ASSERT(!is_committed());
1350  int pages = capacity_ / Page::kPageSize;
1351  Address end = start_ + maximum_capacity_;
1352  Address start = end - pages * Page::kPageSize;
1353  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1354  capacity_,
1355  executable())) {
1356  return false;
1357  }
1358 
1359  NewSpacePage* page = anchor();
1360  for (int i = 1; i <= pages; i++) {
1361  NewSpacePage* new_page =
1362  NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1363  new_page->InsertAfter(page);
1364  page = new_page;
1365  }
1366 
1367  committed_ = true;
1368  Reset();
1369  return true;
1370 }
1371 
1372 
1373 bool SemiSpace::Uncommit() {
1374  ASSERT(is_committed());
1375  Address start = start_ + maximum_capacity_ - capacity_;
1376  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1377  return false;
1378  }
1379  anchor()->set_next_page(anchor());
1380  anchor()->set_prev_page(anchor());
1381 
1382  committed_ = false;
1383  return true;
1384 }
1385 
1386 
1387 bool SemiSpace::GrowTo(int new_capacity) {
1388  if (!is_committed()) {
1389  if (!Commit()) return false;
1390  }
1391  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1392  ASSERT(new_capacity <= maximum_capacity_);
1393  ASSERT(new_capacity > capacity_);
1394  int pages_before = capacity_ / Page::kPageSize;
1395  int pages_after = new_capacity / Page::kPageSize;
1396 
1397  Address end = start_ + maximum_capacity_;
1398  Address start = end - new_capacity;
1399  size_t delta = new_capacity - capacity_;
1400 
1402  if (!heap()->isolate()->memory_allocator()->CommitBlock(
1403  start, delta, executable())) {
1404  return false;
1405  }
1406  capacity_ = new_capacity;
1407  NewSpacePage* last_page = anchor()->prev_page();
1408  ASSERT(last_page != anchor());
1409  for (int i = pages_before + 1; i <= pages_after; i++) {
1410  Address page_address = end - i * Page::kPageSize;
1411  NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1412  page_address,
1413  this);
1414  new_page->InsertAfter(last_page);
1415  Bitmap::Clear(new_page);
1416  // Duplicate the flags that were set on the old page.
1417  new_page->SetFlags(last_page->GetFlags(),
1419  last_page = new_page;
1420  }
1421  return true;
1422 }
1423 
1424 
1425 bool SemiSpace::ShrinkTo(int new_capacity) {
1426  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1427  ASSERT(new_capacity >= initial_capacity_);
1428  ASSERT(new_capacity < capacity_);
1429  if (is_committed()) {
1430  // Semispaces grow backwards from the end of their allocated capacity,
1431  // so we find the before and after start addresses relative to the
1432  // end of the space.
1433  Address space_end = start_ + maximum_capacity_;
1434  Address old_start = space_end - capacity_;
1435  size_t delta = capacity_ - new_capacity;
1437 
1438  MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1439  if (!allocator->UncommitBlock(old_start, delta)) {
1440  return false;
1441  }
1442 
1443  int pages_after = new_capacity / Page::kPageSize;
1444  NewSpacePage* new_last_page =
1445  NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1446  new_last_page->set_next_page(anchor());
1447  anchor()->set_prev_page(new_last_page);
1448  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
1449  }
1450 
1451  capacity_ = new_capacity;
1452 
1453  return true;
1454 }
1455 
1456 
1457 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1458  anchor_.set_owner(this);
1459  // Fixup back-pointers to anchor. Address of anchor changes
1460  // when we swap.
1461  anchor_.prev_page()->set_next_page(&anchor_);
1462  anchor_.next_page()->set_prev_page(&anchor_);
1463 
1464  bool becomes_to_space = (id_ == kFromSpace);
1465  id_ = becomes_to_space ? kToSpace : kFromSpace;
1466  NewSpacePage* page = anchor_.next_page();
1467  while (page != &anchor_) {
1468  page->set_owner(this);
1469  page->SetFlags(flags, mask);
1470  if (becomes_to_space) {
1471  page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1472  page->SetFlag(MemoryChunk::IN_TO_SPACE);
1474  page->ResetLiveBytes();
1475  } else {
1476  page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1477  page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1478  }
1480  ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1481  page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1482  page = page->next_page();
1483  }
1484 }
1485 
1486 
1487 void SemiSpace::Reset() {
1488  ASSERT(anchor_.next_page() != &anchor_);
1489  current_page_ = anchor_.next_page();
1490 }
1491 
1492 
1493 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1494  // We won't be swapping semispaces without data in them.
1495  ASSERT(from->anchor_.next_page() != &from->anchor_);
1496  ASSERT(to->anchor_.next_page() != &to->anchor_);
1497 
1498  // Swap bits.
1499  SemiSpace tmp = *from;
1500  *from = *to;
1501  *to = tmp;
1502 
1503  // Fixup back-pointers to the page list anchor now that its address
1504  // has changed.
1505  // Swap to/from-space bits on pages.
1506  // Copy GC flags from old active space (from-space) to new (to-space).
1507  intptr_t flags = from->current_page()->GetFlags();
1508  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1509 
1510  from->FlipPages(0, 0);
1511 }
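Swap exchanges the two SemiSpace objects by value, so any page that still points at the anchor embedded in the old object must be re-linked; that is exactly what the anchor fixup at the top of FlipPages does. The same issue in miniature (illustrative only; invented names):

#include <cassert>
#include <utility>

struct List {
  struct Node { Node* prev; Node* next; } anchor;
  List() { anchor.prev = anchor.next = &anchor; }
  void FixupAnchor() {             // Re-point neighbours at the anchor's new address,
    anchor.prev->next = &anchor;   // as FlipPages does for anchor_.prev_page() and
    anchor.next->prev = &anchor;   // anchor_.next_page().
  }
};

int main() {
  List a, b;
  List::Node n;                    // One element linked into list a.
  n.prev = n.next = &a.anchor;
  a.anchor.prev = a.anchor.next = &n;

  std::swap(a, b);                 // Value swap: n still points at a's anchor,
  assert(n.next != &b.anchor);     // even though that anchor now plays b's role.
  b.FixupAnchor();                 // Fix the stale back-pointers.
  assert(b.anchor.next == &n && n.next == &b.anchor);
  return 0;
}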
1512 
1513 
1514 void SemiSpace::set_age_mark(Address mark) {
1515  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1516  age_mark_ = mark;
1517  // Mark all pages up to the one containing mark.
1518  NewSpacePageIterator it(space_start(), mark);
1519  while (it.has_next()) {
1520  it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1521  }
1522 }
1523 
1524 
1525 #ifdef DEBUG
1526 void SemiSpace::Print() { }
1527 
1528 
1529 void SemiSpace::Verify() {
1530  bool is_from_space = (id_ == kFromSpace);
1531  NewSpacePage* page = anchor_.next_page();
1532  CHECK(anchor_.semi_space() == this);
1533  while (page != &anchor_) {
1534  CHECK(page->semi_space() == this);
1535  CHECK(page->InNewSpace());
1536  CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1537  : MemoryChunk::IN_TO_SPACE));
1538  CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1539  : MemoryChunk::IN_FROM_SPACE));
1541  if (!is_from_space) {
1542  // The pointers-from-here-are-interesting flag isn't updated dynamically
1543  // on from-space pages, so it might be out of sync with the marking state.
1544  if (page->heap()->incremental_marking()->IsMarking()) {
1546  } else {
1547  CHECK(!page->IsFlagSet(
1549  }
1550  // TODO(gc): Check that the live_bytes_count_ field matches the
1551  // black marking on the page (if we make it match in new-space).
1552  }
1553  CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1554  CHECK(page->prev_page()->next_page() == page);
1555  page = page->next_page();
1556  }
1557 }
1558 
1559 
1560 void SemiSpace::AssertValidRange(Address start, Address end) {
1561  // Addresses belong to same semi-space
1562  NewSpacePage* page = NewSpacePage::FromLimit(start);
1563  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1564  SemiSpace* space = page->semi_space();
1565  CHECK_EQ(space, end_page->semi_space());
1566  // Start address is before end address, either on same page,
1567  // or end address is on a later page in the linked list of
1568  // semi-space pages.
1569  if (page == end_page) {
1570  CHECK(start <= end);
1571  } else {
1572  while (page != end_page) {
1573  page = page->next_page();
1574  CHECK_NE(page, space->anchor());
1575  }
1576  }
1577 }
1578 #endif
1579 
1580 
1581 // -----------------------------------------------------------------------------
1582 // SemiSpaceIterator implementation.
1583 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1584  Initialize(space->bottom(), space->top(), NULL);
1585 }
1586 
1587 
1588 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1589  HeapObjectCallback size_func) {
1590  Initialize(space->bottom(), space->top(), size_func);
1591 }
1592 
1593 
1594 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1595  Initialize(start, space->top(), NULL);
1596 }
1597 
1598 
1599 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1600  Initialize(from, to, NULL);
1601 }
1602 
1603 
1604 void SemiSpaceIterator::Initialize(Address start,
1605  Address end,
1606  HeapObjectCallback size_func) {
1607  SemiSpace::AssertValidRange(start, end);
1608  current_ = start;
1609  limit_ = end;
1610  size_func_ = size_func;
1611 }
1612 
1613 
1614 #ifdef DEBUG
1615 // heap_histograms is shared, always clear it before using it.
1616 static void ClearHistograms() {
1617  Isolate* isolate = Isolate::Current();
1618  // We reset the name each time, though it hasn't changed.
1619 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1620  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1621 #undef DEF_TYPE_NAME
1622 
1623 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1624  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1625 #undef CLEAR_HISTOGRAM
1626 
1627  isolate->js_spill_information()->Clear();
1628 }
1629 
1630 
1631 static void ClearCodeKindStatistics() {
1632  Isolate* isolate = Isolate::Current();
1633  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1634  isolate->code_kind_statistics()[i] = 0;
1635  }
1636 }
1637 
1638 
1639 static void ReportCodeKindStatistics() {
1640  Isolate* isolate = Isolate::Current();
1641  const char* table[Code::NUMBER_OF_KINDS] = { NULL };
1642 
1643 #define CASE(name) \
1644  case Code::name: table[Code::name] = #name; \
1645  break
1646 
1647  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1648  switch (static_cast<Code::Kind>(i)) {
1649  CASE(FUNCTION);
1650  CASE(OPTIMIZED_FUNCTION);
1651  CASE(STUB);
1652  CASE(BUILTIN);
1653  CASE(LOAD_IC);
1654  CASE(KEYED_LOAD_IC);
1655  CASE(STORE_IC);
1656  CASE(KEYED_STORE_IC);
1657  CASE(CALL_IC);
1658  CASE(KEYED_CALL_IC);
1659  CASE(UNARY_OP_IC);
1660  CASE(BINARY_OP_IC);
1661  CASE(COMPARE_IC);
1662  CASE(TO_BOOLEAN_IC);
1663  }
1664  }
1665 
1666 #undef CASE
1667 
1668  PrintF("\n Code kind histograms: \n");
1669  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1670  if (isolate->code_kind_statistics()[i] > 0) {
1671  PrintF(" %-20s: %10d bytes\n", table[i],
1672  isolate->code_kind_statistics()[i]);
1673  }
1674  }
1675  PrintF("\n");
1676 }
1677 
1678 
1679 static int CollectHistogramInfo(HeapObject* obj) {
1680  Isolate* isolate = Isolate::Current();
1681  InstanceType type = obj->map()->instance_type();
1682  ASSERT(0 <= type && type <= LAST_TYPE);
1683  ASSERT(isolate->heap_histograms()[type].name() != NULL);
1684  isolate->heap_histograms()[type].increment_number(1);
1685  isolate->heap_histograms()[type].increment_bytes(obj->Size());
1686 
1687  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1688  JSObject::cast(obj)->IncrementSpillStatistics(
1689  isolate->js_spill_information());
1690  }
1691 
1692  return obj->Size();
1693 }
1694 
1695 
1696 static void ReportHistogram(bool print_spill) {
1697  Isolate* isolate = Isolate::Current();
1698  PrintF("\n Object Histogram:\n");
1699  for (int i = 0; i <= LAST_TYPE; i++) {
1700  if (isolate->heap_histograms()[i].number() > 0) {
1701  PrintF(" %-34s%10d (%10d bytes)\n",
1702  isolate->heap_histograms()[i].name(),
1703  isolate->heap_histograms()[i].number(),
1704  isolate->heap_histograms()[i].bytes());
1705  }
1706  }
1707  PrintF("\n");
1708 
1709  // Summarize string types.
1710  int string_number = 0;
1711  int string_bytes = 0;
1712 #define INCREMENT(type, size, name, camel_name) \
1713  string_number += isolate->heap_histograms()[type].number(); \
1714  string_bytes += isolate->heap_histograms()[type].bytes();
1716 #undef INCREMENT
1717  if (string_number > 0) {
1718  PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1719  string_bytes);
1720  }
1721 
1722  if (FLAG_collect_heap_spill_statistics && print_spill) {
1723  isolate->js_spill_information()->Print();
1724  }
1725 }
1726 #endif // DEBUG
1727 
1728 
1729 // Support for statistics gathering for --heap-stats and --log-gc.
1730 void NewSpace::ClearHistograms() {
1731  for (int i = 0; i <= LAST_TYPE; i++) {
1732  allocated_histogram_[i].clear();
1733  promoted_histogram_[i].clear();
1734  }
1735 }
1736 
1737 // Because the copying collector does not touch garbage objects, we iterate
1738 // the new space before a collection to get a histogram of allocated objects.
1739 // This only happens when --log-gc flag is set.
1740 void NewSpace::CollectStatistics() {
1741  ClearHistograms();
1742  SemiSpaceIterator it(this);
1743  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1744  RecordAllocation(obj);
1745 }
1746 
1747 
1748 static void DoReportStatistics(Isolate* isolate,
1749  HistogramInfo* info, const char* description) {
1750  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1751  // Lump all the string types together.
1752  int string_number = 0;
1753  int string_bytes = 0;
1754 #define INCREMENT(type, size, name, camel_name) \
1755  string_number += info[type].number(); \
1756  string_bytes += info[type].bytes();
1758 #undef INCREMENT
1759  if (string_number > 0) {
1760  LOG(isolate,
1761  HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1762  }
1763 
1764  // Then do the other types.
1765  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1766  if (info[i].number() > 0) {
1767  LOG(isolate,
1768  HeapSampleItemEvent(info[i].name(), info[i].number(),
1769  info[i].bytes()));
1770  }
1771  }
1772  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1773 }
1774 
1775 
1776 void NewSpace::ReportStatistics() {
1777 #ifdef DEBUG
1778  if (FLAG_heap_stats) {
1779  float pct = static_cast<float>(Available()) / Capacity();
1780  PrintF(" capacity: %" V8_PTR_PREFIX "d"
1781  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1782  Capacity(), Available(), static_cast<int>(pct*100));
1783  PrintF("\n Object Histogram:\n");
1784  for (int i = 0; i <= LAST_TYPE; i++) {
1785  if (allocated_histogram_[i].number() > 0) {
1786  PrintF(" %-34s%10d (%10d bytes)\n",
1787  allocated_histogram_[i].name(),
1788  allocated_histogram_[i].number(),
1789  allocated_histogram_[i].bytes());
1790  }
1791  }
1792  PrintF("\n");
1793  }
1794 #endif // DEBUG
1795 
1796  if (FLAG_log_gc) {
1797  Isolate* isolate = ISOLATE;
1798  DoReportStatistics(isolate, allocated_histogram_, "allocated");
1799  DoReportStatistics(isolate, promoted_histogram_, "promoted");
1800  }
1801 }
1802 
1803 
1804 void NewSpace::RecordAllocation(HeapObject* obj) {
1805  InstanceType type = obj->map()->instance_type();
1806  ASSERT(0 <= type && type <= LAST_TYPE);
1807  allocated_histogram_[type].increment_number(1);
1808  allocated_histogram_[type].increment_bytes(obj->Size());
1809 }
1810 
1811 
1812 void NewSpace::RecordPromotion(HeapObject* obj) {
1813  InstanceType type = obj->map()->instance_type();
1814  ASSERT(0 <= type && type <= LAST_TYPE);
1815  promoted_histogram_[type].increment_number(1);
1816  promoted_histogram_[type].increment_bytes(obj->Size());
1817 }
1818 
1819 // -----------------------------------------------------------------------------
1820 // Free lists for old object spaces implementation
1821 
1822 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1823  ASSERT(size_in_bytes > 0);
1824  ASSERT(IsAligned(size_in_bytes, kPointerSize));
1825 
1826  // We write a map and possibly size information to the block. If the block
1827  // is big enough to be a FreeSpace with at least one extra word (the next
1828  // pointer), we set its map to be the free space map and its size to an
1829  // appropriate array length for the desired size from HeapObject::Size().
1830  // If the block is too small (e.g., one or two words) to hold both a size
1831  // field and a next pointer, we give it a filler map that gives it the
1832  // correct size.
1833  if (size_in_bytes > FreeSpace::kHeaderSize) {
1834  set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1835  // Can't use FreeSpace::cast because it fails during deserialization.
1836  FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1837  this_as_free_space->set_size(size_in_bytes);
1838  } else if (size_in_bytes == kPointerSize) {
1839  set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1840  } else if (size_in_bytes == 2 * kPointerSize) {
1841  set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1842  } else {
1843  UNREACHABLE();
1844  }
1845  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1846  // deserialization because the free space map is not done yet.
1847 }
1848 
1849 
1850 FreeListNode* FreeListNode::next() {
1851  ASSERT(IsFreeListNode(this));
1852  if (map() == HEAP->raw_unchecked_free_space_map()) {
1853  ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1854  return reinterpret_cast<FreeListNode*>(
1855  Memory::Address_at(address() + kNextOffset));
1856  } else {
1857  return reinterpret_cast<FreeListNode*>(
1859  }
1860 }
1861 
1862 
1863 FreeListNode** FreeListNode::next_address() {
1864  ASSERT(IsFreeListNode(this));
1865  if (map() == HEAP->raw_unchecked_free_space_map()) {
1866  ASSERT(Size() >= kNextOffset + kPointerSize);
1867  return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
1868  } else {
1869  return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
1870  }
1871 }
1872 
1873 
1874 void FreeListNode::set_next(FreeListNode* next) {
1875  ASSERT(IsFreeListNode(this));
1876  // While we are booting the VM the free space map will actually be null. So
1877  // we have to make sure that we don't try to use it for anything at that
1878  // stage.
1879  if (map() == HEAP->raw_unchecked_free_space_map()) {
1880  ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1881  Memory::Address_at(address() + kNextOffset) =
1882  reinterpret_cast<Address>(next);
1883  } else {
1885  reinterpret_cast<Address>(next);
1886  }
1887 }
1888 
1889 
1890 FreeList::FreeList(PagedSpace* owner)
1891  : owner_(owner), heap_(owner->heap()) {
1892  Reset();
1893 }
1894 
1895 
1896 void FreeList::Reset() {
1897  available_ = 0;
1898  small_list_ = NULL;
1899  medium_list_ = NULL;
1900  large_list_ = NULL;
1901  huge_list_ = NULL;
1902 }
1903 
1904 
1905 int FreeList::Free(Address start, int size_in_bytes) {
1906  if (size_in_bytes == 0) return 0;
1907  FreeListNode* node = FreeListNode::FromAddress(start);
1908  node->set_size(heap_, size_in_bytes);
1909 
1910  // Early return to drop too-small blocks on the floor.
1911  if (size_in_bytes < kSmallListMin) return size_in_bytes;
1912 
1913  // Insert other blocks at the head of a free list of the appropriate
1914  // magnitude.
1915  if (size_in_bytes <= kSmallListMax) {
1916  node->set_next(small_list_);
1917  small_list_ = node;
1918  } else if (size_in_bytes <= kMediumListMax) {
1919  node->set_next(medium_list_);
1920  medium_list_ = node;
1921  } else if (size_in_bytes <= kLargeListMax) {
1922  node->set_next(large_list_);
1923  large_list_ = node;
1924  } else {
1925  node->set_next(huge_list_);
1926  huge_list_ = node;
1927  }
1928  available_ += size_in_bytes;
1929  ASSERT(IsVeryLong() || available_ == SumFreeLists());
1930  return 0;
1931 }
1932 
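The size-class boundaries used by Free() (kSmallListMin, kSmallListMax, kMediumListMax, kLargeListMax) are defined in spaces.h and do not appear in this file, so the standalone sketch below uses guessed thresholds. It only illustrates the policy: blocks below the small-list minimum are dropped and counted as waste, everything else is pushed onto one of the four lists.

// Standalone illustration of the Free() bucketing policy above. The
// threshold values are assumptions (the real ones are constants in
// spaces.h); only the shape of the decision matters here.
#include <cstdio>

namespace sketch {

const int kPointerSize = 8;                       // assumed word size
const int kSmallListMin = 0x20 * kPointerSize;    // assumed: below this -> wasted
const int kSmallListMax = 0xff * kPointerSize;    // assumed upper bounds for the
const int kMediumListMax = 0x7ff * kPointerSize;  // small/medium/large lists;
const int kLargeListMax = 0x3fff * kPointerSize;  // anything bigger is "huge"

const char* BucketForFreedBlock(int size_in_bytes) {
  if (size_in_bytes < kSmallListMin) return "dropped (counted as waste)";
  if (size_in_bytes <= kSmallListMax) return "small_list_";
  if (size_in_bytes <= kMediumListMax) return "medium_list_";
  if (size_in_bytes <= kLargeListMax) return "large_list_";
  return "huge_list_";
}

}  // namespace sketch

int main() {
  const int sizes[] = {16, 1024, 16 * 1024, 256 * 1024, 1024 * 1024};
  for (int i = 0; i < 5; i++) {
    std::printf("%7d bytes -> %s\n", sizes[i],
                sketch::BucketForFreedBlock(sizes[i]));
  }
  return 0;
}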
1933 
1934 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1935  FreeListNode* node = *list;
1936 
1937  if (node == NULL) return NULL;
1938 
1939  while (node != NULL &&
1940  Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1941  available_ -= node->Size();
1942  node = node->next();
1943  }
1944 
1945  if (node != NULL) {
1946  *node_size = node->Size();
1947  *list = node->next();
1948  } else {
1949  *list = NULL;
1950  }
1951 
1952  return node;
1953 }
1954 
1955 
1956 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1957  FreeListNode* node = NULL;
1958 
1959  if (size_in_bytes <= kSmallAllocationMax) {
1960  node = PickNodeFromList(&small_list_, node_size);
1961  if (node != NULL) return node;
1962  }
1963 
1964  if (size_in_bytes <= kMediumAllocationMax) {
1965  node = PickNodeFromList(&medium_list_, node_size);
1966  if (node != NULL) return node;
1967  }
1968 
1969  if (size_in_bytes <= kLargeAllocationMax) {
1970  node = PickNodeFromList(&large_list_, node_size);
1971  if (node != NULL) return node;
1972  }
1973 
1974  for (FreeListNode** cur = &huge_list_;
1975  *cur != NULL;
1976  cur = (*cur)->next_address()) {
1977  FreeListNode* cur_node = *cur;
1978  while (cur_node != NULL &&
1979  Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1980  available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1981  cur_node = cur_node->next();
1982  }
1983 
1984  *cur = cur_node;
1985  if (cur_node == NULL) break;
1986 
1987  ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1988  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1989  int size = cur_as_free_space->Size();
1990  if (size >= size_in_bytes) {
1991  // Large enough node found. Unlink it from the list.
1992  node = *cur;
1993  *node_size = size;
1994  *cur = node->next();
1995  break;
1996  }
1997  }
1998 
1999  return node;
2000 }
2001 
2002 
2003 // Allocation on the old space free list. If it succeeds then a new linear
2004 // allocation space has been set up with the top and limit of the space. If
2005 // the allocation fails then NULL is returned, and the caller can perform a GC
2006 // or allocate a new page before retrying.
2007 HeapObject* FreeList::Allocate(int size_in_bytes) {
2008  ASSERT(0 < size_in_bytes);
2009  ASSERT(size_in_bytes <= kMaxBlockSize);
2010  ASSERT(IsAligned(size_in_bytes, kPointerSize));
2011  // Don't free list allocate if there is linear space available.
2012  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
2013 
2014  int new_node_size = 0;
2015  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2016  if (new_node == NULL) return NULL;
2017 
2018  available_ -= new_node_size;
2019  ASSERT(IsVeryLong() || available_ == SumFreeLists());
2020 
2021  int bytes_left = new_node_size - size_in_bytes;
2022  ASSERT(bytes_left >= 0);
2023 
2024  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2025  // Mark the old linear allocation area with a free space map so it can be
2026  // skipped when scanning the heap. This also puts it back in the free list
2027  // if it is big enough.
2028  owner_->Free(owner_->top(), old_linear_size);
2029 
2030 #ifdef DEBUG
2031  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2032  reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
2033  }
2034 #endif
2035 
2036  owner_->heap()->incremental_marking()->OldSpaceStep(
2037  size_in_bytes - old_linear_size);
2038 
2039  // The old-space-step might have finished sweeping and restarted marking.
2040  // Verify that it did not turn the page of the new node into an evacuation
2041  // candidate.
2042  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2043 
2044  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2045 
2046  // Memory in the linear allocation area is counted as allocated. We may free
2047  // a little of this again immediately - see below.
2048  owner_->Allocate(new_node_size);
2049 
2050  if (bytes_left > kThreshold &&
2051  owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2052  FLAG_incremental_marking_steps) {
2053  int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2054  // We don't want to give too large linear areas to the allocator while
2055  // incremental marking is going on, because we won't check again whether
2056  // we want to do another increment until the linear area is used up.
2057  owner_->Free(new_node->address() + size_in_bytes + linear_size,
2058  new_node_size - size_in_bytes - linear_size);
2059  owner_->SetTop(new_node->address() + size_in_bytes,
2060  new_node->address() + size_in_bytes + linear_size);
2061  } else if (bytes_left > 0) {
2062  // Normally we give the rest of the node to the allocator as its new
2063  // linear allocation area.
2064  owner_->SetTop(new_node->address() + size_in_bytes,
2065  new_node->address() + new_node_size);
2066  } else {
2067  // TODO(gc) Try not freeing linear allocation region when bytes_left
2068  // are zero.
2069  owner_->SetTop(NULL, NULL);
2070  }
2071 
2072  return new_node;
2073 }
2074 
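The bookkeeping in Allocate() can be summarized as: carve the node found on the free list into the requested object, a fresh linear allocation area, and, while incremental marking is active, a tail that goes straight back to the free list so marking gets a chance to step again soon. The sketch below models only that arithmetic; the threshold value is an assumption standing in for IncrementalMarking::kAllocatedThreshold, and alignment rounding is ignored.

// Minimal sketch of how Allocate() carves up a free-list node. kThreshold
// and the "marking_active" flag are assumptions; the real code consults
// IncrementalMarking::kAllocatedThreshold and the heap's marking state.
#include <cstdio>

struct Split {
  int object_bytes;       // bytes handed to the caller
  int linear_area_bytes;  // new linear allocation area (top..limit)
  int returned_bytes;     // tail immediately returned to the free list
};

Split CarveUpNode(int node_size, int requested, bool marking_active) {
  const int kThreshold = 8 * 1024;  // assumed marking step budget
  int bytes_left = node_size - requested;
  Split s = {requested, 0, 0};
  if (marking_active && bytes_left > kThreshold) {
    // Keep the linear area small so marking gets a chance to step soon.
    s.linear_area_bytes = kThreshold;
    s.returned_bytes = bytes_left - kThreshold;
  } else if (bytes_left > 0) {
    // Otherwise the whole remainder becomes the new linear area.
    s.linear_area_bytes = bytes_left;
  }
  return s;
}

int main() {
  Split a = CarveUpNode(64 * 1024, 128, true);
  Split b = CarveUpNode(64 * 1024, 128, false);
  std::printf("marking:    object=%d linear=%d returned=%d\n",
              a.object_bytes, a.linear_area_bytes, a.returned_bytes);
  std::printf("no marking: object=%d linear=%d returned=%d\n",
              b.object_bytes, b.linear_area_bytes, b.returned_bytes);
  return 0;
}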
2075 
2076 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
2077  intptr_t sum = 0;
2078  while (n != NULL) {
2079  if (Page::FromAddress(n->address()) == p) {
2080  FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
2081  sum += free_space->Size();
2082  }
2083  n = n->next();
2084  }
2085  return sum;
2086 }
2087 
2088 
2089 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
2090  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
2091  if (sizes->huge_size_ < p->area_size()) {
2092  sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
2093  sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
2094  sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
2095  } else {
2096  sizes->small_size_ = 0;
2097  sizes->medium_size_ = 0;
2098  sizes->large_size_ = 0;
2099  }
2100 }
2101 
2102 
2103 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
2104  intptr_t sum = 0;
2105  while (*n != NULL) {
2106  if (Page::FromAddress((*n)->address()) == p) {
2107  FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2108  sum += free_space->Size();
2109  *n = (*n)->next();
2110  } else {
2111  n = (*n)->next_address();
2112  }
2113  }
2114  return sum;
2115 }
2116 
2117 
2118 intptr_t FreeList::EvictFreeListItems(Page* p) {
2119  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
2120 
2121  if (sum < p->area_size()) {
2122  sum += EvictFreeListItemsInList(&small_list_, p) +
2123  EvictFreeListItemsInList(&medium_list_, p) +
2124  EvictFreeListItemsInList(&large_list_, p);
2125  }
2126 
2127  available_ -= static_cast<int>(sum);
2128 
2129  return sum;
2130 }
2131 
2132 
2133 #ifdef DEBUG
2134 intptr_t FreeList::SumFreeList(FreeListNode* cur) {
2135  intptr_t sum = 0;
2136  while (cur != NULL) {
2137  ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
2138  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2139  sum += cur_as_free_space->Size();
2140  cur = cur->next();
2141  }
2142  return sum;
2143 }
2144 
2145 
2146 static const int kVeryLongFreeList = 500;
2147 
2148 
2149 int FreeList::FreeListLength(FreeListNode* cur) {
2150  int length = 0;
2151  while (cur != NULL) {
2152  length++;
2153  cur = cur->next();
2154  if (length == kVeryLongFreeList) return length;
2155  }
2156  return length;
2157 }
2158 
2159 
2160 bool FreeList::IsVeryLong() {
2161  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
2162  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
2163  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
2164  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
2165  return false;
2166 }
2167 
2168 
2169 // This can take a very long time because it is linear in the number of entries
2170 // on the free list, so it should not be called if FreeListLength returns
2171 // kVeryLongFreeList.
2172 intptr_t FreeList::SumFreeLists() {
2173  intptr_t sum = SumFreeList(small_list_);
2174  sum += SumFreeList(medium_list_);
2175  sum += SumFreeList(large_list_);
2176  sum += SumFreeList(huge_list_);
2177  return sum;
2178 }
2179 #endif
2180 
2181 
2182 // -----------------------------------------------------------------------------
2183 // OldSpace implementation
2184 
2185 bool NewSpace::ReserveSpace(int bytes) {
2186  // We can't reliably unpack a partial snapshot that needs more new-space
2187  // space than the minimum NewSpace size. The limit can be set lower than
2188  // the end of new space either because there is more space on the next page
2189  // or because we have lowered the limit in order to get periodic incremental
2190  // marking. The most reliable way to ensure that there is linear space is
2191  // to do the allocation, then rewind the limit.
2192  ASSERT(bytes <= InitialCapacity());
2193  MaybeObject* maybe = AllocateRaw(bytes);
2194  Object* object = NULL;
2195  if (!maybe->ToObject(&object)) return false;
2196  HeapObject* allocation = HeapObject::cast(object);
2197  Address top = allocation_info_.top;
2198  if ((top - bytes) == allocation->address()) {
2199  allocation_info_.top = allocation->address();
2200  return true;
2201  }
2202  // There may be a borderline case here where the allocation succeeded, but
2203  // the limit and top have moved on to a new page. In that case we try again.
2204  return ReserveSpace(bytes);
2205 }
2206 
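The reservation trick in NewSpace::ReserveSpace() is to perform a real allocation and then rewind top, which leaves the requested number of bytes as guaranteed linear space below the limit. Here is that idea on a plain bump-pointer allocator; it is a sketch only and omits the page-boundary retry that the recursive call above handles.

// Sketch of "reserve by allocating, then rewinding top" on a plain
// bump-pointer allocator. Illustrative only; not V8 API.
#include <cassert>
#include <cstdio>

struct BumpAllocator {
  char* top;
  char* limit;

  char* AllocateRaw(int bytes) {
    if (top + bytes > limit) return 0;  // would need a GC / a new page
    char* result = top;
    top += bytes;
    return result;
  }

  bool ReserveSpace(int bytes) {
    char* allocation = AllocateRaw(bytes);
    if (allocation == 0) return false;
    // The allocation came straight off the current top, so rewinding top
    // leaves `bytes` of guaranteed linear space in front of it.
    assert(top - bytes == allocation);
    top = allocation;
    return true;
  }
};

int main() {
  static char arena[1024];
  BumpAllocator space = {arena, arena + sizeof(arena)};
  std::printf("reserve 256:  %d\n", space.ReserveSpace(256));   // succeeds
  std::printf("reserve 2048: %d\n", space.ReserveSpace(2048));  // fails
  return 0;
}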
2207 
2208 void PagedSpace::PrepareForMarkCompact() {
2209  // We don't have a linear allocation area while sweeping. It will be restored
2210  // on the first allocation after the sweep.
2211  // Mark the old linear allocation area with a free space map so it can be
2212  // skipped when scanning the heap.
2213  int old_linear_size = static_cast<int>(limit() - top());
2214  Free(top(), old_linear_size);
2215  SetTop(NULL, NULL);
2216 
2217  // Stop lazy sweeping and clear marking bits for unswept pages.
2218  if (first_unswept_page_ != NULL) {
2219  Page* p = first_unswept_page_;
2220  do {
2221  // Do not use ShouldBeSweptLazily predicate here.
2222  // New evacuation candidates were selected but they still have
2223  // to be swept before collection starts.
2224  if (!p->WasSwept()) {
2225  Bitmap::Clear(p);
2226  if (FLAG_gc_verbose) {
2227  PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2228  reinterpret_cast<intptr_t>(p));
2229  }
2230  }
2231  p = p->next_page();
2232  } while (p != anchor());
2233  }
2234  first_unswept_page_ = Page::FromAddress(NULL);
2235  unswept_free_bytes_ = 0;
2236 
2237  // Clear the free list before a full GC---it will be rebuilt afterward.
2238  free_list_.Reset();
2239 }
2240 
2241 
2242 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2243  ASSERT(size_in_bytes <= AreaSize());
2244  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2245  Address current_top = allocation_info_.top;
2246  Address new_top = current_top + size_in_bytes;
2247  if (new_top <= allocation_info_.limit) return true;
2248 
2249  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2250  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2251  if (new_area == NULL) return false;
2252 
2253  int old_linear_size = static_cast<int>(limit() - top());
2254  // Mark the old linear allocation area with a free space so it can be
2255  // skipped when scanning the heap. This also puts it back in the free list
2256  // if it is big enough.
2257  Free(top(), old_linear_size);
2258 
2259  SetTop(new_area->address(), new_area->address() + size_in_bytes);
2260  Allocate(size_in_bytes);
2261  return true;
2262 }
2263 
2264 
2265 // You have to call this last, since the implementation from PagedSpace
2266 // doesn't know that memory was 'promised' to large object space.
2267 bool LargeObjectSpace::ReserveSpace(int bytes) {
2268  return heap()->OldGenerationCapacityAvailable() >= bytes &&
2269  (!heap()->incremental_marking()->IsStopped() ||
2270  heap()->OldGenerationSpaceAvailable() >= bytes);
2271 }
2272 
2273 
2274 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2275  if (IsSweepingComplete()) return true;
2276 
2277  intptr_t freed_bytes = 0;
2278  Page* p = first_unswept_page_;
2279  do {
2280  Page* next_page = p->next_page();
2281  if (ShouldBeSweptLazily(p)) {
2282  if (FLAG_gc_verbose) {
2283  PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2284  reinterpret_cast<intptr_t>(p));
2285  }
2286  DecreaseUnsweptFreeBytes(p);
2287  freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
2288  }
2289  p = next_page;
2290  } while (p != anchor() && freed_bytes < bytes_to_sweep);
2291 
2292  if (p == anchor()) {
2293  first_unswept_page_ = Page::FromAddress(NULL);
2294  } else {
2295  first_unswept_page_ = p;
2296  }
2297 
2298  heap()->FreeQueuedChunks();
2299 
2300  return IsSweepingComplete();
2301 }
2302 
2303 
2304 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2305  if (allocation_info_.top >= allocation_info_.limit) return;
2306 
2307  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
2308  // Create filler object to keep page iterable if it was iterable.
2309  int remaining =
2310  static_cast<int>(allocation_info_.limit - allocation_info_.top);
2311  heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2312 
2313  allocation_info_.top = NULL;
2314  allocation_info_.limit = NULL;
2315  }
2316 }
2317 
2318 
2319 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2320  // Allocation in this space has failed.
2321 
2322  // If there are unswept pages advance lazy sweeper then sweep one page before
2323  // allocating a new page.
2324  if (first_unswept_page_->is_valid()) {
2325  AdvanceSweeper(size_in_bytes);
2326 
2327  // Retry the free list allocation.
2328  HeapObject* object = free_list_.Allocate(size_in_bytes);
2329  if (object != NULL) return object;
2330  }
2331 
2332  // Free list allocation failed and there is no next page. Fail if we have
2333  // hit the old generation size limit that should cause a garbage
2334  // collection.
2335  if (!heap()->always_allocate() &&
2336  heap()->OldGenerationAllocationLimitReached()) {
2337  return NULL;
2338  }
2339 
2340  // Try to expand the space and allocate in the new next page.
2341  if (Expand()) {
2342  return free_list_.Allocate(size_in_bytes);
2343  }
2344 
2345  // Last ditch, sweep all the remaining pages to try to find space. This may
2346  // cause a pause.
2347  if (!IsSweepingComplete()) {
2348  AdvanceSweeper(kMaxInt);
2349 
2350  // Retry the free list allocation.
2351  HeapObject* object = free_list_.Allocate(size_in_bytes);
2352  if (object != NULL) return object;
2353  }
2354 
2355  // Finally, fail.
2356  return NULL;
2357 }
2358 
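SlowAllocateRaw() is a fixed fallback chain: sweep a little and retry the free list, give up if the old-generation limit says a GC is due, try to add a page, and finally sweep everything before failing. The schematic below mirrors only that ordering; every helper in it is a stand-in, not V8 API.

// Schematic of the fallback order in SlowAllocateRaw(). All helpers are
// stand-ins; only the order of the attempts mirrors the code above.
#include <cstdio>

struct SpaceModel {
  bool unswept_pages;
  bool gc_limit_reached;
  bool can_expand;

  void* TryFreeList(int) { return 0; }        // assume the free list is empty
  void AdvanceSweeper(int) { unswept_pages = false; }
  void SweepEverything() { unswept_pages = false; }
  bool Expand() { return can_expand; }

  void* SlowAllocate(int bytes) {
    if (unswept_pages) {
      AdvanceSweeper(bytes);                  // 1. sweep a little, retry
      if (void* result = TryFreeList(bytes)) return result;
    }
    if (gc_limit_reached) return 0;           // 2. let the caller trigger a GC
    if (Expand()) return TryFreeList(bytes);  // 3. add a page
    SweepEverything();                        // 4. last ditch: finish sweeping
    return TryFreeList(bytes);
  }
};

int main() {
  SpaceModel space = {true, false, false};
  std::printf("%p\n", space.SlowAllocate(64));
  return 0;
}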
2359 
2360 #ifdef DEBUG
2361 void PagedSpace::ReportCodeStatistics() {
2362  Isolate* isolate = Isolate::Current();
2363  CommentStatistic* comments_statistics =
2364  isolate->paged_space_comments_statistics();
2365  ReportCodeKindStatistics();
2366  PrintF("Code comment statistics (\" [ comment-txt : size/ "
2367  "count (average)\"):\n");
2368  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2369  const CommentStatistic& cs = comments_statistics[i];
2370  if (cs.size > 0) {
2371  PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2372  cs.size/cs.count);
2373  }
2374  }
2375  PrintF("\n");
2376 }
2377 
2378 
2379 void PagedSpace::ResetCodeStatistics() {
2380  Isolate* isolate = Isolate::Current();
2381  CommentStatistic* comments_statistics =
2382  isolate->paged_space_comments_statistics();
2383  ClearCodeKindStatistics();
2384  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2385  comments_statistics[i].Clear();
2386  }
2387  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2388  comments_statistics[CommentStatistic::kMaxComments].size = 0;
2389  comments_statistics[CommentStatistic::kMaxComments].count = 0;
2390 }
2391 
2392 
2393 // Adds comment to 'comment_statistics' table. Performance OK as long as
2394 // 'kMaxComments' is small.
2395 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2396  CommentStatistic* comments_statistics =
2397  isolate->paged_space_comments_statistics();
2398  // Do not count empty comments
2399  if (delta <= 0) return;
2400  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2401  // Search for a free or matching entry in 'comments_statistics': 'cs'
2402  // points to result.
2403  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2404  if (comments_statistics[i].comment == NULL) {
2405  cs = &comments_statistics[i];
2406  cs->comment = comment;
2407  break;
2408  } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2409  cs = &comments_statistics[i];
2410  break;
2411  }
2412  }
2413  // Update entry for 'comment'
2414  cs->size += delta;
2415  cs->count += 1;
2416 }
2417 
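EnterComment() treats comments_statistics as a small fixed-size table: a linear scan finds the matching entry or claims the first free slot, and whatever does not fit is charged to the trailing "Unknown" bucket. The standalone model below shows that bookkeeping; the table size and field names are assumptions.

// Stand-alone model of the comment-statistics table used by EnterComment().
// kMaxComments and the struct layout are assumptions for illustration.
#include <cstring>
#include <cstdio>

const int kMaxComments = 64;  // assumed table size

struct CommentStat {
  const char* comment;  // NULL means the slot is free
  int size;
  int count;
};

// One extra slot at the end acts as the "Unknown" overflow bucket.
static CommentStat table[kMaxComments + 1];

void Enter(const char* comment, int delta) {
  if (delta <= 0) return;                    // ignore empty comments
  CommentStat* cs = &table[kMaxComments];    // default: overflow bucket
  for (int i = 0; i < kMaxComments; i++) {
    if (table[i].comment == 0) {             // first free slot claims the text
      cs = &table[i];
      cs->comment = comment;
      break;
    } else if (std::strcmp(table[i].comment, comment) == 0) {
      cs = &table[i];                        // existing entry for this text
      break;
    }
  }
  cs->size += delta;
  cs->count += 1;
}

int main() {
  table[kMaxComments].comment = "Unknown";
  Enter("[ inlined call", 24);
  Enter("[ inlined call", 8);
  std::printf("%s: %d bytes over %d samples\n",
              table[0].comment, table[0].size, table[0].count);
  return 0;
}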
2418 
2419 // Call for each nested comment start (start marked with '[ xxx', end marked
2420 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2421 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2422  ASSERT(!it->done());
2423  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2424  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2425  if (tmp[0] != '[') {
2426  // Not a nested comment; skip
2427  return;
2428  }
2429 
2430  // Search for end of nested comment or a new nested comment
2431  const char* const comment_txt =
2432  reinterpret_cast<const char*>(it->rinfo()->data());
2433  const byte* prev_pc = it->rinfo()->pc();
2434  int flat_delta = 0;
2435  it->next();
2436  while (true) {
2437  // All nested comments must be terminated properly, and therefore exit
2438  // from loop.
2439  ASSERT(!it->done());
2440  if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2441  const char* const txt =
2442  reinterpret_cast<const char*>(it->rinfo()->data());
2443  flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2444  if (txt[0] == ']') break; // End of nested comment
2445  // A new comment
2446  CollectCommentStatistics(isolate, it);
2447  // Skip code that was covered with previous comment
2448  prev_pc = it->rinfo()->pc();
2449  }
2450  it->next();
2451  }
2452  EnterComment(isolate, comment_txt, flat_delta);
2453 }
2454 
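CollectCommentStatistics() attributes instruction bytes to the innermost open "[ ..." comment and recurses for nested ones, so bytes covered by an inner comment are not charged to the outer one. The sketch below replays the same rule over a hard-coded array of (pc, text) events instead of a RelocIterator; the event data is invented for illustration.

// Sketch of the nested "[ ... ]" accounting performed above, driven by a
// plain array of (pc offset, text) events rather than a RelocIterator.
#include <cstdio>

struct CommentEvent {
  int pc;           // offset of the comment in the instruction stream
  const char* txt;  // "[ name" opens a region, "]" closes one
};

static const CommentEvent events[] = {
  {0,  "[ outer"},
  {8,  "[ inner"},
  {20, "]"},        // inner covers 12 bytes
  {36, "]"},        // outer covers 8 + (36 - 20) = 24 flat bytes
};
static const int kNumEvents = sizeof(events) / sizeof(events[0]);

// Consumes events starting at *index (which must point at an opening
// comment) and prints the flat byte count attributed to that comment.
void Collect(int* index) {
  const char* name = events[*index].txt;
  int prev_pc = events[*index].pc;
  int flat = 0;
  ++*index;
  while (*index < kNumEvents) {
    const CommentEvent& e = events[*index];
    flat += e.pc - prev_pc;
    if (e.txt[0] == ']') break;   // end of this nested region
    Collect(index);               // recurse into the inner comment
    prev_pc = events[*index].pc;  // skip bytes the inner comment covered
    ++*index;
  }
  std::printf("%s: %d flat bytes\n", name, flat);
}

int main() {
  int i = 0;
  Collect(&i);
  return 0;
}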
2455 
2456 // Collects code size statistics:
2457 // - by code kind
2458 // - by code comment
2459 void PagedSpace::CollectCodeStatistics() {
2460  Isolate* isolate = heap()->isolate();
2461  HeapObjectIterator obj_it(this);
2462  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2463  if (obj->IsCode()) {
2464  Code* code = Code::cast(obj);
2465  isolate->code_kind_statistics()[code->kind()] += code->Size();
2466  RelocIterator it(code);
2467  int delta = 0;
2468  const byte* prev_pc = code->instruction_start();
2469  while (!it.done()) {
2470  if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2471  delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2472  CollectCommentStatistics(isolate, &it);
2473  prev_pc = it.rinfo()->pc();
2474  }
2475  it.next();
2476  }
2477 
2478  ASSERT(code->instruction_start() <= prev_pc &&
2479  prev_pc <= code->instruction_end());
2480  delta += static_cast<int>(code->instruction_end() - prev_pc);
2481  EnterComment(isolate, "NoComment", delta);
2482  }
2483  }
2484 }
2485 
2486 
2487 void PagedSpace::ReportStatistics() {
2488  int pct = static_cast<int>(Available() * 100 / Capacity());
2489  PrintF(" capacity: %" V8_PTR_PREFIX "d"
2490  ", waste: %" V8_PTR_PREFIX "d"
2491  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2492  Capacity(), Waste(), Available(), pct);
2493 
2494  if (was_swept_conservatively_) return;
2495  ClearHistograms();
2496  HeapObjectIterator obj_it(this);
2497  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2498  CollectHistogramInfo(obj);
2499  ReportHistogram(true);
2500 }
2501 #endif
2502 
2503 // -----------------------------------------------------------------------------
2504 // FixedSpace implementation
2505 
2506 void FixedSpace::PrepareForMarkCompact() {
2507  // Call prepare of the super class.
2508  PagedSpace::PrepareForMarkCompact();
2509 
2510  // During a non-compacting collection, everything below the linear
2511  // allocation pointer except wasted top-of-page blocks is considered
2512  // allocated and we will rediscover available bytes during the
2513  // collection.
2514  accounting_stats_.AllocateBytes(free_list_.available());
2515 
2516  // Clear the free list before a full GC---it will be rebuilt afterward.
2517  free_list_.Reset();
2518 }
2519 
2520 
2521 // -----------------------------------------------------------------------------
2522 // MapSpace implementation
2523 
2524 #ifdef DEBUG
2525 void MapSpace::VerifyObject(HeapObject* object) {
2526  // The object should be a map or a free-list node.
2527  ASSERT(object->IsMap() || object->IsFreeSpace());
2528 }
2529 #endif
2530 
2531 
2532 // -----------------------------------------------------------------------------
2533 // GlobalPropertyCellSpace implementation
2534 
2535 #ifdef DEBUG
2536 void CellSpace::VerifyObject(HeapObject* object) {
2537  // The object should be a global object property cell or a free-list node.
2538  ASSERT(object->IsJSGlobalPropertyCell() ||
2539  object->map() == heap()->two_pointer_filler_map());
2540 }
2541 #endif
2542 
2543 
2544 // -----------------------------------------------------------------------------
2545 // LargeObjectIterator
2546 
2547 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2548  current_ = space->first_page_;
2549  size_func_ = NULL;
2550 }
2551 
2552 
2553 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2554  HeapObjectCallback size_func) {
2555  current_ = space->first_page_;
2556  size_func_ = size_func;
2557 }
2558 
2559 
2560 HeapObject* LargeObjectIterator::Next() {
2561  if (current_ == NULL) return NULL;
2562 
2563  HeapObject* object = current_->GetObject();
2564  current_ = current_->next_page();
2565  return object;
2566 }
2567 
2568 
2569 // -----------------------------------------------------------------------------
2570 // LargeObjectSpace
2571 static bool ComparePointers(void* key1, void* key2) {
2572  return key1 == key2;
2573 }
2574 
2575 
2576 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2577  intptr_t max_capacity,
2578  AllocationSpace id)
2579  : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2580  max_capacity_(max_capacity),
2581  first_page_(NULL),
2582  size_(0),
2583  page_count_(0),
2584  objects_size_(0),
2585  chunk_map_(ComparePointers, 1024) {}
2586 
2587 
2588 bool LargeObjectSpace::SetUp() {
2589  first_page_ = NULL;
2590  size_ = 0;
2591  page_count_ = 0;
2592  objects_size_ = 0;
2593  chunk_map_.Clear();
2594  return true;
2595 }
2596 
2597 
2598 void LargeObjectSpace::TearDown() {
2599  while (first_page_ != NULL) {
2600  LargePage* page = first_page_;
2601  first_page_ = first_page_->next_page();
2602  LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2603 
2604  ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2605  heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2606  space, kAllocationActionFree, page->size());
2607  heap()->isolate()->memory_allocator()->Free(page);
2608  }
2609  SetUp();
2610 }
2611 
2612 
2613 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2614  Executability executable) {
2615  // Check if we want to force a GC before growing the old space further.
2616  // If so, fail the allocation.
2617  if (!heap()->always_allocate() &&
2618  heap()->OldGenerationAllocationLimitReached()) {
2619  return Failure::RetryAfterGC(identity());
2620  }
2621 
2622  if (Size() + object_size > max_capacity_) {
2623  return Failure::RetryAfterGC(identity());
2624  }
2625 
2626  LargePage* page = heap()->isolate()->memory_allocator()->
2627  AllocateLargePage(object_size, this, executable);
2628  if (page == NULL) return Failure::RetryAfterGC(identity());
2629  ASSERT(page->area_size() >= object_size);
2630 
2631  size_ += static_cast<int>(page->size());
2632  objects_size_ += object_size;
2633  page_count_++;
2634  page->set_next_page(first_page_);
2635  first_page_ = page;
2636 
2637  // Register all MemoryChunk::kAlignment-aligned chunks covered by
2638  // this large page in the chunk map.
2639  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2640  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2641  for (uintptr_t key = base; key <= limit; key++) {
2642  HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2643  static_cast<uint32_t>(key),
2644  true);
2645  ASSERT(entry != NULL);
2646  entry->value = page;
2647  }
2648 
2649  HeapObject* object = page->GetObject();
2650 
2651 #ifdef DEBUG
2652  // Make the object consistent so the heap can be verified in OldSpaceStep.
2653  reinterpret_cast<Object**>(object->address())[0] =
2654  heap()->fixed_array_map();
2655  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2656 #endif
2657 
2658  heap()->incremental_marking()->OldSpaceStep(object_size);
2659  return object;
2660 }
2661 
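Because one large page can cover several MemoryChunk::kAlignment-sized regions, AllocateRaw() stores one chunk_map_ entry per aligned slice, which is what lets FindPage() further down map any interior address back to its LargePage with a single lookup. The isolated computation below uses an assumed 1 MB alignment and invented addresses purely for illustration.

// Isolated version of the chunk-map key arithmetic used above. The
// alignment value stands in for MemoryChunk::kAlignment and is an
// assumption; the point is that every aligned slice the large page
// overlaps is keyed to the same LargePage.
#include <cstdio>
#include <stdint.h>

int main() {
  const uintptr_t kAlignment = 1 << 20;      // assumed chunk alignment (1 MB)
  uintptr_t page_start = 0x40000000;         // example (aligned) page address
  uintptr_t page_size = 3 * (1 << 20) + 4096;  // example: a ~3 MB large object

  // Same computation as in AllocateRaw(): one key per aligned slice.
  uintptr_t base = page_start / kAlignment;
  uintptr_t limit = base + (page_size - 1) / kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    std::printf("chunk_map_[%lu] -> page at 0x%lx\n",
                static_cast<unsigned long>(key),
                static_cast<unsigned long>(page_start));
  }

  // FindPage() reverses the mapping: interior address -> key -> page.
  uintptr_t interior = page_start + 2 * (1 << 20) + 123;
  std::printf("lookup key for interior address: %lu\n",
              static_cast<unsigned long>(interior / kAlignment));
  return 0;
}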
2662 
2663 // GC support
2664 MaybeObject* LargeObjectSpace::FindObject(Address a) {
2665  LargePage* page = FindPage(a);
2666  if (page != NULL) {
2667  return page->GetObject();
2668  }
2669  return Failure::Exception();
2670 }
2671 
2672 
2673 LargePage* LargeObjectSpace::FindPage(Address a) {
2674  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2675  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2676  static_cast<uint32_t>(key),
2677  false);
2678  if (e != NULL) {
2679  ASSERT(e->value != NULL);
2680  LargePage* page = reinterpret_cast<LargePage*>(e->value);
2681  ASSERT(page->is_valid());
2682  if (page->Contains(a)) {
2683  return page;
2684  }
2685  }
2686  return NULL;
2687 }
2688 
2689 
2690 void LargeObjectSpace::FreeUnmarkedObjects() {
2691  LargePage* previous = NULL;
2692  LargePage* current = first_page_;
2693  while (current != NULL) {
2694  HeapObject* object = current->GetObject();
2695  // Can this large page contain pointers to non-trivial objects? No other
2696  // pointer object is this big.
2697  bool is_pointer_object = object->IsFixedArray();
2698  MarkBit mark_bit = Marking::MarkBitFrom(object);
2699  if (mark_bit.Get()) {
2700  mark_bit.Clear();
2701  MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
2702  previous = current;
2703  current = current->next_page();
2704  } else {
2705  LargePage* page = current;
2706  // Cut the chunk out from the chunk list.
2707  current = current->next_page();
2708  if (previous == NULL) {
2709  first_page_ = current;
2710  } else {
2711  previous->set_next_page(current);
2712  }
2713 
2714  // Free the chunk.
2715  MarkCompactCollector::ReportDeleteIfNeeded(
2716  object, heap()->isolate());
2717  size_ -= static_cast<int>(page->size());
2718  objects_size_ -= object->Size();
2719  page_count_--;
2720 
2721  // Remove entries belonging to this page.
2722  // Use variable alignment to help pass length check (<= 80 characters)
2723  // of single line in tools/presubmit.py.
2724  const intptr_t alignment = MemoryChunk::kAlignment;
2725  uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
2726  uintptr_t limit = base + (page->size()-1)/alignment;
2727  for (uintptr_t key = base; key <= limit; key++) {
2728  chunk_map_.Remove(reinterpret_cast<void*>(key),
2729  static_cast<uint32_t>(key));
2730  }
2731 
2732  if (is_pointer_object) {
2733  heap()->QueueMemoryChunkForFree(page);
2734  } else {
2735  heap()->isolate()->memory_allocator()->Free(page);
2736  }
2737  }
2738  }
2739  heap()->FreeQueuedChunks();
2740 }
2741 
2742 
2743 bool LargeObjectSpace::Contains(HeapObject* object) {
2744  Address address = object->address();
2745  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2746 
2747  bool owned = (chunk->owner() == this);
2748 
2749  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2750 
2751  return owned;
2752 }
2753 
2754 
2755 #ifdef DEBUG
2756 // We do not assume that the large object iterator works, because it depends
2757 // on the invariants we are checking during verification.
2758 void LargeObjectSpace::Verify() {
2759  for (LargePage* chunk = first_page_;
2760  chunk != NULL;
2761  chunk = chunk->next_page()) {
2762  // Each chunk contains an object that starts at the large object page's
2763  // object area start.
2764  HeapObject* object = chunk->GetObject();
2765  Page* page = Page::FromAddress(object->address());
2766  ASSERT(object->address() == page->area_start());
2767 
2768  // The first word should be a map, and we expect all map pointers to be
2769  // in map space.
2770  Map* map = object->map();
2771  ASSERT(map->IsMap());
2772  ASSERT(heap()->map_space()->Contains(map));
2773 
2774  // We have only code, sequential strings, external strings
2775  // (sequential strings that have been morphed into external
2776  // strings), fixed arrays, and byte arrays in large object space.
2777  ASSERT(object->IsCode() || object->IsSeqString() ||
2778  object->IsExternalString() || object->IsFixedArray() ||
2779  object->IsFixedDoubleArray() || object->IsByteArray());
2780 
2781  // The object itself should look OK.
2782  object->Verify();
2783 
2784  // Byte arrays and strings don't have interior pointers.
2785  if (object->IsCode()) {
2786  VerifyPointersVisitor code_visitor;
2787  object->IterateBody(map->instance_type(),
2788  object->Size(),
2789  &code_visitor);
2790  } else if (object->IsFixedArray()) {
2791  FixedArray* array = FixedArray::cast(object);
2792  for (int j = 0; j < array->length(); j++) {
2793  Object* element = array->get(j);
2794  if (element->IsHeapObject()) {
2795  HeapObject* element_object = HeapObject::cast(element);
2796  ASSERT(heap()->Contains(element_object));
2797  ASSERT(element_object->map()->IsMap());
2798  }
2799  }
2800  }
2801  }
2802 }
2803 
2804 
2805 void LargeObjectSpace::Print() {
2806  LargeObjectIterator it(this);
2807  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2808  obj->Print();
2809  }
2810 }
2811 
2812 
2813 void LargeObjectSpace::ReportStatistics() {
2814  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
2815  int num_objects = 0;
2816  ClearHistograms();
2817  LargeObjectIterator it(this);
2818  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2819  num_objects++;
2820  CollectHistogramInfo(obj);
2821  }
2822 
2823  PrintF(" number of objects %d, "
2824  "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
2825  if (num_objects > 0) ReportHistogram(false);
2826 }
2827 
2828 
2829 void LargeObjectSpace::CollectCodeStatistics() {
2830  Isolate* isolate = heap()->isolate();
2831  LargeObjectIterator obj_it(this);
2832  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2833  if (obj->IsCode()) {
2834  Code* code = Code::cast(obj);
2835  isolate->code_kind_statistics()[code->kind()] += code->Size();
2836  }
2837  }
2838 }
2839 
2840 
2841 void Page::Print() {
2842  // Make a best-effort to print the objects in the page.
2843  PrintF("Page@%p in %s\n",
2844  this->address(),
2845  AllocationSpaceName(this->owner()->identity()));
2846  printf(" --------------------------------------\n");
2847  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
2848  unsigned mark_size = 0;
2849  for (HeapObject* object = objects.Next();
2850  object != NULL;
2851  object = objects.Next()) {
2852  bool is_marked = Marking::MarkBitFrom(object).Get();
2853  PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
2854  if (is_marked) {
2855  mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
2856  }
2857  object->ShortPrint();
2858  PrintF("\n");
2859  }
2860  printf(" --------------------------------------\n");
2861  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2862 }
2863 
2864 #endif // DEBUG
2865 
2866 } } // namespace v8::internal
static bool IsBlack(MarkBit mark_bit)
Definition: mark-compact.h:70
byte * Address
Definition: globals.h:172
virtual bool ReserveSpace(int bytes)
Definition: spaces.cc:2185
static const int kHeaderSize
Definition: objects.h:3685
virtual intptr_t Size()
Definition: spaces.h:2491
#define SLOW_ASSERT(condition)
Definition: checks.h:276
void(* MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
Definition: v8.h:2689
void Allocate(int bytes)
Definition: spaces.h:1537
void Reset()
Definition: flags.cc:1446
#define CHECK_EQ(expected, value)
Definition: checks.h:219
void set_next_page(Page *page)
Definition: spaces-inl.h:236
void ZapBlock(Address start, size_t size)
Definition: spaces.cc:639
bool GrowTo(int new_capacity)
Definition: spaces.cc:1387
void SetNewLimit(Address limit)
Definition: heap.h:314
#define V8PRIxPTR
Definition: globals.h:204
intptr_t OldGenerationCapacityAvailable()
Definition: heap.h:1358
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
Definition: utils.h:212
CodeRange * code_range()
Definition: isolate.h:810
intptr_t Available()
Definition: spaces.h:1486
#define INSTANCE_TYPE_LIST(V)
Definition: objects.h:299
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
Definition: spaces.cc:430
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:1822
bool Contains(Address addr)
Definition: spaces.h:365
friend class PageIterator
Definition: spaces.h:1679
static void ReportDeleteIfNeeded(HeapObject *obj, Isolate *isolate)
void PrintF(const char *format,...)
Definition: v8utils.cc:40
void SetTop(Address top, Address limit)
Definition: spaces.h:1530
bool was_swept_conservatively()
Definition: spaces.h:1574
void set_next(FreeListNode *next)
Definition: spaces.cc:1874
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
Definition: spaces.cc:659
virtual void PrepareForMarkCompact()
Definition: spaces.cc:2208
Isolate * isolate()
Definition: heap-inl.h:494
bool SetUp(const size_t requested_size)
Definition: spaces.cc:135
void ReleaseAllUnusedPages()
Definition: spaces.cc:953
static Smi * FromInt(int value)
Definition: objects-inl.h:973
virtual intptr_t Waste()
Definition: spaces.h:1503
#define LOG(isolate, Call)
Definition: log.h:81
const int KB
Definition: globals.h:221
MemoryChunk * next_chunk_
Definition: spaces.h:605
static bool ShouldBeSweptLazily(Page *p)
Definition: spaces.h:1579
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:304
LargeObjectIterator(LargeObjectSpace *space)
Definition: spaces.cc:2547
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, 
true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information 
(implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") 
DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) 
DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 
0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file 
in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") 
DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
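The DEFINE_* entries condensed above all feed V8's command-line flag parser. As a hedged illustration (not part of spaces.cc itself), an embedder on this V8 version can set such flags either on the command line or programmatically through the public v8::V8::SetFlagsFromString API; the flag names below (--trace_gc, --gc_interval) are taken from the list above, and the value 1000 is an arbitrary example.

#include "v8.h"

// Sketch: enable GC tracing and a fixed allocation-count GC interval
// before any context is created. SetFlagsFromString is the public entry
// point into the flag parser that the DEFINE_* macros populate.
static void EnableGcFlags() {
  const char kFlags[] = "--trace_gc --gc_interval=1000";
  v8::V8::SetFlagsFromString(kFlags, static_cast<int>(sizeof(kFlags) - 1));
}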
static HeapObject * cast(Object *obj)
T Max(T a, T b)
Definition: utils.h:222
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:273
void TakeControl(VirtualMemory *from)
Definition: platform.h:391
void ResetAllocationInfo()
Definition: spaces.cc:1189
void SetNewSpacePageFlags(NewSpacePage *chunk)
Flag flags[]
Definition: flags.cc:1467
Address space_start()
Definition: spaces.h:1848
intptr_t SizeOfFirstPage()
Definition: spaces.cc:874
static const intptr_t kPageAlignmentMask
Definition: spaces.h:704
static FreeSpace * cast(Object *obj)
HeapObjectCallback GcSafeSizeOfOldObjectFunction()
Definition: heap.h:1201
static void Clear(MemoryChunk *chunk)
Definition: spaces-inl.h:42
static Failure * Exception()
Definition: objects-inl.h:1016
PromotionQueue * promotion_queue()
Definition: heap.h:1095
const int kMaxInt
Definition: globals.h:224
AllocationAction
Definition: v8.h:2683
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2272
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
Definition: spaces.cc:575
const char * AllocationSpaceName(AllocationSpace space)
#define ASSERT(condition)
Definition: checks.h:270
void set_reserved_memory(VirtualMemory *reservation)
Definition: spaces.h:345
v8::Handle< v8::Value > Print(const v8::Arguments &args)
static void IncrementLiveBytesFromGC(Address address, int by)
Definition: spaces.h:471
void ClearFlag(int flag)
Definition: spaces.h:421
#define ASSERT_GE(v1, v2)
Definition: checks.h:273
void Step(intptr_t allocated, CompletionAction action)
FreeListNode * next()
Definition: spaces.cc:1850
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
Definition: spaces.cc:586
static const int kFlagsOffset
Definition: spaces.h:564
const char * comment() const
Definition: flags.cc:1362
void RecordAllocation(HeapObject *obj)
Definition: spaces.cc:1804
NewSpacePage * current_page()
Definition: spaces.h:1912
#define CHECK(condition)
Definition: checks.h:56
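The checks.h macros referenced throughout this list (ASSERT, ASSERT_EQ/GE/LE, CHECK, CHECK_NE, UNREACHABLE) differ mainly in when they fire: the CHECK family is compiled into all builds, while the ASSERT family is active only in debug builds. A minimal illustrative sketch (the helper and its arguments are hypothetical, not code from this file):

// Hypothetical helper, for illustration only.
static int GuardedDivide(int numerator, int denominator) {
  CHECK_NE(0, denominator);   // enforced in release and debug builds
  ASSERT_GE(numerator, 0);    // debug-only sanity check
  return numerator / denominator;
}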
#define INCREMENT(type, size, name, camel_name)
MemoryAllocator(Isolate *isolate)
Definition: spaces.cc:264
#define STRING_TYPE_LIST(V)
Definition: objects.h:306
static const int kPageSize
Definition: spaces.h:695
CodeRange(Isolate *isolate)
Definition: spaces.cc:126
static Code * cast(Object *obj)
Address AllocateAlignedMemory(size_t requested, size_t alignment, Executability executable, VirtualMemory *controller)
Definition: spaces.cc:356
const intptr_t kHeapObjectTagMask
Definition: v8.h:3850
void FreeMemory(VirtualMemory *reservation, Executability executable)
Definition: spaces.cc:295
static bool IsAtEnd(Address addr)
Definition: spaces.h:1765
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.cc:2576
static MarkBit MarkBitFrom(Address addr)
AllocationStats accounting_stats_
Definition: spaces.h:1639
void Free(MemoryChunk *chunk)
Definition: spaces.cc:595
void set_size(int value)
Address ReserveAlignedMemory(size_t requested, size_t alignment, VirtualMemory *controller)
Definition: spaces.cc:342
uint8_t byte
Definition: globals.h:171
Executability executable()
Definition: spaces.h:769
NewSpacePage * first_page()
Definition: spaces.h:1911
SlotsBuffer * slots_buffer_
Definition: spaces.h:626
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
Definition: spaces.cc:679
#define UNREACHABLE()
Definition: checks.h:50
int(* HeapObjectCallback)(HeapObject *obj)
Definition: v8globals.h:245
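HeapObjectCallback is the size-function hook accepted by the heap object iterators declared in this file (see the HeapObjectIterator and SemiSpaceIterator entries elsewhere in this list). A minimal sketch of a conforming callback, assuming V8's internal headers are on the include path; returning HeapObject::Size() is the obvious choice, but any function matching the typedef works:

namespace v8 {
namespace internal {

// Illustrative size callback matching int (*HeapObjectCallback)(HeapObject* obj).
static int PlainObjectSize(HeapObject* obj) {
  return obj->Size();  // byte size as reported by the object itself
}

} }  // namespace v8::internal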
LargePage * FindPage(Address a)
Definition: spaces.cc:2673
FreeListNode ** next_address()
Definition: spaces.cc:1863
bool AdvanceSweeper(intptr_t bytes_to_sweep)
Definition: spaces.cc:2274
bool Contains(Address a)
Definition: spaces.h:2097
bool Contains(Address a)
Definition: spaces-inl.h:178
#define OFFSET_OF(type, field)
Definition: globals.h:287
void SetFlag(int flag)
Definition: spaces.h:417
static NewSpacePage * FromAddress(Address address_in_page)
Definition: spaces.h:1774
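FromAddress-style lookups such as NewSpacePage::FromAddress above (and the analogous Page and MemoryChunk helpers) depend on pages being kPageSize-aligned, so the header is found by masking off the low bits of any interior address. A standalone sketch of that arithmetic, assuming the 1 MB power-of-two page size used by this V8 version; the constant and function names are local stand-ins, not the real Address/Page types:

#include <cstdint>

static const uintptr_t kPageSizeBytes = 1 << 20;                   // 1 MB pages
static const uintptr_t kPageAlignmentMaskBits = kPageSizeBytes - 1;

// Round an interior address down to the start of its page header.
static uintptr_t PageHeaderFor(uintptr_t address_in_page) {
  return address_in_page & ~kPageAlignmentMaskBits;
}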
friend class NewSpacePageIterator
Definition: spaces.h:1968
bool UncommitBlock(Address start, size_t size)
Definition: spaces.cc:632
static Failure * RetryAfterGC()
Definition: objects-inl.h:1032
static int CodePageAreaStartOffset()
Definition: spaces.cc:715
void EvictEvacuationCandidatesFromFreeLists()
Definition: spaces.cc:2304
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:781
const int kPointerSize
Definition: globals.h:234
MemoryAllocator * memory_allocator()
Definition: isolate.h:830
bool IsFlagSet(int flag)
Definition: spaces.h:433
static Address & Address_at(Address addr)
Definition: v8memory.h:71
void QueueMemoryChunkForFree(MemoryChunk *chunk)
Definition: heap.cc:7093
static int CodePageGuardSize()
Definition: spaces.cc:710
const Address kZapValue
Definition: v8globals.h:89
const int kHeapObjectTag
Definition: v8.h:3848
bool IsAligned(T value, U alignment)
Definition: utils.h:206
T Remove(int i)
Definition: list-inl.h:116
void InitializeReservedMemory()
Definition: spaces.h:341
void decrement_scan_on_scavenge_pages()
Definition: heap.h:1088
intptr_t Available()
Definition: spaces.h:2137
virtual bool ReserveSpace(int bytes)
Definition: spaces.cc:2242
bool Commit(void *address, size_t size, bool is_executable)
void DecreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1600
Entry * Lookup(void *key, uint32_t hash, bool insert, AllocationPolicy allocator=AllocationPolicy())
Definition: hashmap.h:130
bool WasSweptPrecisely()
Definition: spaces.h:715
void initialize_scan_on_scavenge(bool scan)
Definition: spaces.h:351
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
size_t size() const
Definition: spaces.h:504
SemiSpaceId id()
Definition: spaces.h:1935
void RecordPromotion(HeapObject *obj)
Definition: spaces.cc:1812
bool contains(Address address)
Definition: spaces.h:830
#define V8_PTR_PREFIX
Definition: globals.h:196
void set_prev_page(Page *page)
Definition: spaces-inl.h:242
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, size_t *allocated)
Definition: spaces.cc:211
bool IsPowerOf2(T x)
Definition: utils.h:50
bool WasSwept()
Definition: spaces.h:717
static int CodePageAreaEndOffset()
Definition: spaces.cc:722
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1519
HeapObject * GetObject()
Definition: spaces.h:738
#define CHECK_NE(unexpected, value)
Definition: checks.h:223
void ReleasePage(Page *page)
Definition: spaces.cc:917
MemoryChunk * prev_chunk_
Definition: spaces.h:606
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
Definition: spaces-inl.h:162
void set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1146
void FreeQueuedChunks()
Definition: heap.cc:7099
static intptr_t SweepConservatively(PagedSpace *space, Page *p)
MaybeObject * FindObject(Address a)
Definition: spaces.cc:2664
static const int kObjectStartOffset
Definition: spaces.h:501
#define ISOLATE
Definition: isolate.h:1410
void set_prev_page(NewSpacePage *page)
Definition: spaces.h:1750
bool Contains(HeapObject *obj)
Definition: spaces.cc:2743
Space * owner() const
Definition: spaces.h:321
static void Swap(SemiSpace *from, SemiSpace *to)
Definition: spaces.cc:1493
void InitializeAsAnchor(PagedSpace *owner)
Definition: spaces.cc:386
int InitialSemiSpaceSize()
Definition: heap.h:470
static bool ReleaseRegion(void *base, size_t size)
SemiSpaceIterator(NewSpace *space)
Definition: spaces.cc:1583
static const intptr_t kLiveBytesOffset
Definition: spaces.h:484
intptr_t Capacity()
Definition: spaces.h:2125
static bool CommitRegion(void *base, size_t size, bool is_executable)
void set_next_page(NewSpacePage *page)
Definition: spaces.h:1742
void Sort(int(*cmp)(const T *x, const T *y))
Definition: list-inl.h:198
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space)
Definition: spaces.h:2348
MemoryChunk * AllocateChunk(intptr_t body_size, Executability executable, Space *space)
Definition: spaces.cc:490
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:1784
Heap * heap() const
Definition: spaces.h:766
IncrementalMarking * incremental_marking()
Definition: heap.h:1524
Page * prev_page()
Definition: spaces-inl.h:230
void IncrementLiveBytes(int by)
Definition: spaces.h:456
#define SET_NAME(name)
void SetUp(Address start, int initial_capacity, int maximum_capacity)
Definition: spaces.cc:1320
SemiSpace * semi_space()
Definition: spaces.h:1754
virtual bool ReserveSpace(int bytes)
Definition: spaces.cc:2267
static FreeListNode * FromAddress(Address address)
Definition: spaces.h:1302
SemiSpace(Heap *heap, SemiSpaceId semispace)
Definition: spaces.h:1819
NewSpacePage * next_page() const
Definition: spaces.h:1738
static bool UncommitRegion(void *base, size_t size)
static const intptr_t kAlignment
Definition: spaces.h:477
void set_owner(Space *space)
Definition: spaces.h:330
void * Remove(void *key, uint32_t hash)
Definition: hashmap.h:160
void RememberUnmappedPage(Address page, bool compacted)
Definition: heap.cc:7147
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:764
#define HEAP
Definition: isolate.h:1408
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
Definition: spaces.cc:646
LargePage * next_page() const
Definition: spaces.h:742
static const intptr_t kCopyOnFlipFlagsMask
Definition: spaces.h:1731
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
(Tooltip expansion residue: concatenated help strings for the harmony, elements/array, crankshaft/hydrogen, self-optimization, and architecture-specific (SSE3, CMOV, SAHF, VFP3, ARMv7, MIPS FPU) flags; the entry itself carries only the definition reference below.)
Definition: flags.cc:274
void set_prev_chunk(MemoryChunk *prev)
Definition: spaces.h:319
InstanceType instance_type()
Definition: objects-inl.h:2864
friend class LargeObjectIterator
Definition: spaces.h:2548
bool IsEvacuationCandidate()
Definition: spaces.h:566
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1163
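HeapObject::FromAddress and the kHeapObjectTag/kHeapObjectTagMask constants listed earlier reflect V8's pointer tagging: a heap pointer is the object's start address plus a small tag in the low bits, so converting between the two is a constant add or subtract. A standalone sketch with plain integers; the helper names are invented for illustration, and the tag value matches the v8.h constant above:

#include <cstdint>

static const uintptr_t kHeapObjectTagValue = 1;   // low-bit tag carried by heap pointers

static uintptr_t TaggedFromAddress(uintptr_t addr) { return addr + kHeapObjectTagValue; }
static uintptr_t AddressFromTagged(uintptr_t ptr)  { return ptr - kHeapObjectTagValue; }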
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.cc:775
T RoundDown(T x, intptr_t m)
Definition: utils.h:142
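The utils.h helpers scattered through this list (IsAligned, IsPowerOf2, RoundUp, RoundDown) wrap the standard power-of-two alignment tricks. A hedged, standalone restatement of that arithmetic, valid only when the alignment m is a power of two; plain integer stand-ins are used instead of V8's templates:

#include <cstdint>

static bool IsPowerOfTwo(uintptr_t x)                  { return x != 0 && (x & (x - 1)) == 0; }
static bool IsAlignedTo(uintptr_t value, uintptr_t m)  { return (value & (m - 1)) == 0; }
static uintptr_t RoundDownTo(uintptr_t x, uintptr_t m) { return x & ~(m - 1); }
static uintptr_t RoundUpTo(uintptr_t x, uintptr_t m)   { return RoundDownTo(x + m - 1, m); }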
void USE(T)
Definition: globals.h:303
Counters * counters()
Definition: isolate.h:804
static FixedArray * cast(Object *obj)
void InsertAfter(MemoryChunk *other)
Definition: spaces.cc:470
void set_next_page(LargePage *page)
Definition: spaces.h:746
Page * next_page()
Definition: spaces-inl.h:224
ObjectSpace
Definition: v8.h:2670
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:38
static size_t AllocateAlignment()
virtual void PrepareForMarkCompact()
Definition: spaces.cc:2506
void set_age_mark(Address mark)
Definition: spaces.cc:1514
static int CodePageGuardStartOffset()
Definition: spaces.cc:703
NewSpacePage * prev_page() const
Definition: spaces.h:1746
void CreateFillerObjectAt(Address addr, int size)
Definition: heap.cc:3447
SkipList * skip_list()
Definition: spaces.h:572
intptr_t unswept_free_bytes_
Definition: spaces.h:1665
bool CommitBlock(Address start, size_t size, Executability executable)
Definition: spaces.cc:620
AllocationInfo allocation_info_
Definition: spaces.h:1648
static const intptr_t kAllocatedThreshold
void FreeRawMemory(Address buf, size_t length)
Definition: spaces.cc:245
Executability executable()
Definition: spaces.h:515
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
Definition: spaces.cc:1043
static bool IsFreeListNode(HeapObject *object)
Definition: spaces-inl.h:354
MUST_USE_RESULT MaybeObject * FindObject(Address addr)
Definition: spaces.cc:822
void DeleteArray(T *array)
Definition: allocation.h:91
static bool IsAtStart(Address addr)
Definition: spaces.h:1760
T Min(T a, T b)
Definition: utils.h:229
void OldSpaceStep(intptr_t allocated)
bool ShrinkTo(int new_capacity)
Definition: spaces.cc:1425
HeapObjectIterator(PagedSpace *space)
Definition: spaces.cc:42
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2613
intptr_t OldGenerationSpaceAvailable()
Definition: heap.h:1354
static intptr_t CommitPageSize()
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
void SetFlags(intptr_t flags, intptr_t mask)
Definition: spaces.h:440
SkipList * skip_list_
Definition: spaces.h:627
static MUST_USE_RESULT bool CommitCodePage(VirtualMemory *vm, Address start, size_t size)
Definition: spaces.cc:729
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:318
static JSObject * cast(Object *obj)
VirtualMemory * reserved_memory()
Definition: spaces.h:337
FlagType type() const
Definition: flags.cc:1358
SlotsBuffer * slots_buffer()
Definition: spaces.h:580
OldSpace * old_data_space()
Definition: heap.h:501
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:1923
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1512
void AddAll(const List< T, AllocationPolicy > &other, AllocationPolicy allocator=AllocationPolicy())
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.cc:668
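AddMemoryAllocationCallback/RemoveMemoryAllocationCallback, together with the ObjectSpace and AllocationAction enums listed above, back the embedder-visible allocation hooks. A sketch of registering such a hook through the public v8::V8 API of this version; the callback body and its logging are illustrative only:

#include <cstdio>
#include "v8.h"

// Invoked for allocations and frees in the selected spaces.
static void OnV8Allocation(v8::ObjectSpace space,
                           v8::AllocationAction action,
                           int size) {
  std::printf("space=%d action=%d size=%d\n",
              static_cast<int>(space), static_cast<int>(action), size);
}

static void InstallAllocationHook() {
  v8::V8::AddMemoryAllocationCallback(OnV8Allocation,
                                      v8::kObjectSpaceAll,
                                      v8::kAllocationActionAll);
}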
AllocationSpace identity()
Definition: spaces.h:772
void Free()
Definition: list.h:64
bool Uncommit(void *address, size_t size)
virtual MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:2319