v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
spaces.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "macro-assembler.h"
31 #include "mark-compact.h"
32 #include "msan.h"
33 #include "platform.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
39 // ----------------------------------------------------------------------------
40 // HeapObjectIterator
41 
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
 43  // You can't actually iterate over the anchor page. It is not a real page,
44  // just an anchor for the double linked page list. Initialize as if we have
45  // reached the end of the anchor page, then the first iteration will move on
46  // to the first page.
47  Initialize(space,
48  NULL,
49  NULL,
50  kAllPagesInSpace,
51  NULL);
52 }
53 
54 
55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
 56  HeapObjectCallback size_func) {
57  // You can't actually iterate over the anchor page. It is not a real page,
58  // just an anchor for the double linked page list. Initialize the current
59  // address and end as NULL, then the first iteration will move on
60  // to the first page.
61  Initialize(space,
62  NULL,
63  NULL,
64  kAllPagesInSpace,
65  size_func);
66 }
67 
68 
69 HeapObjectIterator::HeapObjectIterator(Page* page,
 70  HeapObjectCallback size_func) {
71  Space* owner = page->owner();
72  ASSERT(owner == page->heap()->old_pointer_space() ||
73  owner == page->heap()->old_data_space() ||
74  owner == page->heap()->map_space() ||
75  owner == page->heap()->cell_space() ||
76  owner == page->heap()->property_cell_space() ||
77  owner == page->heap()->code_space());
78  Initialize(reinterpret_cast<PagedSpace*>(owner),
79  page->area_start(),
80  page->area_end(),
81  kOnePageOnly,
82  size_func);
83  ASSERT(page->WasSweptPrecisely());
84 }
85 
86 
87 void HeapObjectIterator::Initialize(PagedSpace* space,
88  Address cur, Address end,
89  HeapObjectIterator::PageMode mode,
90  HeapObjectCallback size_f) {
91  // Check that we actually can iterate this space.
93 
94  space_ = space;
95  cur_addr_ = cur;
96  cur_end_ = end;
97  page_mode_ = mode;
98  size_func_ = size_f;
99 }
100 
101 
102 // We have hit the end of the page and should advance to the next block of
103 // objects. This happens at the end of the page.
104 bool HeapObjectIterator::AdvanceToNextPage() {
105  ASSERT(cur_addr_ == cur_end_);
106  if (page_mode_ == kOnePageOnly) return false;
107  Page* cur_page;
108  if (cur_addr_ == NULL) {
109  cur_page = space_->anchor();
110  } else {
111  cur_page = Page::FromAddress(cur_addr_ - 1);
112  ASSERT(cur_addr_ == cur_page->area_end());
113  }
114  cur_page = cur_page->next_page();
115  if (cur_page == space_->anchor()) return false;
116  cur_addr_ = cur_page->area_start();
117  cur_end_ = cur_page->area_end();
118  ASSERT(cur_page->WasSweptPrecisely());
119  return true;
120 }
121 
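// -----------------------------------------------------------------------------
// Illustrative sketch (not part of spaces.cc): the constructors above start the
// iterator "at the end of" the anchor page so that the first call to
// AdvanceToNextPage() lands on the first real page. A minimal standalone
// version of that sentinel pattern, with hypothetical Node/List/Cursor names:

#include <cstdio>

struct Node {
  int value;
  Node* next;
  Node* prev;
};

struct List {
  Node anchor;  // Sentinel node: carries no value, only links.
  List() { anchor.next = &anchor; anchor.prev = &anchor; }
  void Append(Node* n) {
    n->prev = anchor.prev;
    n->next = &anchor;
    anchor.prev->next = n;
    anchor.prev = n;
  }
};

struct Cursor {
  explicit Cursor(List* list) : list_(list), current_(&list->anchor) {}
  // Mirrors AdvanceToNextPage(): returns false once we wrap back to the anchor.
  bool Advance() {
    current_ = current_->next;
    return current_ != &list_->anchor;
  }
  Node* current() const { return current_; }
 private:
  List* list_;
  Node* current_;
};

int main() {
  List list;
  Node a = {1, 0, 0}, b = {2, 0, 0};
  list.Append(&a);
  list.Append(&b);
  Cursor it(&list);
  while (it.Advance()) std::printf("%d\n", it.current()->value);  // 1, then 2
  return 0;
}
// -----------------------------------------------------------------------------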
122 
123 // -----------------------------------------------------------------------------
124 // CodeRange
125 
126 
127 CodeRange::CodeRange(Isolate* isolate)
 128  : isolate_(isolate),
129  code_range_(NULL),
130  free_list_(0),
131  allocation_list_(0),
132  current_allocation_block_index_(0) {
133 }
134 
135 
136 bool CodeRange::SetUp(const size_t requested) {
137  ASSERT(code_range_ == NULL);
138 
139  code_range_ = new VirtualMemory(requested);
140  CHECK(code_range_ != NULL);
141  if (!code_range_->IsReserved()) {
142  delete code_range_;
143  code_range_ = NULL;
144  return false;
145  }
146 
147  // We are sure that we have mapped a block of requested addresses.
148  ASSERT(code_range_->size() == requested);
149  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
150  Address base = reinterpret_cast<Address>(code_range_->address());
151  Address aligned_base =
152  RoundUp(reinterpret_cast<Address>(code_range_->address()),
 153  MemoryChunk::kAlignment);
 154  size_t size = code_range_->size() - (aligned_base - base);
155  allocation_list_.Add(FreeBlock(aligned_base, size));
156  current_allocation_block_index_ = 0;
157  return true;
158 }
159 
160 
161 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
162  const FreeBlock* right) {
163  // The entire point of CodeRange is that the difference between two
164  // addresses in the range can be represented as a signed 32-bit int,
165  // so the cast is semantically correct.
166  return static_cast<int>(left->start - right->start);
167 }
168 
169 
170 void CodeRange::GetNextAllocationBlock(size_t requested) {
171  for (current_allocation_block_index_++;
172  current_allocation_block_index_ < allocation_list_.length();
173  current_allocation_block_index_++) {
174  if (requested <= allocation_list_[current_allocation_block_index_].size) {
175  return; // Found a large enough allocation block.
176  }
177  }
178 
179  // Sort and merge the free blocks on the free list and the allocation list.
180  free_list_.AddAll(allocation_list_);
181  allocation_list_.Clear();
182  free_list_.Sort(&CompareFreeBlockAddress);
183  for (int i = 0; i < free_list_.length();) {
184  FreeBlock merged = free_list_[i];
185  i++;
186  // Add adjacent free blocks to the current merged block.
187  while (i < free_list_.length() &&
188  free_list_[i].start == merged.start + merged.size) {
189  merged.size += free_list_[i].size;
190  i++;
191  }
192  if (merged.size > 0) {
193  allocation_list_.Add(merged);
194  }
195  }
196  free_list_.Clear();
197 
198  for (current_allocation_block_index_ = 0;
199  current_allocation_block_index_ < allocation_list_.length();
200  current_allocation_block_index_++) {
201  if (requested <= allocation_list_[current_allocation_block_index_].size) {
202  return; // Found a large enough allocation block.
203  }
204  }
205 
206  // Code range is full or too fragmented.
207  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
208 }
209 
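// -----------------------------------------------------------------------------
// Illustrative sketch (not part of spaces.cc): GetNextAllocationBlock() above
// refills the allocation list by sorting the free blocks by start address and
// merging blocks that are exactly adjacent. The same coalescing step, written
// standalone with std::vector and std::sort in place of V8's List:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct FreeBlock {
  size_t start;  // An integer offset stands in for an Address here.
  size_t size;
};

static bool CompareFreeBlockStart(const FreeBlock& left,
                                  const FreeBlock& right) {
  return left.start < right.start;
}

static std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(), CompareFreeBlockStart);
  std::vector<FreeBlock> merged_list;
  for (size_t i = 0; i < blocks.size();) {
    FreeBlock merged = blocks[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < blocks.size() &&
           blocks[i].start == merged.start + merged.size) {
      merged.size += blocks[i].size;
      i++;
    }
    if (merged.size > 0) merged_list.push_back(merged);
  }
  return merged_list;
}

int main() {
  std::vector<FreeBlock> free_list = {
      {0x3000, 0x1000}, {0x1000, 0x1000}, {0x2000, 0x1000}, {0x6000, 0x2000}};
  std::vector<FreeBlock> merged = Coalesce(free_list);
  // Prints two blocks: [0x1000, +0x3000) and [0x6000, +0x2000).
  for (size_t i = 0; i < merged.size(); i++) {
    std::printf("start=%#zx size=%#zx\n", merged[i].start, merged[i].size);
  }
  return 0;
}
// -----------------------------------------------------------------------------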
210 
211 Address CodeRange::AllocateRawMemory(const size_t requested_size,
212  const size_t commit_size,
213  size_t* allocated) {
214  ASSERT(commit_size <= requested_size);
215  ASSERT(current_allocation_block_index_ < allocation_list_.length());
216  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
217  // Find an allocation block large enough. This function call may
218  // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
219  GetNextAllocationBlock(requested_size);
220  }
221  // Commit the requested memory at the start of the current allocation block.
222  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
223  FreeBlock current = allocation_list_[current_allocation_block_index_];
224  if (aligned_requested >= (current.size - Page::kPageSize)) {
225  // Don't leave a small free block, useless for a large object or chunk.
226  *allocated = current.size;
227  } else {
228  *allocated = aligned_requested;
229  }
230  ASSERT(*allocated <= current.size);
232  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
233  current.start,
234  commit_size,
235  *allocated)) {
236  *allocated = 0;
237  return NULL;
238  }
239  allocation_list_[current_allocation_block_index_].start += *allocated;
240  allocation_list_[current_allocation_block_index_].size -= *allocated;
241  if (*allocated == current.size) {
242  GetNextAllocationBlock(0); // This block is used up, get the next one.
243  }
244  return current.start;
245 }
246 
247 
248 bool CodeRange::CommitRawMemory(Address start, size_t length) {
249  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
250 }
251 
252 
253 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
254  return code_range_->Uncommit(start, length);
255 }
256 
257 
258 void CodeRange::FreeRawMemory(Address address, size_t length) {
260  free_list_.Add(FreeBlock(address, length));
261  code_range_->Uncommit(address, length);
262 }
263 
264 
265 void CodeRange::TearDown() {
 266  delete code_range_; // Frees all memory in the virtual memory range.
267  code_range_ = NULL;
268  free_list_.Free();
269  allocation_list_.Free();
270 }
271 
272 
273 // -----------------------------------------------------------------------------
274 // MemoryAllocator
275 //
276 
277 MemoryAllocator::MemoryAllocator(Isolate* isolate)
 278  : isolate_(isolate),
279  capacity_(0),
280  capacity_executable_(0),
281  size_(0),
282  size_executable_(0),
283  lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
284  highest_ever_allocated_(reinterpret_cast<void*>(0)) {
285 }
286 
287 
288 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
289  capacity_ = RoundUp(capacity, Page::kPageSize);
290  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
291  ASSERT_GE(capacity_, capacity_executable_);
292 
293  size_ = 0;
294  size_executable_ = 0;
295 
296  return true;
297 }
298 
299 
300 void MemoryAllocator::TearDown() {
 301  // Check that spaces were torn down before MemoryAllocator.
302  ASSERT(size_ == 0);
303  // TODO(gc) this will be true again when we fix FreeMemory.
304  // ASSERT(size_executable_ == 0);
305  capacity_ = 0;
306  capacity_executable_ = 0;
307 }
308 
309 
310 bool MemoryAllocator::CommitMemory(Address base,
 311  size_t size,
312  Executability executable) {
313  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
314  return false;
315  }
316  UpdateAllocatedSpaceLimits(base, base + size);
317  return true;
318 }
319 
320 
321 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
 322  Executability executable) {
323  // TODO(gc) make code_range part of memory allocator?
324  ASSERT(reservation->IsReserved());
325  size_t size = reservation->size();
326  ASSERT(size_ >= size);
327  size_ -= size;
328 
329  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
330 
331  if (executable == EXECUTABLE) {
332  ASSERT(size_executable_ >= size);
333  size_executable_ -= size;
334  }
335  // Code which is part of the code-range does not have its own VirtualMemory.
336  ASSERT(!isolate_->code_range()->contains(
337  static_cast<Address>(reservation->address())));
338  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
339  reservation->Release();
340 }
341 
342 
343 void MemoryAllocator::FreeMemory(Address base,
 344  size_t size,
345  Executability executable) {
346  // TODO(gc) make code_range part of memory allocator?
347  ASSERT(size_ >= size);
348  size_ -= size;
349 
350  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
351 
352  if (executable == EXECUTABLE) {
353  ASSERT(size_executable_ >= size);
354  size_executable_ -= size;
355  }
356  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
357  ASSERT(executable == EXECUTABLE);
358  isolate_->code_range()->FreeRawMemory(base, size);
359  } else {
360  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
361  bool result = VirtualMemory::ReleaseRegion(base, size);
362  USE(result);
363  ASSERT(result);
364  }
365 }
366 
367 
368 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
 369  size_t alignment,
370  VirtualMemory* controller) {
371  VirtualMemory reservation(size, alignment);
372 
373  if (!reservation.IsReserved()) return NULL;
374  size_ += reservation.size();
375  Address base = RoundUp(static_cast<Address>(reservation.address()),
376  alignment);
377  controller->TakeControl(&reservation);
378  return base;
379 }
380 
381 
382 Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
 383  size_t commit_size,
384  size_t alignment,
385  Executability executable,
386  VirtualMemory* controller) {
387  ASSERT(commit_size <= reserve_size);
388  VirtualMemory reservation;
389  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
390  if (base == NULL) return NULL;
391 
392  if (executable == EXECUTABLE) {
393  if (!CommitExecutableMemory(&reservation,
394  base,
395  commit_size,
396  reserve_size)) {
397  base = NULL;
398  }
399  } else {
400  if (reservation.Commit(base, commit_size, false)) {
401  UpdateAllocatedSpaceLimits(base, base + commit_size);
402  } else {
403  base = NULL;
404  }
405  }
406 
407  if (base == NULL) {
408  // Failed to commit the body. Release the mapping and any partially
 409  // committed regions inside it.
410  reservation.Release();
411  return NULL;
412  }
413 
414  controller->TakeControl(&reservation);
415  return base;
416 }
417 
418 
419 void Page::InitializeAsAnchor(PagedSpace* owner) {
 420  set_owner(owner);
421  set_prev_page(this);
422  set_next_page(this);
423 }
424 
425 
426 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
427  Address start,
428  SemiSpace* semi_space) {
 429  Address area_start = start + NewSpacePage::kObjectStartOffset;
 430  Address area_end = start + Page::kPageSize;
431 
432  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
433  start,
434  Page::kPageSize,
435  area_start,
436  area_end,
 437  NOT_EXECUTABLE,
 438  semi_space);
439  chunk->set_next_chunk(NULL);
440  chunk->set_prev_chunk(NULL);
441  chunk->initialize_scan_on_scavenge(true);
442  bool in_to_space = (semi_space->id() != kFromSpace);
 443  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
 444  : MemoryChunk::IN_FROM_SPACE);
 445  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
 446  : MemoryChunk::IN_TO_SPACE));
 447  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
449  return page;
450 }
451 
452 
453 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
454  set_owner(semi_space);
455  set_next_chunk(this);
456  set_prev_chunk(this);
 457  // Clearing all flags marks this invalid page as not being in new-space.
458  // All real new-space pages will be in new-space.
459  SetFlags(0, ~0);
460 }
461 
462 
463 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
 464  Address base,
465  size_t size,
466  Address area_start,
467  Address area_end,
468  Executability executable,
469  Space* owner) {
470  MemoryChunk* chunk = FromAddress(base);
471 
472  ASSERT(base == chunk->address());
473 
474  chunk->heap_ = heap;
475  chunk->size_ = size;
476  chunk->area_start_ = area_start;
477  chunk->area_end_ = area_end;
478  chunk->flags_ = 0;
479  chunk->set_owner(owner);
480  chunk->InitializeReservedMemory();
481  chunk->slots_buffer_ = NULL;
482  chunk->skip_list_ = NULL;
484  chunk->progress_bar_ = 0;
485  chunk->high_water_mark_ = static_cast<int>(area_start - base);
490  chunk->available_in_huge_free_list_ = 0;
491  chunk->non_available_small_blocks_ = 0;
492  chunk->ResetLiveBytes();
493  Bitmap::Clear(chunk);
494  chunk->initialize_scan_on_scavenge(false);
496 
499 
500  if (executable == EXECUTABLE) {
501  chunk->SetFlag(IS_EXECUTABLE);
502  }
503 
504  if (owner == heap->old_data_space()) {
505  chunk->SetFlag(CONTAINS_ONLY_DATA);
506  }
507 
508  return chunk;
509 }
510 
511 
512 // Commit MemoryChunk area to the requested size.
513 bool MemoryChunk::CommitArea(size_t requested) {
 514  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
 515  MemoryAllocator::CodePageGuardSize() : 0;
516  size_t header_size = area_start() - address() - guard_size;
517  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
 518  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
 519  OS::CommitPageSize());
520 
521  if (commit_size > committed_size) {
 522  // Commit size should be less than or equal to the reserved size.
523  ASSERT(commit_size <= size() - 2 * guard_size);
524  // Append the committed area.
525  Address start = address() + committed_size + guard_size;
526  size_t length = commit_size - committed_size;
 527  if (reservation_.IsReserved()) {
 528  Executability executable = IsFlagSet(IS_EXECUTABLE)
 529  ? EXECUTABLE : NOT_EXECUTABLE;
 530  if (!heap()->isolate()->memory_allocator()->CommitMemory(
531  start, length, executable)) {
532  return false;
533  }
534  } else {
535  CodeRange* code_range = heap_->isolate()->code_range();
536  ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
537  if (!code_range->CommitRawMemory(start, length)) return false;
538  }
539 
540  if (Heap::ShouldZapGarbage()) {
541  heap_->isolate()->memory_allocator()->ZapBlock(start, length);
542  }
543  } else if (commit_size < committed_size) {
544  ASSERT(commit_size > 0);
545  // Shrink the committed area.
546  size_t length = committed_size - commit_size;
547  Address start = address() + committed_size + guard_size - length;
548  if (reservation_.IsReserved()) {
549  if (!reservation_.Uncommit(start, length)) return false;
550  } else {
551  CodeRange* code_range = heap_->isolate()->code_range();
552  ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
553  if (!code_range->UncommitRawMemory(start, length)) return false;
554  }
555  }
556 
557  area_end_ = area_start_ + requested;
558  return true;
559 }
560 
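// -----------------------------------------------------------------------------
// Illustrative sketch (not part of spaces.cc): CommitArea() above grows or
// shrinks the committed tail of a chunk in OS-page-sized steps. Just the size
// arithmetic, standalone, assuming a 4 KB commit page (the real granularity
// comes from OS::CommitPageSize()):

#include <cstddef>
#include <cstdio>

// Assumed commit granularity for the example.
static const size_t kCommitPageSize = 4096;

static size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Signed number of bytes to commit (> 0) or uncommit (< 0) when the usable
// area changes from current_area to requested_area bytes behind a fixed-size
// header, mirroring the committed_size/commit_size comparison above.
static long CommitDelta(size_t header_size,
                        size_t current_area,
                        size_t requested_area) {
  size_t committed_size = RoundUpTo(header_size + current_area, kCommitPageSize);
  size_t commit_size = RoundUpTo(header_size + requested_area, kCommitPageSize);
  return static_cast<long>(commit_size) - static_cast<long>(committed_size);
}

int main() {
  // Growing the area from 8 KB to 24 KB behind a 1 KB header commits 16 KB.
  std::printf("%ld\n", CommitDelta(1024, 8 * 1024, 24 * 1024));   // 16384
  // Shrinking back uncommits the same 16 KB.
  std::printf("%ld\n", CommitDelta(1024, 24 * 1024, 8 * 1024));   // -16384
  return 0;
}
// -----------------------------------------------------------------------------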
561 
562 void MemoryChunk::InsertAfter(MemoryChunk* other) {
 563  MemoryChunk* other_next = other->next_chunk();
564 
565  set_next_chunk(other_next);
566  set_prev_chunk(other);
567  other_next->set_prev_chunk(this);
568  other->set_next_chunk(this);
569 }
570 
571 
572 void MemoryChunk::Unlink() {
 573  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
 574  heap_->decrement_scan_on_scavenge_pages();
 575  ClearFlag(SCAN_ON_SCAVENGE);
 576  }
 577  MemoryChunk* next_element = next_chunk();
 578  MemoryChunk* prev_element = prev_chunk();
 579  next_element->set_prev_chunk(prev_element);
 580  prev_element->set_next_chunk(next_element);
 581  set_prev_chunk(NULL);
 582  set_next_chunk(NULL);
 583 }
584 
585 
586 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
587  intptr_t commit_area_size,
588  Executability executable,
589  Space* owner) {
590  ASSERT(commit_area_size <= reserve_area_size);
591 
592  size_t chunk_size;
593  Heap* heap = isolate_->heap();
594  Address base = NULL;
595  VirtualMemory reservation;
596  Address area_start = NULL;
597  Address area_end = NULL;
598 
599  //
600  // MemoryChunk layout:
601  //
602  // Executable
603  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
604  // | Header |
605  // +----------------------------+<- base + CodePageGuardStartOffset
606  // | Guard |
607  // +----------------------------+<- area_start_
608  // | Area |
609  // +----------------------------+<- area_end_ (area_start + commit_area_size)
610  // | Committed but not used |
611  // +----------------------------+<- aligned at OS page boundary
612  // | Reserved but not committed |
613  // +----------------------------+<- aligned at OS page boundary
614  // | Guard |
615  // +----------------------------+<- base + chunk_size
616  //
617  // Non-executable
618  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
619  // | Header |
620  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
621  // | Area |
622  // +----------------------------+<- area_end_ (area_start + commit_area_size)
623  // | Committed but not used |
624  // +----------------------------+<- aligned at OS page boundary
625  // | Reserved but not committed |
626  // +----------------------------+<- base + chunk_size
627  //
628 
629  if (executable == EXECUTABLE) {
 630  chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
 631  OS::CommitPageSize()) + CodePageGuardSize();
632 
633  // Check executable memory limit.
634  if (size_executable_ + chunk_size > capacity_executable_) {
635  LOG(isolate_,
636  StringEvent("MemoryAllocator::AllocateRawMemory",
637  "V8 Executable Allocation capacity exceeded"));
638  return NULL;
639  }
640 
641  // Size of header (not executable) plus area (executable).
 642  size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
 643  OS::CommitPageSize());
644  // Allocate executable memory either from code range or from the
645  // OS.
646  if (isolate_->code_range()->exists()) {
647  base = isolate_->code_range()->AllocateRawMemory(chunk_size,
648  commit_size,
649  &chunk_size);
 650  ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
 651  MemoryChunk::kAlignment));
652  if (base == NULL) return NULL;
653  size_ += chunk_size;
654  // Update executable memory size.
655  size_executable_ += chunk_size;
656  } else {
657  base = AllocateAlignedMemory(chunk_size,
 658  commit_size,
 659  MemoryChunk::kAlignment,
660  executable,
661  &reservation);
662  if (base == NULL) return NULL;
663  // Update executable memory size.
664  size_executable_ += reservation.size();
665  }
666 
 667  if (Heap::ShouldZapGarbage()) {
 668  ZapBlock(base, CodePageGuardStartOffset());
669  ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
670  }
671 
672  area_start = base + CodePageAreaStartOffset();
673  area_end = area_start + commit_area_size;
674  } else {
 675  chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
 676  MemoryChunk::kAlignment);
677  size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
678  commit_area_size, OS::CommitPageSize());
679  base = AllocateAlignedMemory(chunk_size,
 680  commit_size,
 681  MemoryChunk::kAlignment,
682  executable,
683  &reservation);
684 
685  if (base == NULL) return NULL;
686 
687  if (Heap::ShouldZapGarbage()) {
688  ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
689  }
690 
691  area_start = base + Page::kObjectStartOffset;
692  area_end = area_start + commit_area_size;
693  }
694 
695  // Use chunk_size for statistics and callbacks because we assume that they
696  // treat reserved but not-yet committed memory regions of chunks as allocated.
697  isolate_->counters()->memory_allocated()->
698  Increment(static_cast<int>(chunk_size));
699 
700  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
701  if (owner != NULL) {
 702  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
 703  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
704  }
705 
706  MemoryChunk* result = MemoryChunk::Initialize(heap,
707  base,
708  chunk_size,
709  area_start,
710  area_end,
711  executable,
712  owner);
713  result->set_reserved_memory(&reservation);
714  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
715  return result;
716 }
717 
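// -----------------------------------------------------------------------------
// Illustrative sketch (not part of spaces.cc): the layout diagram inside
// AllocateChunk() boils down to simple size arithmetic. A worked example with
// assumed placeholder constants (the real values come from MemoryChunk, Page,
// OS::CommitPageSize() and the CodePageGuard*/CodePageArea* helpers below):

#include <cstddef>
#include <cstdio>

static const size_t kAlignment = 1 << 20;       // assumed chunk alignment
static const size_t kObjectStartOffset = 1024;  // assumed header size
static const size_t kCommitPageSize = 4096;     // assumed OS commit page
static const size_t kGuardSize = 4096;          // assumed guard page size
static const size_t kAreaStartOffset = kObjectStartOffset + kGuardSize;

static size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  size_t reserve_area_size = 500 * 1024;

  // Non-executable: header + area, rounded up to the chunk alignment.
  size_t plain = RoundUpTo(kObjectStartOffset + reserve_area_size, kAlignment);

  // Executable: header + leading guard + area, rounded to a commit page,
  // plus a trailing guard page, mirroring the EXECUTABLE branch above.
  size_t exec =
      RoundUpTo(kAreaStartOffset + reserve_area_size, kCommitPageSize) +
      kGuardSize;

  std::printf("plain chunk: %zu bytes, executable chunk: %zu bytes\n",
              plain, exec);
  return 0;
}
// -----------------------------------------------------------------------------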
718 
719 void Page::ResetFreeListStatistics() {
 720  non_available_small_blocks_ = 0;
 721  available_in_small_free_list_ = 0;
 722  available_in_medium_free_list_ = 0;
 723  available_in_large_free_list_ = 0;
 724  available_in_huge_free_list_ = 0;
725 }
726 
727 
728 Page* MemoryAllocator::AllocatePage(intptr_t size,
 729  PagedSpace* owner,
730  Executability executable) {
731  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
732 
733  if (chunk == NULL) return NULL;
734 
735  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
736 }
737 
738 
739 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
 740  Space* owner,
741  Executability executable) {
742  MemoryChunk* chunk = AllocateChunk(object_size,
743  object_size,
744  executable,
745  owner);
746  if (chunk == NULL) return NULL;
747  return LargePage::Initialize(isolate_->heap(), chunk);
748 }
749 
750 
751 void MemoryAllocator::Free(MemoryChunk* chunk) {
 752  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
753  if (chunk->owner() != NULL) {
754  ObjectSpace space =
 755  static_cast<ObjectSpace>(1 << chunk->owner()->identity());
 756  PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
757  }
758 
759  isolate_->heap()->RememberUnmappedPage(
760  reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
761 
762  delete chunk->slots_buffer();
763  delete chunk->skip_list();
764 
765  VirtualMemory* reservation = chunk->reserved_memory();
766  if (reservation->IsReserved()) {
767  FreeMemory(reservation, chunk->executable());
768  } else {
769  FreeMemory(chunk->address(),
770  chunk->size(),
771  chunk->executable());
772  }
773 }
774 
775 
776 bool MemoryAllocator::CommitBlock(Address start,
 777  size_t size,
778  Executability executable) {
779  if (!CommitMemory(start, size, executable)) return false;
780 
781  if (Heap::ShouldZapGarbage()) {
782  ZapBlock(start, size);
783  }
784 
785  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
786  return true;
787 }
788 
789 
790 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
 791  if (!VirtualMemory::UncommitRegion(start, size)) return false;
792  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
793  return true;
794 }
795 
796 
797 void MemoryAllocator::ZapBlock(Address start, size_t size) {
 798  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
799  Memory::Address_at(start + s) = kZapValue;
800  }
801 }
802 
803 
804 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
 805  AllocationAction action,
806  size_t size) {
807  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
808  MemoryAllocationCallbackRegistration registration =
809  memory_allocation_callbacks_[i];
810  if ((registration.space & space) == space &&
811  (registration.action & action) == action)
812  registration.callback(space, action, static_cast<int>(size));
813  }
814 }
815 
816 
817 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
 818  MemoryAllocationCallback callback) {
819  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
820  if (memory_allocation_callbacks_[i].callback == callback) return true;
821  }
822  return false;
823 }
824 
825 
826 void MemoryAllocator::AddMemoryAllocationCallback(
 827  MemoryAllocationCallback callback,
828  ObjectSpace space,
829  AllocationAction action) {
830  ASSERT(callback != NULL);
831  MemoryAllocationCallbackRegistration registration(callback, space, action);
833  return memory_allocation_callbacks_.Add(registration);
834 }
835 
836 
837 void MemoryAllocator::RemoveMemoryAllocationCallback(
 838  MemoryAllocationCallback callback) {
839  ASSERT(callback != NULL);
840  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
841  if (memory_allocation_callbacks_[i].callback == callback) {
842  memory_allocation_callbacks_.Remove(i);
843  return;
844  }
845  }
846  UNREACHABLE();
847 }
848 
849 
850 #ifdef DEBUG
851 void MemoryAllocator::ReportStatistics() {
852  float pct = static_cast<float>(capacity_ - size_) / capacity_;
853  PrintF(" capacity: %" V8_PTR_PREFIX "d"
854  ", used: %" V8_PTR_PREFIX "d"
855  ", available: %%%d\n\n",
856  capacity_, size_, static_cast<int>(pct*100));
857 }
858 #endif
859 
860 
861 int MemoryAllocator::CodePageGuardStartOffset() {
 862  // We are guarding code pages: the first OS page after the header
 863  // will be protected as non-writable.
 864  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
865 }
866 
867 
868 int MemoryAllocator::CodePageGuardSize() {
 869  return static_cast<int>(OS::CommitPageSize());
870 }
871 
872 
873 int MemoryAllocator::CodePageAreaStartOffset() {
 874  // We are guarding code pages: the first OS page after the header
 875  // will be protected as non-writable.
 876  return CodePageGuardStartOffset() + CodePageGuardSize();
877 }
878 
879 
880 int MemoryAllocator::CodePageAreaEndOffset() {
 881  // We are guarding code pages: the last OS page will be protected as
882  // non-writable.
883  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
884 }
885 
886 
887 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
 888  Address start,
889  size_t commit_size,
890  size_t reserved_size) {
891  // Commit page header (not executable).
 892  if (!vm->Commit(start,
 893  CodePageGuardStartOffset(),
894  false)) {
895  return false;
896  }
897 
898  // Create guard page after the header.
899  if (!vm->Guard(start + CodePageGuardStartOffset())) {
900  return false;
901  }
902 
903  // Commit page body (executable).
904  if (!vm->Commit(start + CodePageAreaStartOffset(),
905  commit_size - CodePageGuardStartOffset(),
906  true)) {
907  return false;
908  }
909 
910  // Create guard page before the end.
911  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
912  return false;
913  }
914 
915  UpdateAllocatedSpaceLimits(start,
916  start + CodePageAreaStartOffset() +
917  commit_size - CodePageGuardStartOffset());
918  return true;
919 }
920 
921 
922 // -----------------------------------------------------------------------------
923 // MemoryChunk implementation
924 
925 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
 926  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
927  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
928  static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
929  }
930  chunk->IncrementLiveBytes(by);
931 }
932 
933 
934 // -----------------------------------------------------------------------------
935 // PagedSpace implementation
936 
937 PagedSpace::PagedSpace(Heap* heap,
 938  intptr_t max_capacity,
939  AllocationSpace id,
940  Executability executable)
941  : Space(heap, id, executable),
942  free_list_(this),
943  was_swept_conservatively_(false),
944  first_unswept_page_(Page::FromAddress(NULL)),
945  unswept_free_bytes_(0) {
946  if (id == CODE_SPACE) {
947  area_size_ = heap->isolate()->memory_allocator()->
948  CodePageAreaSize();
949  } else {
950  area_size_ = Page::kPageSize - Page::kObjectStartOffset;
951  }
952  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
953  * AreaSize();
954  accounting_stats_.Clear();
955 
956  allocation_info_.set_top(NULL);
957  allocation_info_.set_limit(NULL);
958 
 959  anchor_.InitializeAsAnchor(this);
960 }
961 
962 
963 bool PagedSpace::SetUp() {
 964  return true;
965 }
966 
967 
968 bool PagedSpace::HasBeenSetUp() {
 969  return true;
970 }
971 
972 
973 void PagedSpace::TearDown() {
 974  PageIterator iterator(this);
975  while (iterator.has_next()) {
976  heap()->isolate()->memory_allocator()->Free(iterator.next());
977  }
 978  anchor_.set_next_page(&anchor_);
 979  anchor_.set_prev_page(&anchor_);
 980  accounting_stats_.Clear();
981 }
982 
983 
984 size_t PagedSpace::CommittedPhysicalMemory() {
 985  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
 986  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
 987  size_t size = 0;
988  PageIterator it(this);
989  while (it.has_next()) {
990  size += it.next()->CommittedPhysicalMemory();
991  }
992  return size;
993 }
994 
995 
996 MaybeObject* PagedSpace::FindObject(Address addr) {
997  // Note: this function can only be called on precisely swept spaces.
998  ASSERT(!heap()->mark_compact_collector()->in_use());
999 
1000  if (!Contains(addr)) return Failure::Exception();
1001 
1002  Page* p = Page::FromAddress(addr);
1003  HeapObjectIterator it(p, NULL);
1004  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1005  Address cur = obj->address();
1006  Address next = cur + obj->Size();
1007  if ((cur <= addr) && (addr < next)) return obj;
1008  }
1009 
1010  UNREACHABLE();
1011  return Failure::Exception();
1012 }
1013 
1014 
1015 bool PagedSpace::CanExpand() {
 1016  ASSERT(max_capacity_ % AreaSize() == 0);
1017 
1018  if (Capacity() == max_capacity_) return false;
1019 
1021 
1022  // Are we going to exceed capacity for this space?
1023  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
1024 
1025  return true;
1026 }
1027 
1028 
1029 bool PagedSpace::Expand() {
 1030  if (!CanExpand()) return false;
1031 
1032  intptr_t size = AreaSize();
1033 
1034  if (anchor_.next_page() == &anchor_) {
1035  size = SizeOfFirstPage();
1036  }
1037 
 1038  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
 1039  size, this, executable());
1040  if (p == NULL) return false;
1041 
1043 
 1044  p->InsertAfter(anchor_.prev_page());
1045 
1046  return true;
1047 }
1048 
1049 
1050 intptr_t PagedSpace::SizeOfFirstPage() {
 1051  int size = 0;
1052  switch (identity()) {
1053  case OLD_POINTER_SPACE:
1054  size = 72 * kPointerSize * KB;
1055  break;
1056  case OLD_DATA_SPACE:
1057  size = 192 * KB;
1058  break;
1059  case MAP_SPACE:
1060  size = 16 * kPointerSize * KB;
1061  break;
1062  case CELL_SPACE:
1063  size = 16 * kPointerSize * KB;
1064  break;
1065  case PROPERTY_CELL_SPACE:
1066  size = 8 * kPointerSize * KB;
1067  break;
1068  case CODE_SPACE:
1069  if (heap()->isolate()->code_range()->exists()) {
1070  // When code range exists, code pages are allocated in a special way
1071  // (from the reserved code range). That part of the code is not yet
1072  // upgraded to handle small pages.
1073  size = AreaSize();
1074  } else {
1075  size = 480 * KB;
1076  }
1077  break;
1078  default:
1079  UNREACHABLE();
1080  }
1081  return Min(size, AreaSize());
1082 }
1083 
1084 
1085 int PagedSpace::CountTotalPages() {
 1086  PageIterator it(this);
1087  int count = 0;
1088  while (it.has_next()) {
1089  it.next();
1090  count++;
1091  }
1092  return count;
1093 }
1094 
1095 
1096 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
 1097  sizes->huge_size_ = page->available_in_huge_free_list();
1098  sizes->small_size_ = page->available_in_small_free_list();
1099  sizes->medium_size_ = page->available_in_medium_free_list();
1100  sizes->large_size_ = page->available_in_large_free_list();
1101 }
1102 
1103 
1104 void PagedSpace::ResetFreeListStatistics() {
 1105  PageIterator page_iterator(this);
1106  while (page_iterator.has_next()) {
1107  Page* page = page_iterator.next();
1108  page->ResetFreeListStatistics();
1109  }
1110 }
1111 
1112 
1113 void PagedSpace::IncreaseCapacity(int size) {
 1114  accounting_stats_.ExpandSpace(size);
1115 }
1116 
1117 
1118 void PagedSpace::ReleasePage(Page* page, bool unlink) {
1119  ASSERT(page->LiveBytes() == 0);
1120  ASSERT(AreaSize() == page->area_size());
1121 
1122  // Adjust list of unswept pages if the page is the head of the list.
1123  if (first_unswept_page_ == page) {
1124  first_unswept_page_ = page->next_page();
1125  if (first_unswept_page_ == anchor()) {
 1126  first_unswept_page_ = Page::FromAddress(NULL);
 1127  }
1128  }
1129 
1130  if (page->WasSwept()) {
1131  intptr_t size = free_list_.EvictFreeListItems(page);
1132  accounting_stats_.AllocateBytes(size);
1133  ASSERT_EQ(AreaSize(), static_cast<int>(size));
1134  } else {
 1135  DecreaseUnsweptFreeBytes(page);
 1136  }
1137 
1138  // TODO(hpayer): This check is just used for debugging purpose and
1139  // should be removed or turned into an assert after investigating the
1140  // crash in concurrent sweeping.
1142 
1143  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1144  allocation_info_.set_top(NULL);
1145  allocation_info_.set_limit(NULL);
1146  }
1147 
1148  if (unlink) {
1149  page->Unlink();
1150  }
 1151  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
 1152  heap()->isolate()->memory_allocator()->Free(page);
1153  } else {
1154  heap()->QueueMemoryChunkForFree(page);
1155  }
1156 
1157  ASSERT(Capacity() > 0);
1158  accounting_stats_.ShrinkSpace(AreaSize());
1159 }
1160 
1161 
1162 #ifdef DEBUG
1163 void PagedSpace::Print() { }
1164 #endif
1165 
1166 #ifdef VERIFY_HEAP
1167 void PagedSpace::Verify(ObjectVisitor* visitor) {
1168  // We can only iterate over the pages if they were swept precisely.
1169  if (was_swept_conservatively_) return;
1170 
1171  bool allocation_pointer_found_in_space =
1172  (allocation_info_.top() == allocation_info_.limit());
1173  PageIterator page_iterator(this);
1174  while (page_iterator.has_next()) {
1175  Page* page = page_iterator.next();
1176  CHECK(page->owner() == this);
1177  if (page == Page::FromAllocationTop(allocation_info_.top())) {
1178  allocation_pointer_found_in_space = true;
1179  }
1180  CHECK(page->WasSweptPrecisely());
1181  HeapObjectIterator it(page, NULL);
1182  Address end_of_previous_object = page->area_start();
1183  Address top = page->area_end();
1184  int black_size = 0;
1185  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1186  CHECK(end_of_previous_object <= object->address());
1187 
1188  // The first word should be a map, and we expect all map pointers to
1189  // be in map space.
1190  Map* map = object->map();
1191  CHECK(map->IsMap());
1192  CHECK(heap()->map_space()->Contains(map));
1193 
1194  // Perform space-specific object verification.
1195  VerifyObject(object);
1196 
1197  // The object itself should look OK.
1198  object->Verify();
1199 
1200  // All the interior pointers should be contained in the heap.
1201  int size = object->Size();
1202  object->IterateBody(map->instance_type(), size, visitor);
1203  if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1204  black_size += size;
1205  }
1206 
1207  CHECK(object->address() + size <= top);
1208  end_of_previous_object = object->address() + size;
1209  }
1210  CHECK_LE(black_size, page->LiveBytes());
1211  }
1212  CHECK(allocation_pointer_found_in_space);
1213 }
1214 #endif // VERIFY_HEAP
1215 
1216 // -----------------------------------------------------------------------------
1217 // NewSpace implementation
1218 
1219 
1220 bool NewSpace::SetUp(int reserved_semispace_capacity,
1221  int maximum_semispace_capacity) {
1222  // Set up new space based on the preallocated memory block defined by
1223  // start and size. The provided space is divided into two semi-spaces.
1224  // To support fast containment testing in the new space, the size of
1225  // this chunk must be a power of two and it must be aligned to its size.
1226  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1227 
1228  size_t size = 2 * reserved_semispace_capacity;
1229  Address base =
 1230  heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
 1231  size, size, &reservation_);
1232  if (base == NULL) return false;
1233 
1234  chunk_base_ = base;
1235  chunk_size_ = static_cast<uintptr_t>(size);
1236  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1237 
1238  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1239  ASSERT(IsPowerOf2(maximum_semispace_capacity));
1240 
1241  // Allocate and set up the histogram arrays if necessary.
1242  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1243  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1244 
1245 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1246  promoted_histogram_[name].set_name(#name);
 1247  INSTANCE_TYPE_LIST(SET_NAME)
1248 #undef SET_NAME
1249 
1250  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1251  ASSERT(static_cast<intptr_t>(chunk_size_) >=
1252  2 * heap()->ReservedSemiSpaceSize());
1253  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1254 
1255  to_space_.SetUp(chunk_base_,
1256  initial_semispace_capacity,
1257  maximum_semispace_capacity);
1258  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1259  initial_semispace_capacity,
1260  maximum_semispace_capacity);
1261  if (!to_space_.Commit()) {
1262  return false;
1263  }
1264  ASSERT(!from_space_.is_committed()); // No need to use memory yet.
1265 
1266  start_ = chunk_base_;
1267  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1268  object_mask_ = address_mask_ | kHeapObjectTagMask;
1269  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1270 
1270 
 1271  ResetAllocationInfo();
1272 
1273  return true;
1274 }
1275 
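// -----------------------------------------------------------------------------
// Illustrative sketch (not part of spaces.cc): SetUp() above requires the two
// semispaces to live in one power-of-two-sized, self-aligned block, so that a
// containment test reduces to mask-and-compare (see address_mask_ and
// object_expected_). Standalone, with an assumed 8 MB reservation:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kSize = 8u << 20;            // assumed 2 * semispace size
  const uintptr_t start = 5 * kSize;           // any address aligned to kSize
  const uintptr_t address_mask = ~(kSize - 1);

  // An address is inside the block iff masking off the low bits yields start.
  uintptr_t inside = start + 12345;
  uintptr_t outside = start + kSize + 1;
  std::printf("%d %d\n",
              static_cast<int>((inside & address_mask) == start),    // 1
              static_cast<int>((outside & address_mask) == start));  // 0
  return 0;
}
// -----------------------------------------------------------------------------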
1276 
1277 void NewSpace::TearDown() {
 1278  if (allocated_histogram_) {
1279  DeleteArray(allocated_histogram_);
1280  allocated_histogram_ = NULL;
1281  }
1282  if (promoted_histogram_) {
1283  DeleteArray(promoted_histogram_);
1284  promoted_histogram_ = NULL;
1285  }
1286 
1287  start_ = NULL;
1288  allocation_info_.set_top(NULL);
1289  allocation_info_.set_limit(NULL);
1290 
1291  to_space_.TearDown();
1292  from_space_.TearDown();
1293 
1294  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1295 
1296  ASSERT(reservation_.IsReserved());
1297  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1298  NOT_EXECUTABLE);
1299  chunk_base_ = NULL;
1300  chunk_size_ = 0;
1301 }
1302 
1303 
1304 void NewSpace::Flip() {
 1305  SemiSpace::Swap(&from_space_, &to_space_);
1306 }
1307 
1308 
1309 void NewSpace::Grow() {
 1310  // Double the semispace size but only up to maximum capacity.
1312  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1313  if (to_space_.GrowTo(new_capacity)) {
1314  // Only grow from space if we managed to grow to-space.
1315  if (!from_space_.GrowTo(new_capacity)) {
1316  // If we managed to grow to-space but couldn't grow from-space,
1317  // attempt to shrink to-space.
1318  if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1319  // We are in an inconsistent state because we could not
1320  // commit/uncommit memory from new space.
1321  V8::FatalProcessOutOfMemory("Failed to grow new space.");
1322  }
1323  }
1324  }
1325  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1326 }
1327 
1328 
1329 void NewSpace::Shrink() {
 1330  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1331  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1332  if (rounded_new_capacity < Capacity() &&
1333  to_space_.ShrinkTo(rounded_new_capacity)) {
1334  // Only shrink from-space if we managed to shrink to-space.
1335  from_space_.Reset();
1336  if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1337  // If we managed to shrink to-space but couldn't shrink from
1338  // space, attempt to grow to-space again.
1339  if (!to_space_.GrowTo(from_space_.Capacity())) {
1340  // We are in an inconsistent state because we could not
1341  // commit/uncommit memory from new space.
1342  V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1343  }
1344  }
1345  }
1346  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1347 }
1348 
1349 
1350 void NewSpace::UpdateAllocationInfo() {
1351  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1352  allocation_info_.set_top(to_space_.page_low());
1353  allocation_info_.set_limit(to_space_.page_high());
 1354  UpdateInlineAllocationLimit(0);
 1355  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1356 }
1357 
1358 
1359 void NewSpace::ResetAllocationInfo() {
 1360  to_space_.Reset();
1361  UpdateAllocationInfo();
1362  pages_used_ = 0;
1363  // Clear all mark-bits in the to-space.
1364  NewSpacePageIterator it(&to_space_);
1365  while (it.has_next()) {
1366  Bitmap::Clear(it.next());
1367  }
1368 }
1369 
1370 
1371 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1372  if (heap()->inline_allocation_disabled()) {
1373  // Lowest limit when linear allocation was disabled.
1374  Address high = to_space_.page_high();
1375  Address new_top = allocation_info_.top() + size_in_bytes;
1376  allocation_info_.set_limit(Min(new_top, high));
1377  } else if (inline_allocation_limit_step() == 0) {
1378  // Normal limit is the end of the current page.
1379  allocation_info_.set_limit(to_space_.page_high());
1380  } else {
1381  // Lower limit during incremental marking.
1382  Address high = to_space_.page_high();
1383  Address new_top = allocation_info_.top() + size_in_bytes;
1384  Address new_limit = new_top + inline_allocation_limit_step_;
1385  allocation_info_.set_limit(Min(new_limit, high));
1386  }
1387  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1388 }
1389 
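// -----------------------------------------------------------------------------
// Illustrative sketch (not part of spaces.cc): UpdateInlineAllocationLimit()
// above picks the linear-allocation limit in one of three ways. The same
// decision, standalone, with hypothetical inputs standing in for
// allocation_info_, to_space_.page_high() and the inline allocation step:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static uintptr_t ComputeLimit(uintptr_t top, uintptr_t page_high,
                              int size_in_bytes, uintptr_t step,
                              bool inline_allocation_disabled) {
  if (inline_allocation_disabled) {
    // Lowest possible limit: just enough room for this one allocation.
    return std::min(top + size_in_bytes, page_high);
  } else if (step == 0) {
    // Normal case: allocate linearly up to the end of the current page.
    return page_high;
  } else {
    // Incremental marking: stop 'step' bytes past the requested allocation so
    // the marker gets a chance to run.
    return std::min(top + size_in_bytes + step, page_high);
  }
}

int main() {
  uintptr_t top = 0x1000, high = 0x8000;
  std::printf("%#lx\n", (unsigned long) ComputeLimit(top, high, 64, 0, true));
  std::printf("%#lx\n", (unsigned long) ComputeLimit(top, high, 64, 0, false));
  std::printf("%#lx\n", (unsigned long) ComputeLimit(top, high, 64, 512, false));
  return 0;
}
// -----------------------------------------------------------------------------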
1390 
1391 bool NewSpace::AddFreshPage() {
 1392  Address top = allocation_info_.top();
1393  if (NewSpacePage::IsAtStart(top)) {
1394  // The current page is already empty. Don't try to make another.
1395 
1396  // We should only get here if someone asks to allocate more
1397  // than what can be stored in a single page.
1398  // TODO(gc): Change the limit on new-space allocation to prevent this
1399  // from happening (all such allocations should go directly to LOSpace).
1400  return false;
1401  }
1402  if (!to_space_.AdvancePage()) {
1403  // Failed to get a new page in to-space.
1404  return false;
1405  }
1406 
1407  // Clear remainder of current page.
 1408  Address limit = NewSpacePage::FromLimit(top)->area_end();
 1409  if (heap()->gc_state() == Heap::SCAVENGE) {
1410  heap()->promotion_queue()->SetNewLimit(limit);
1412  }
1413 
1414  int remaining_in_page = static_cast<int>(limit - top);
1415  heap()->CreateFillerObjectAt(top, remaining_in_page);
1416  pages_used_++;
1417  UpdateAllocationInfo();
1418 
1419  return true;
1420 }
1421 
1422 
1423 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1424  Address old_top = allocation_info_.top();
1425  Address high = to_space_.page_high();
1426  if (allocation_info_.limit() < high) {
1427  // Either the limit has been lowered because linear allocation was disabled
1428  // or because incremental marking wants to get a chance to do a step. Set
1429  // the new limit accordingly.
1430  Address new_top = old_top + size_in_bytes;
1431  int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
 1432  heap()->incremental_marking()->Step(
 1433  bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1434  UpdateInlineAllocationLimit(size_in_bytes);
1435  top_on_previous_step_ = new_top;
1436  return AllocateRaw(size_in_bytes);
1437  } else if (AddFreshPage()) {
1438  // Switched to new page. Try allocating again.
1439  int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
 1440  heap()->incremental_marking()->Step(
 1441  bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1442  top_on_previous_step_ = to_space_.page_low();
1443  return AllocateRaw(size_in_bytes);
1444  } else {
1445  return Failure::RetryAfterGC();
1446  }
1447 }
1448 
1449 
1450 #ifdef VERIFY_HEAP
1451 // We do not use the SemiSpaceIterator because verification doesn't assume
1452 // that it works (it depends on the invariants we are checking).
1453 void NewSpace::Verify() {
1454  // The allocation pointer should be in the space or at the very end.
1455  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1456 
1457  // There should be objects packed in from the low address up to the
1458  // allocation pointer.
1459  Address current = to_space_.first_page()->area_start();
1460  CHECK_EQ(current, to_space_.space_start());
1461 
1462  while (current != top()) {
1463  if (!NewSpacePage::IsAtEnd(current)) {
1464  // The allocation pointer should not be in the middle of an object.
1465  CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1466  current < top());
1467 
1468  HeapObject* object = HeapObject::FromAddress(current);
1469 
1470  // The first word should be a map, and we expect all map pointers to
1471  // be in map space.
1472  Map* map = object->map();
1473  CHECK(map->IsMap());
1474  CHECK(heap()->map_space()->Contains(map));
1475 
1476  // The object should not be code or a map.
1477  CHECK(!object->IsMap());
1478  CHECK(!object->IsCode());
1479 
1480  // The object itself should look OK.
1481  object->Verify();
1482 
1483  // All the interior pointers should be contained in the heap.
1484  VerifyPointersVisitor visitor;
1485  int size = object->Size();
1486  object->IterateBody(map->instance_type(), size, &visitor);
1487 
1488  current += size;
1489  } else {
1490  // At end of page, switch to next page.
1491  NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1492  // Next page should be valid.
1493  CHECK(!page->is_anchor());
1494  current = page->area_start();
1495  }
1496  }
1497 
1498  // Check semi-spaces.
1499  CHECK_EQ(from_space_.id(), kFromSpace);
1500  CHECK_EQ(to_space_.id(), kToSpace);
1501  from_space_.Verify();
1502  to_space_.Verify();
1503 }
1504 #endif
1505 
1506 // -----------------------------------------------------------------------------
1507 // SemiSpace implementation
1508 
1509 void SemiSpace::SetUp(Address start,
 1510  int initial_capacity,
1511  int maximum_capacity) {
1512  // Creates a space in the young generation. The constructor does not
1513  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1514  // memory of size 'capacity' when set up, and does not grow or shrink
1515  // otherwise. In the mark-compact collector, the memory region of the from
1516  // space is used as the marking stack. It requires contiguous memory
1517  // addresses.
1518  ASSERT(maximum_capacity >= Page::kPageSize);
1519  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1520  capacity_ = initial_capacity;
1521  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1522  maximum_committed_ = 0;
1523  committed_ = false;
1524  start_ = start;
1525  address_mask_ = ~(maximum_capacity - 1);
1526  object_mask_ = address_mask_ | kHeapObjectTagMask;
1527  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1528  age_mark_ = start_;
1529 }
1530 
1531 
1532 void SemiSpace::TearDown() {
 1533  start_ = NULL;
1534  capacity_ = 0;
1535 }
1536 
1537 
1538 bool SemiSpace::Commit() {
 1539  ASSERT(!is_committed());
1540  int pages = capacity_ / Page::kPageSize;
1541  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
1542  capacity_,
1543  executable())) {
1544  return false;
1545  }
1546 
1547  NewSpacePage* current = anchor();
1548  for (int i = 0; i < pages; i++) {
1549  NewSpacePage* new_page =
1550  NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1551  new_page->InsertAfter(current);
1552  current = new_page;
1553  }
1554 
1555  SetCapacity(capacity_);
1556  committed_ = true;
1557  Reset();
1558  return true;
1559 }
1560 
1561 
1562 bool SemiSpace::Uncommit() {
 1563  ASSERT(is_committed());
1564  Address start = start_ + maximum_capacity_ - capacity_;
1565  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1566  return false;
1567  }
1568  anchor()->set_next_page(anchor());
1569  anchor()->set_prev_page(anchor());
1570 
1571  committed_ = false;
1572  return true;
1573 }
1574 
1575 
1576 size_t SemiSpace::CommittedPhysicalMemory() {
 1577  if (!is_committed()) return 0;
1578  size_t size = 0;
1579  NewSpacePageIterator it(this);
1580  while (it.has_next()) {
1581  size += it.next()->CommittedPhysicalMemory();
1582  }
1583  return size;
1584 }
1585 
1586 
1587 bool SemiSpace::GrowTo(int new_capacity) {
1588  if (!is_committed()) {
1589  if (!Commit()) return false;
1590  }
1591  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1592  ASSERT(new_capacity <= maximum_capacity_);
1593  ASSERT(new_capacity > capacity_);
1594  int pages_before = capacity_ / Page::kPageSize;
1595  int pages_after = new_capacity / Page::kPageSize;
1596 
1597  size_t delta = new_capacity - capacity_;
1598 
1600  if (!heap()->isolate()->memory_allocator()->CommitBlock(
1601  start_ + capacity_, delta, executable())) {
1602  return false;
1603  }
1604  SetCapacity(new_capacity);
1605  NewSpacePage* last_page = anchor()->prev_page();
1606  ASSERT(last_page != anchor());
1607  for (int i = pages_before; i < pages_after; i++) {
1608  Address page_address = start_ + i * Page::kPageSize;
1609  NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1610  page_address,
1611  this);
1612  new_page->InsertAfter(last_page);
1613  Bitmap::Clear(new_page);
 1614  // Duplicate the flags that were set on the old page.
 1615  new_page->SetFlags(last_page->GetFlags(),
 1616  NewSpacePage::kCopyAllFlags);
1617  last_page = new_page;
1618  }
1619  return true;
1620 }
1621 
1622 
1623 bool SemiSpace::ShrinkTo(int new_capacity) {
1624  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1625  ASSERT(new_capacity >= initial_capacity_);
1626  ASSERT(new_capacity < capacity_);
1627  if (is_committed()) {
1628  size_t delta = capacity_ - new_capacity;
1630 
1631  MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1632  if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1633  return false;
1634  }
1635 
1636  int pages_after = new_capacity / Page::kPageSize;
1637  NewSpacePage* new_last_page =
1638  NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1639  new_last_page->set_next_page(anchor());
1640  anchor()->set_prev_page(new_last_page);
1641  ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1642  }
1643 
1644  SetCapacity(new_capacity);
1645 
1646  return true;
1647 }
1648 
1649 
1650 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1651  anchor_.set_owner(this);
1652  // Fixup back-pointers to anchor. Address of anchor changes
1653  // when we swap.
1654  anchor_.prev_page()->set_next_page(&anchor_);
1655  anchor_.next_page()->set_prev_page(&anchor_);
1656 
1657  bool becomes_to_space = (id_ == kFromSpace);
1658  id_ = becomes_to_space ? kToSpace : kFromSpace;
1659  NewSpacePage* page = anchor_.next_page();
1660  while (page != &anchor_) {
1661  page->set_owner(this);
1662  page->SetFlags(flags, mask);
1663  if (becomes_to_space) {
1664  page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1665  page->SetFlag(MemoryChunk::IN_TO_SPACE);
1667  page->ResetLiveBytes();
1668  } else {
1669  page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1670  page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1671  }
1673  ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1674  page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1675  page = page->next_page();
1676  }
1677 }
1678 
1679 
1680 void SemiSpace::Reset() {
 1681  ASSERT(anchor_.next_page() != &anchor_);
1682  current_page_ = anchor_.next_page();
1683 }
1684 
1685 
1686 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
 1687  // We won't be swapping semispaces without data in them.
1688  ASSERT(from->anchor_.next_page() != &from->anchor_);
1689  ASSERT(to->anchor_.next_page() != &to->anchor_);
1690 
1691  // Swap bits.
1692  SemiSpace tmp = *from;
1693  *from = *to;
1694  *to = tmp;
1695 
1696  // Fixup back-pointers to the page list anchor now that its address
1697  // has changed.
1698  // Swap to/from-space bits on pages.
1699  // Copy GC flags from old active space (from-space) to new (to-space).
1700  intptr_t flags = from->current_page()->GetFlags();
1701  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1702 
1703  from->FlipPages(0, 0);
1704 }
1705 
1706 
1707 void SemiSpace::SetCapacity(int new_capacity) {
1708  capacity_ = new_capacity;
1709  if (capacity_ > maximum_committed_) {
1710  maximum_committed_ = capacity_;
1711  }
1712 }
1713 
1714 
1715 void SemiSpace::set_age_mark(Address mark) {
 1716  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1717  age_mark_ = mark;
1718  // Mark all pages up to the one containing mark.
1719  NewSpacePageIterator it(space_start(), mark);
1720  while (it.has_next()) {
1721  it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1722  }
1723 }
1724 
1725 
1726 #ifdef DEBUG
1727 void SemiSpace::Print() { }
1728 #endif
1729 
1730 #ifdef VERIFY_HEAP
1731 void SemiSpace::Verify() {
1732  bool is_from_space = (id_ == kFromSpace);
1733  NewSpacePage* page = anchor_.next_page();
1734  CHECK(anchor_.semi_space() == this);
1735  while (page != &anchor_) {
1736  CHECK(page->semi_space() == this);
1737  CHECK(page->InNewSpace());
1738  CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1739  : MemoryChunk::IN_TO_SPACE));
1740  CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1741  : MemoryChunk::IN_FROM_SPACE));
1743  if (!is_from_space) {
1744  // The pointers-from-here-are-interesting flag isn't updated dynamically
1745  // on from-space pages, so it might be out of sync with the marking state.
1746  if (page->heap()->incremental_marking()->IsMarking()) {
1748  } else {
1749  CHECK(!page->IsFlagSet(
1751  }
1752  // TODO(gc): Check that the live_bytes_count_ field matches the
1753  // black marking on the page (if we make it match in new-space).
1754  }
1755  CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1756  CHECK(page->prev_page()->next_page() == page);
1757  page = page->next_page();
1758  }
1759 }
1760 #endif
1761 
1762 #ifdef DEBUG
1763 void SemiSpace::AssertValidRange(Address start, Address end) {
1764  // Addresses belong to same semi-space
1765  NewSpacePage* page = NewSpacePage::FromLimit(start);
1766  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1767  SemiSpace* space = page->semi_space();
1768  CHECK_EQ(space, end_page->semi_space());
1769  // Start address is before end address, either on same page,
1770  // or end address is on a later page in the linked list of
1771  // semi-space pages.
1772  if (page == end_page) {
1773  CHECK(start <= end);
1774  } else {
1775  while (page != end_page) {
1776  page = page->next_page();
1777  CHECK_NE(page, space->anchor());
1778  }
1779  }
1780 }
1781 #endif
1782 
1783 
1784 // -----------------------------------------------------------------------------
1785 // SemiSpaceIterator implementation.
1786 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
 1787  Initialize(space->bottom(), space->top(), NULL);
1788 }
1789 
1790 
1791 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
 1792  HeapObjectCallback size_func) {
1793  Initialize(space->bottom(), space->top(), size_func);
1794 }
1795 
1796 
1797 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
 1798  Initialize(start, space->top(), NULL);
1799 }
1800 
1801 
1802 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
 1803  Initialize(from, to, NULL);
1804 }
1805 
1806 
1807 void SemiSpaceIterator::Initialize(Address start,
1808  Address end,
1809  HeapObjectCallback size_func) {
1810  SemiSpace::AssertValidRange(start, end);
1811  current_ = start;
1812  limit_ = end;
1813  size_func_ = size_func;
1814 }
1815 
1816 
1817 #ifdef DEBUG
1818 // heap_histograms is shared, always clear it before using it.
1819 static void ClearHistograms(Isolate* isolate) {
1820  // We reset the name each time, though it hasn't changed.
1821 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1822  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1823 #undef DEF_TYPE_NAME
1824 
1825 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1826  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1827 #undef CLEAR_HISTOGRAM
1828 
1829  isolate->js_spill_information()->Clear();
1830 }
1831 
1832 
1833 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1834  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1835  code_kind_statistics[i] = 0;
1836  }
1837 }
1838 
1839 
1840 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1841  PrintF("\n Code kind histograms: \n");
1842  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1843  if (code_kind_statistics[i] > 0) {
1844  PrintF(" %-20s: %10d bytes\n",
1845  Code::Kind2String(static_cast<Code::Kind>(i)),
1846  code_kind_statistics[i]);
1847  }
1848  }
1849  PrintF("\n");
1850 }
1851 
1852 
1853 static int CollectHistogramInfo(HeapObject* obj) {
1854  Isolate* isolate = obj->GetIsolate();
1855  InstanceType type = obj->map()->instance_type();
1856  ASSERT(0 <= type && type <= LAST_TYPE);
1857  ASSERT(isolate->heap_histograms()[type].name() != NULL);
1858  isolate->heap_histograms()[type].increment_number(1);
1859  isolate->heap_histograms()[type].increment_bytes(obj->Size());
1860 
1861  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1862  JSObject::cast(obj)->IncrementSpillStatistics(
1863  isolate->js_spill_information());
1864  }
1865 
1866  return obj->Size();
1867 }
1868 
1869 
1870 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1871  PrintF("\n Object Histogram:\n");
1872  for (int i = 0; i <= LAST_TYPE; i++) {
1873  if (isolate->heap_histograms()[i].number() > 0) {
1874  PrintF(" %-34s%10d (%10d bytes)\n",
1875  isolate->heap_histograms()[i].name(),
1876  isolate->heap_histograms()[i].number(),
1877  isolate->heap_histograms()[i].bytes());
1878  }
1879  }
1880  PrintF("\n");
1881 
1882  // Summarize string types.
1883  int string_number = 0;
1884  int string_bytes = 0;
1885 #define INCREMENT(type, size, name, camel_name) \
1886  string_number += isolate->heap_histograms()[type].number(); \
1887  string_bytes += isolate->heap_histograms()[type].bytes();
 1888  STRING_TYPE_LIST(INCREMENT)
1889 #undef INCREMENT
1890  if (string_number > 0) {
1891  PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1892  string_bytes);
1893  }
1894 
1895  if (FLAG_collect_heap_spill_statistics && print_spill) {
1896  isolate->js_spill_information()->Print();
1897  }
1898 }
1899 #endif // DEBUG
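The histogram helpers above are driven by X-macro lists: INSTANCE_TYPE_LIST and STRING_TYPE_LIST expand a caller-supplied macro once per instance type, so a single list definition produces the name table, the clearing loop, and the string-type summation. Below is a minimal standalone sketch of the same pattern; the list, type names, and struct are hypothetical, not V8's.

#include <cstdio>

// Hypothetical type list standing in for INSTANCE_TYPE_LIST: the list macro
// invokes a caller-supplied macro V once per entry, so one list drives the
// enum, the name table, and any per-type loop.
#define DEMO_TYPE_LIST(V)      \
  V(DEMO_FIXED_ARRAY_TYPE)     \
  V(DEMO_ONE_BYTE_STRING_TYPE) \
  V(DEMO_CODE_TYPE)

enum DemoType {
#define DEF_ENUM(t) t,
  DEMO_TYPE_LIST(DEF_ENUM)
#undef DEF_ENUM
  DEMO_LAST_TYPE
};

struct DemoHistogram { const char* name; int number; int bytes; };

int main() {
  DemoHistogram histograms[DEMO_LAST_TYPE] = {};
  // Same expand/#undef rhythm as DEF_TYPE_NAME / CLEAR_HISTOGRAM above.
#define DEF_TYPE_NAME(t) histograms[t].name = #t;
  DEMO_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME
  for (int i = 0; i < DEMO_LAST_TYPE; i++) {
    std::printf("%-28s %6d (%6d bytes)\n", histograms[i].name,
                histograms[i].number, histograms[i].bytes);
  }
  return 0;
}

The real lists live in objects.h; the only point of the sketch is the expand-then-#undef pattern used by DEF_TYPE_NAME, CLEAR_HISTOGRAM and INCREMENT above.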
1900 
1901 
1902 // Support for statistics gathering for --heap-stats and --log-gc.
1903 void NewSpace::ClearHistograms() {
1904  for (int i = 0; i <= LAST_TYPE; i++) {
1905  allocated_histogram_[i].clear();
1906  promoted_histogram_[i].clear();
1907  }
1908 }
1909 
1910 
1911 // Because the copying collector does not touch garbage objects, we iterate
1912 // the new space before a collection to get a histogram of allocated objects.
1913 // This only happens when --log-gc flag is set.
1914 void NewSpace::CollectStatistics() {
1915  ClearHistograms();
1916  SemiSpaceIterator it(this);
1917  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1918  RecordAllocation(obj);
1919 }
1920 
1921 
1922 static void DoReportStatistics(Isolate* isolate,
1923  HistogramInfo* info, const char* description) {
1924  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1925  // Lump all the string types together.
1926  int string_number = 0;
1927  int string_bytes = 0;
1928 #define INCREMENT(type, size, name, camel_name) \
1929  string_number += info[type].number(); \
1930  string_bytes += info[type].bytes();
1931  STRING_TYPE_LIST(INCREMENT)
1932 #undef INCREMENT
1933  if (string_number > 0) {
1934  LOG(isolate,
1935  HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1936  }
1937 
1938  // Then do the other types.
1939  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1940  if (info[i].number() > 0) {
1941  LOG(isolate,
1942  HeapSampleItemEvent(info[i].name(), info[i].number(),
1943  info[i].bytes()));
1944  }
1945  }
1946  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1947 }
1948 
1949 
1950 void NewSpace::ReportStatistics() {
1951 #ifdef DEBUG
1952  if (FLAG_heap_stats) {
1953  float pct = static_cast<float>(Available()) / Capacity();
1954  PrintF(" capacity: %" V8_PTR_PREFIX "d"
1955  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1956  Capacity(), Available(), static_cast<int>(pct*100));
1957  PrintF("\n Object Histogram:\n");
1958  for (int i = 0; i <= LAST_TYPE; i++) {
1959  if (allocated_histogram_[i].number() > 0) {
1960  PrintF(" %-34s%10d (%10d bytes)\n",
1961  allocated_histogram_[i].name(),
1962  allocated_histogram_[i].number(),
1963  allocated_histogram_[i].bytes());
1964  }
1965  }
1966  PrintF("\n");
1967  }
1968 #endif // DEBUG
1969 
1970  if (FLAG_log_gc) {
1971  Isolate* isolate = heap()->isolate();
1972  DoReportStatistics(isolate, allocated_histogram_, "allocated");
1973  DoReportStatistics(isolate, promoted_histogram_, "promoted");
1974  }
1975 }
1976 
1977 
1978 void NewSpace::RecordAllocation(HeapObject* obj) {
1979  InstanceType type = obj->map()->instance_type();
1980  ASSERT(0 <= type && type <= LAST_TYPE);
1981  allocated_histogram_[type].increment_number(1);
1982  allocated_histogram_[type].increment_bytes(obj->Size());
1983 }
1984 
1985 
1986 void NewSpace::RecordPromotion(HeapObject* obj) {
1987  InstanceType type = obj->map()->instance_type();
1988  ASSERT(0 <= type && type <= LAST_TYPE);
1989  promoted_histogram_[type].increment_number(1);
1990  promoted_histogram_[type].increment_bytes(obj->Size());
1991 }
1992 
1993 
1994 size_t NewSpace::CommittedPhysicalMemory() {
1995  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
1996  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1997  size_t size = to_space_.CommittedPhysicalMemory();
1998  if (from_space_.is_committed()) {
1999  size += from_space_.CommittedPhysicalMemory();
2000  }
2001  return size;
2002 }
2003 
2004 
2005 // -----------------------------------------------------------------------------
2006 // Free lists for old object spaces implementation
2007 
2008 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
2009  ASSERT(size_in_bytes > 0);
2010  ASSERT(IsAligned(size_in_bytes, kPointerSize));
2011 
2012  // We write a map and possibly size information to the block. If the block
2013  // is big enough to be a FreeSpace with at least one extra word (the next
2014  // pointer), we set its map to be the free space map and its size to an
2015  // appropriate array length for the desired size from HeapObject::Size().
2016  // If the block is too small (e.g. one or two words) to hold both a size
2017  // field and a next pointer, we give it a filler map that gives it the
2018  // correct size.
2019  if (size_in_bytes > FreeSpace::kHeaderSize) {
2020  set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
2021  // Can't use FreeSpace::cast because it fails during deserialization.
2022  FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
2023  this_as_free_space->set_size(size_in_bytes);
2024  } else if (size_in_bytes == kPointerSize) {
2025  set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
2026  } else if (size_in_bytes == 2 * kPointerSize) {
2027  set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
2028  } else {
2029  UNREACHABLE();
2030  }
2031  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
2032  // deserialization because the free space map is not done yet.
2033 }
2034 
2035 
2036 FreeListNode* FreeListNode::next() {
2037  ASSERT(IsFreeListNode(this));
2038  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2039  ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2040  return reinterpret_cast<FreeListNode*>(
2041  Memory::Address_at(address() + kNextOffset));
2042  } else {
2043  return reinterpret_cast<FreeListNode*>(
2044  Memory::Address_at(address() + kPointerSize));
2045  }
2046 }
2047 
2048 
2049 FreeListNode** FreeListNode::next_address() {
2050  ASSERT(IsFreeListNode(this));
2051  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2052  ASSERT(Size() >= kNextOffset + kPointerSize);
2053  return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
2054  } else {
2055  return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
2056  }
2057 }
2058 
2059 
2060 void FreeListNode::set_next(FreeListNode* next) {
2061  ASSERT(IsFreeListNode(this));
2062  // While we are booting the VM the free space map will actually be null. So
2063  // we have to make sure that we don't try to use it for anything at that
2064  // stage.
2065  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2066  ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2067  Memory::Address_at(address() + kNextOffset) =
2068  reinterpret_cast<Address>(next);
2069  } else {
2070  Memory::Address_at(address() + kPointerSize) =
2071  reinterpret_cast<Address>(next);
2072  }
2073 }
2074 
2075 
2076 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2077  intptr_t free_bytes = 0;
2078  if (category->top() != NULL) {
2079  // This is safe (not going to deadlock) since Concatenate operations
2080  // are never performed on the same free lists at the same time in
2081  // reverse order.
2082  LockGuard<Mutex> target_lock_guard(mutex());
2083  LockGuard<Mutex> source_lock_guard(category->mutex());
2084  ASSERT(category->end_ != NULL);
2085  free_bytes = category->available();
2086  if (end_ == NULL) {
2087  end_ = category->end();
2088  } else {
2089  category->end()->set_next(top());
2090  }
2091  set_top(category->top());
2092  NoBarrier_Store(&top_, category->top_);
2093  available_ += category->available();
2094  category->Reset();
2095  }
2096  return free_bytes;
2097 }
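The locking comment in FreeListCategory::Concatenate() above is doing real work: the target's mutex is taken before the source's, which is only deadlock-free because two Concatenate calls are never in flight on the same pair of categories in opposite orders. Here is a minimal sketch of the hazard being ruled out, using plain std::mutex rather than V8's Mutex/LockGuard.

#include <mutex>

// Stand-in for FreeListCategory: one mutex per category.
struct Category { std::mutex mutex; };

// Concatenate-style locking: target first, then source.  If one thread ran
// Concatenate(&a, &b) while another ran Concatenate(&b, &a), each could end
// up holding its first lock while waiting for the other's, i.e. a classic
// lock-order inversion.  The V8 code avoids a global lock ordering by
// guaranteeing, at the call sites, that such a reversed pair never runs
// concurrently.
void Concatenate(Category* target, Category* source) {
  std::lock_guard<std::mutex> target_guard(target->mutex);
  std::lock_guard<std::mutex> source_guard(source->mutex);
  // ... splice source's nodes onto target and reset source ...
}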
2098 
2099 
2100 void FreeListCategory::Reset() {
2101  set_top(NULL);
2102  set_end(NULL);
2103  set_available(0);
2104 }
2105 
2106 
2107 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2108  int sum = 0;
2109  FreeListNode* t = top();
2110  FreeListNode** n = &t;
2111  while (*n != NULL) {
2112  if (Page::FromAddress((*n)->address()) == p) {
2113  FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2114  sum += free_space->Size();
2115  *n = (*n)->next();
2116  } else {
2117  n = (*n)->next_address();
2118  }
2119  }
2120  set_top(t);
2121  if (top() == NULL) {
2122  set_end(NULL);
2123  }
2124  available_ -= sum;
2125  return sum;
2126 }
2127 
2128 
2129 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2130  FreeListNode* node = top();
2131  while (node != NULL) {
2132  if (Page::FromAddress(node->address()) == p) return true;
2133  node = node->next();
2134  }
2135  return false;
2136 }
2137 
2138 
2139 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
2140  FreeListNode* node = top();
2141 
2142  if (node == NULL) return NULL;
2143 
2144  while (node != NULL &&
2145  Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2146  available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
2147  node = node->next();
2148  }
2149 
2150  if (node != NULL) {
2151  set_top(node->next());
2152  *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2153  available_ -= *node_size;
2154  } else {
2155  set_top(NULL);
2156  }
2157 
2158  if (top() == NULL) {
2159  set_end(NULL);
2160  }
2161 
2162  return node;
2163 }
2164 
2165 
2166 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2167  int *node_size) {
2168  FreeListNode* node = PickNodeFromList(node_size);
2169  if (node != NULL && *node_size < size_in_bytes) {
2170  Free(node, *node_size);
2171  *node_size = 0;
2172  return NULL;
2173  }
2174  return node;
2175 }
2176 
2177 
2178 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2179  node->set_next(top());
2180  set_top(node);
2181  if (end_ == NULL) {
2182  end_ = node;
2183  }
2184  available_ += size_in_bytes;
2185 }
2186 
2187 
2188 void FreeListCategory::RepairFreeList(Heap* heap) {
2189  FreeListNode* n = top();
2190  while (n != NULL) {
2191  Map** map_location = reinterpret_cast<Map**>(n->address());
2192  if (*map_location == NULL) {
2193  *map_location = heap->free_space_map();
2194  } else {
2195  ASSERT(*map_location == heap->free_space_map());
2196  }
2197  n = n->next();
2198  }
2199 }
2200 
2201 
2202 FreeList::FreeList(PagedSpace* owner)
2203  : owner_(owner), heap_(owner->heap()) {
2204  Reset();
2205 }
2206 
2207 
2208 intptr_t FreeList::Concatenate(FreeList* free_list) {
2209  intptr_t free_bytes = 0;
2210  free_bytes += small_list_.Concatenate(free_list->small_list());
2211  free_bytes += medium_list_.Concatenate(free_list->medium_list());
2212  free_bytes += large_list_.Concatenate(free_list->large_list());
2213  free_bytes += huge_list_.Concatenate(free_list->huge_list());
2214  return free_bytes;
2215 }
2216 
2217 
2218 void FreeList::Reset() {
2219  small_list_.Reset();
2220  medium_list_.Reset();
2221  large_list_.Reset();
2222  huge_list_.Reset();
2223 }
2224 
2225 
2226 int FreeList::Free(Address start, int size_in_bytes) {
2227  if (size_in_bytes == 0) return 0;
2228 
2229  FreeListNode* node = FreeListNode::FromAddress(start);
2230  node->set_size(heap_, size_in_bytes);
2231  Page* page = Page::FromAddress(start);
2232 
2233  // Early return to drop too-small blocks on the floor.
2234  if (size_in_bytes < kSmallListMin) {
2235  page->add_non_available_small_blocks(size_in_bytes);
2236  return size_in_bytes;
2237  }
2238 
2239  // Insert other blocks at the head of a free list of the appropriate
2240  // magnitude.
2241  if (size_in_bytes <= kSmallListMax) {
2242  small_list_.Free(node, size_in_bytes);
2243  page->add_available_in_small_free_list(size_in_bytes);
2244  } else if (size_in_bytes <= kMediumListMax) {
2245  medium_list_.Free(node, size_in_bytes);
2246  page->add_available_in_medium_free_list(size_in_bytes);
2247  } else if (size_in_bytes <= kLargeListMax) {
2248  large_list_.Free(node, size_in_bytes);
2249  page->add_available_in_large_free_list(size_in_bytes);
2250  } else {
2251  huge_list_.Free(node, size_in_bytes);
2252  page->add_available_in_huge_free_list(size_in_bytes);
2253  }
2254 
2255  ASSERT(IsVeryLong() || available() == SumFreeLists());
2256  return 0;
2257 }
2258 
2259 
2260 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2261  FreeListNode* node = NULL;
2262  Page* page = NULL;
2263 
2264  if (size_in_bytes <= kSmallAllocationMax) {
2265  node = small_list_.PickNodeFromList(node_size);
2266  if (node != NULL) {
2267  ASSERT(size_in_bytes <= *node_size);
2268  page = Page::FromAddress(node->address());
2269  page->add_available_in_small_free_list(-(*node_size));
2270  ASSERT(IsVeryLong() || available() == SumFreeLists());
2271  return node;
2272  }
2273  }
2274 
2275  if (size_in_bytes <= kMediumAllocationMax) {
2276  node = medium_list_.PickNodeFromList(node_size);
2277  if (node != NULL) {
2278  ASSERT(size_in_bytes <= *node_size);
2279  page = Page::FromAddress(node->address());
2280  page->add_available_in_medium_free_list(-(*node_size));
2281  ASSERT(IsVeryLong() || available() == SumFreeLists());
2282  return node;
2283  }
2284  }
2285 
2286  if (size_in_bytes <= kLargeAllocationMax) {
2287  node = large_list_.PickNodeFromList(node_size);
2288  if (node != NULL) {
2289  ASSERT(size_in_bytes <= *node_size);
2290  page = Page::FromAddress(node->address());
2291  page->add_available_in_large_free_list(-(*node_size));
2292  ASSERT(IsVeryLong() || available() == SumFreeLists());
2293  return node;
2294  }
2295  }
2296 
2297  int huge_list_available = huge_list_.available();
2298  FreeListNode* top_node = huge_list_.top();
2299  for (FreeListNode** cur = &top_node;
2300  *cur != NULL;
2301  cur = (*cur)->next_address()) {
2302  FreeListNode* cur_node = *cur;
2303  while (cur_node != NULL &&
2304  Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2305  int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2306  huge_list_available -= size;
2307  page = Page::FromAddress(cur_node->address());
2308  page->add_available_in_huge_free_list(-size);
2309  cur_node = cur_node->next();
2310  }
2311 
2312  *cur = cur_node;
2313  if (cur_node == NULL) {
2314  huge_list_.set_end(NULL);
2315  break;
2316  }
2317 
2318  ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
2319  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2320  int size = cur_as_free_space->Size();
2321  if (size >= size_in_bytes) {
2322  // Large enough node found. Unlink it from the list.
2323  node = *cur;
2324  *cur = node->next();
2325  *node_size = size;
2326  huge_list_available -= size;
2327  page = Page::FromAddress(node->address());
2328  page->add_available_in_huge_free_list(-size);
2329  break;
2330  }
2331  }
2332 
2333  huge_list_.set_top(top_node);
2334  if (huge_list_.top() == NULL) {
2335  huge_list_.set_end(NULL);
2336  }
2337  huge_list_.set_available(huge_list_available);
2338 
2339  if (node != NULL) {
2340  ASSERT(IsVeryLong() || available() == SumFreeLists());
2341  return node;
2342  }
2343 
2344  if (size_in_bytes <= kSmallListMax) {
2345  node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2346  if (node != NULL) {
2347  ASSERT(size_in_bytes <= *node_size);
2348  page = Page::FromAddress(node->address());
2349  page->add_available_in_small_free_list(-(*node_size));
2350  }
2351  } else if (size_in_bytes <= kMediumListMax) {
2352  node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2353  if (node != NULL) {
2354  ASSERT(size_in_bytes <= *node_size);
2355  page = Page::FromAddress(node->address());
2356  page->add_available_in_medium_free_list(-(*node_size));
2357  }
2358  } else if (size_in_bytes <= kLargeListMax) {
2359  node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2360  if (node != NULL) {
2361  ASSERT(size_in_bytes <= *node_size);
2362  page = Page::FromAddress(node->address());
2363  page->add_available_in_large_free_list(-(*node_size));
2364  }
2365  }
2366 
2367  ASSERT(IsVeryLong() || available() == SumFreeLists());
2368  return node;
2369 }
2370 
2371 
2372 // Allocation on the old space free list. If it succeeds then a new linear
2373 // allocation space has been set up with the top and limit of the space. If
2374 // the allocation fails then NULL is returned, and the caller can perform a GC
2375 // or allocate a new page before retrying.
2376 HeapObject* FreeList::Allocate(int size_in_bytes) {
2377  ASSERT(0 < size_in_bytes);
2378  ASSERT(size_in_bytes <= kMaxBlockSize);
2379  ASSERT(IsAligned(size_in_bytes, kPointerSize));
2380  // Don't free list allocate if there is linear space available.
2381  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
2382 
2383  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2384  // Mark the old linear allocation area with a free space map so it can be
2385  // skipped when scanning the heap. This also puts it back in the free list
2386  // if it is big enough.
2387  owner_->Free(owner_->top(), old_linear_size);
2388 
2389  owner_->heap()->incremental_marking()->OldSpaceStep(
2390  size_in_bytes - old_linear_size);
2391 
2392  int new_node_size = 0;
2393  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2394  if (new_node == NULL) {
2395  owner_->SetTopAndLimit(NULL, NULL);
2396  return NULL;
2397  }
2398 
2399  int bytes_left = new_node_size - size_in_bytes;
2400  ASSERT(bytes_left >= 0);
2401 
2402 #ifdef DEBUG
2403  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2404  reinterpret_cast<Object**>(new_node->address())[i] =
2405  Smi::FromInt(kCodeZapValue);
2406  }
2407 #endif
2408 
2409  // The old-space-step might have finished sweeping and restarted marking.
2410  // Verify that it did not turn the page of the new node into an evacuation
2411  // candidate.
2412  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2413 
2414  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2415 
2416  // Memory in the linear allocation area is counted as allocated. We may free
2417  // a little of this again immediately - see below.
2418  owner_->Allocate(new_node_size);
2419 
2420  if (owner_->heap()->inline_allocation_disabled()) {
2421  // Keep the linear allocation area empty if requested to do so, just
2422  // return area back to the free list instead.
2423  owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2424  ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
2425  } else if (bytes_left > kThreshold &&
2426  owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2427  FLAG_incremental_marking_steps) {
2428  int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2429  // We don't want to give too large linear areas to the allocator while
2430  // incremental marking is going on, because we won't check again whether
2431  // we want to do another increment until the linear area is used up.
2432  owner_->Free(new_node->address() + size_in_bytes + linear_size,
2433  new_node_size - size_in_bytes - linear_size);
2434  owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2435  new_node->address() + size_in_bytes + linear_size);
2436  } else if (bytes_left > 0) {
2437  // Normally we give the rest of the node to the allocator as its new
2438  // linear allocation area.
2439  owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2440  new_node->address() + new_node_size);
2441  } else {
2442  // TODO(gc) Try not freeing linear allocation region when bytes_left
2443  // are zero.
2444  owner_->SetTopAndLimit(NULL, NULL);
2445  }
2446 
2447  return new_node;
2448 }
2449 
2450 
2451 intptr_t FreeList::EvictFreeListItems(Page* p) {
2452  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2453  p->set_available_in_huge_free_list(0);
2454 
2455  if (sum < p->area_size()) {
2456  sum += small_list_.EvictFreeListItemsInList(p) +
2457  medium_list_.EvictFreeListItemsInList(p) +
2458  large_list_.EvictFreeListItemsInList(p);
2459  p->set_available_in_small_free_list(0);
2460  p->set_available_in_medium_free_list(0);
2461  p->set_available_in_large_free_list(0);
2462  }
2463 
2464  return sum;
2465 }
2466 
2467 
2468 bool FreeList::ContainsPageFreeListItems(Page* p) {
2469  return huge_list_.EvictFreeListItemsInList(p) ||
2470  small_list_.EvictFreeListItemsInList(p) ||
2471  medium_list_.EvictFreeListItemsInList(p) ||
2472  large_list_.EvictFreeListItemsInList(p);
2473 }
2474 
2475 
2476 void FreeList::RepairLists(Heap* heap) {
2477  small_list_.RepairFreeList(heap);
2478  medium_list_.RepairFreeList(heap);
2479  large_list_.RepairFreeList(heap);
2480  huge_list_.RepairFreeList(heap);
2481 }
2482 
2483 
2484 #ifdef DEBUG
2485 intptr_t FreeListCategory::SumFreeList() {
2486  intptr_t sum = 0;
2487  FreeListNode* cur = top();
2488  while (cur != NULL) {
2489  ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2490  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2491  sum += cur_as_free_space->Size();
2492  cur = cur->next();
2493  }
2494  return sum;
2495 }
2496 
2497 
2498 static const int kVeryLongFreeList = 500;
2499 
2500 
2501 int FreeListCategory::FreeListLength() {
2502  int length = 0;
2503  FreeListNode* cur = top();
2504  while (cur != NULL) {
2505  length++;
2506  cur = cur->next();
2507  if (length == kVeryLongFreeList) return length;
2508  }
2509  return length;
2510 }
2511 
2512 
2513 bool FreeList::IsVeryLong() {
2514  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2515  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2516  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2517  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2518  return false;
2519 }
2520 
2521 
2522 // This can take a very long time because it is linear in the number of entries
2523 // on the free list, so it should not be called if FreeListLength returns
2524 // kVeryLongFreeList.
2525 intptr_t FreeList::SumFreeLists() {
2526  intptr_t sum = small_list_.SumFreeList();
2527  sum += medium_list_.SumFreeList();
2528  sum += large_list_.SumFreeList();
2529  sum += huge_list_.SumFreeList();
2530  return sum;
2531 }
2532 #endif
2533 
2534 
2535 // -----------------------------------------------------------------------------
2536 // OldSpace implementation
2537 
2538 void PagedSpace::PrepareForMarkCompact() {
2539  // We don't have a linear allocation area while sweeping. It will be restored
2540  // on the first allocation after the sweep.
2541  EmptyAllocationInfo();
2542 
2543  // Stop lazy sweeping and clear marking bits for unswept pages.
2544  if (first_unswept_page_ != NULL) {
2545  Page* p = first_unswept_page_;
2546  do {
2547  // Do not use ShouldBeSweptLazily predicate here.
2548  // New evacuation candidates were selected but they still have
2549  // to be swept before collection starts.
2550  if (!p->WasSwept()) {
2551  Bitmap::Clear(p);
2552  if (FLAG_gc_verbose) {
2553  PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2554  reinterpret_cast<intptr_t>(p));
2555  }
2556  }
2557  p = p->next_page();
2558  } while (p != anchor());
2559  }
2560  first_unswept_page_ = Page::FromAddress(NULL);
2561  unswept_free_bytes_ = 0;
2562 
2563  // Clear the free list before a full GC---it will be rebuilt afterward.
2564  free_list_.Reset();
2565 }
2566 
2567 
2568 intptr_t PagedSpace::SizeOfObjects() {
2569  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
2570  return Size() - unswept_free_bytes_ - (limit() - top());
2571 }
2572 
2573 
2574 // After we have booted, we have created a map which represents free space
2575 // on the heap. If there was already a free list then the elements on it
2576 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2577 // fix them.
2578 void PagedSpace::RepairFreeListsAfterBoot() {
2579  free_list_.RepairLists(heap());
2580 }
2581 
2582 
2583 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2584  if (IsLazySweepingComplete()) return true;
2585 
2586  intptr_t freed_bytes = 0;
2587  Page* p = first_unswept_page_;
2588  do {
2589  Page* next_page = p->next_page();
2590  if (ShouldBeSweptLazily(p)) {
2591  if (FLAG_gc_verbose) {
2592  PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2593  reinterpret_cast<intptr_t>(p));
2594  }
2595  DecreaseUnsweptFreeBytes(p);
2596  freed_bytes +=
2597  MarkCompactCollector::
2598  SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
2599  this, NULL, p);
2600  }
2601  p = next_page;
2602  } while (p != anchor() && freed_bytes < bytes_to_sweep);
2603 
2604  if (p == anchor()) {
2605  first_unswept_page_ = Page::FromAddress(NULL);
2606  } else {
2607  first_unswept_page_ = p;
2608  }
2609 
2610  heap()->FreeQueuedChunks();
2611 
2612  return IsLazySweepingComplete();
2613 }
2614 
2615 
2616 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2617  if (allocation_info_.top() >= allocation_info_.limit()) return;
2618 
2619  if (Page::FromAllocationTop(allocation_info_.top())->
2620  IsEvacuationCandidate()) {
2621  // Create filler object to keep page iterable if it was iterable.
2622  int remaining =
2623  static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2624  heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2625 
2626  allocation_info_.set_top(NULL);
2627  allocation_info_.set_limit(NULL);
2628  }
2629 }
2630 
2631 
2632 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
2633  MarkCompactCollector* collector = heap()->mark_compact_collector();
2634  if (collector->AreSweeperThreadsActivated()) {
2635  if (collector->IsConcurrentSweepingInProgress()) {
2636  if (collector->RefillFreeLists(this) < size_in_bytes) {
2637  if (!collector->sequential_sweeping()) {
2638  collector->WaitUntilSweepingCompleted();
2639  return true;
2640  }
2641  }
2642  return false;
2643  }
2644  return true;
2645  } else {
2646  return AdvanceSweeper(size_in_bytes);
2647  }
2648 }
2649 
2650 
2651 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2652  // Allocation in this space has failed.
2653 
2654  // If there are unswept pages, advance the lazy sweeper a bounded number of
2655  // times until we find a size_in_bytes contiguous piece of memory.
2656  const int kMaxSweepingTries = 5;
2657  bool sweeping_complete = false;
2658 
2659  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
2660  sweeping_complete = EnsureSweeperProgress(size_in_bytes);
2661 
2662  // Retry the free list allocation.
2663  HeapObject* object = free_list_.Allocate(size_in_bytes);
2664  if (object != NULL) return object;
2665  }
2666 
2667  // Free list allocation failed and there is no next page. Fail if we have
2668  // hit the old generation size limit that should cause a garbage
2669  // collection.
2670  if (!heap()->always_allocate() &&
2671  heap()->OldGenerationAllocationLimitReached()) {
2672  return NULL;
2673  }
2674 
2675  // Try to expand the space and allocate in the new next page.
2676  if (Expand()) {
2677  ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2678  return free_list_.Allocate(size_in_bytes);
2679  }
2680 
2681  // Last ditch, sweep all the remaining pages to try to find space. This may
2682  // cause a pause.
2683  if (!IsLazySweepingComplete()) {
2684  AdvanceSweeper(kMaxInt);
2685 
2686  // Retry the free list allocation.
2687  HeapObject* object = free_list_.Allocate(size_in_bytes);
2688  if (object != NULL) return object;
2689  }
2690 
2691  // Finally, fail.
2692  return NULL;
2693 }
2694 
2695 
2696 #ifdef DEBUG
2697 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2698  CommentStatistic* comments_statistics =
2699  isolate->paged_space_comments_statistics();
2700  ReportCodeKindStatistics(isolate->code_kind_statistics());
2701  PrintF("Code comment statistics (\" [ comment-txt : size/ "
2702  "count (average)\"):\n");
2703  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2704  const CommentStatistic& cs = comments_statistics[i];
2705  if (cs.size > 0) {
2706  PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2707  cs.size/cs.count);
2708  }
2709  }
2710  PrintF("\n");
2711 }
2712 
2713 
2714 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2715  CommentStatistic* comments_statistics =
2716  isolate->paged_space_comments_statistics();
2717  ClearCodeKindStatistics(isolate->code_kind_statistics());
2718  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2719  comments_statistics[i].Clear();
2720  }
2721  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2722  comments_statistics[CommentStatistic::kMaxComments].size = 0;
2723  comments_statistics[CommentStatistic::kMaxComments].count = 0;
2724 }
2725 
2726 
2727 // Adds comment to 'comment_statistics' table. Performance OK as long as
2728 // 'kMaxComments' is small
2729 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2730  CommentStatistic* comments_statistics =
2731  isolate->paged_space_comments_statistics();
2732  // Do not count empty comments
2733  if (delta <= 0) return;
2734  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2735  // Search for a free or matching entry in 'comments_statistics': 'cs'
2736  // points to result.
2737  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2738  if (comments_statistics[i].comment == NULL) {
2739  cs = &comments_statistics[i];
2740  cs->comment = comment;
2741  break;
2742  } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2743  cs = &comments_statistics[i];
2744  break;
2745  }
2746  }
2747  // Update entry for 'comment'
2748  cs->size += delta;
2749  cs->count += 1;
2750 }
2751 
2752 
2753 // Call for each nested comment start (start marked with '[ xxx', end marked
2754 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2755 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2756  ASSERT(!it->done());
2757  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2758  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2759  if (tmp[0] != '[') {
2760  // Not a nested comment; skip
2761  return;
2762  }
2763 
2764  // Search for end of nested comment or a new nested comment
2765  const char* const comment_txt =
2766  reinterpret_cast<const char*>(it->rinfo()->data());
2767  const byte* prev_pc = it->rinfo()->pc();
2768  int flat_delta = 0;
2769  it->next();
2770  while (true) {
2771  // All nested comments must be terminated properly, and therefore exit
2772  // from loop.
2773  ASSERT(!it->done());
2774  if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2775  const char* const txt =
2776  reinterpret_cast<const char*>(it->rinfo()->data());
2777  flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2778  if (txt[0] == ']') break; // End of nested comment
2779  // A new comment
2780  CollectCommentStatistics(isolate, it);
2781  // Skip code that was covered with previous comment
2782  prev_pc = it->rinfo()->pc();
2783  }
2784  it->next();
2785  }
2786  EnterComment(isolate, comment_txt, flat_delta);
2787 }
2788 
2789 
2790 // Collects code size statistics:
2791 // - by code kind
2792 // - by code comment
2793 void PagedSpace::CollectCodeStatistics() {
2794  Isolate* isolate = heap()->isolate();
2795  HeapObjectIterator obj_it(this);
2796  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2797  if (obj->IsCode()) {
2798  Code* code = Code::cast(obj);
2799  isolate->code_kind_statistics()[code->kind()] += code->Size();
2800  RelocIterator it(code);
2801  int delta = 0;
2802  const byte* prev_pc = code->instruction_start();
2803  while (!it.done()) {
2804  if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2805  delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2806  CollectCommentStatistics(isolate, &it);
2807  prev_pc = it.rinfo()->pc();
2808  }
2809  it.next();
2810  }
2811 
2812  ASSERT(code->instruction_start() <= prev_pc &&
2813  prev_pc <= code->instruction_end());
2814  delta += static_cast<int>(code->instruction_end() - prev_pc);
2815  EnterComment(isolate, "NoComment", delta);
2816  }
2817  }
2818 }
2819 
2820 
2821 void PagedSpace::ReportStatistics() {
2822  int pct = static_cast<int>(Available() * 100 / Capacity());
2823  PrintF(" capacity: %" V8_PTR_PREFIX "d"
2824  ", waste: %" V8_PTR_PREFIX "d"
2825  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2826  Capacity(), Waste(), Available(), pct);
2827 
2828  if (was_swept_conservatively_) return;
2829  ClearHistograms(heap()->isolate());
2830  HeapObjectIterator obj_it(this);
2831  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2832  CollectHistogramInfo(obj);
2833  ReportHistogram(heap()->isolate(), true);
2834 }
2835 #endif
2836 
2837 
2838 // -----------------------------------------------------------------------------
2839 // MapSpace implementation
2840 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2841 // there is at least one non-inlined virtual function. I would prefer to hide
2842 // the VerifyObject definition behind VERIFY_HEAP.
2843 
2844 void MapSpace::VerifyObject(HeapObject* object) {
2845  CHECK(object->IsMap());
2846 }
2847 
2848 
2849 // -----------------------------------------------------------------------------
2850 // CellSpace and PropertyCellSpace implementation
2851 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2852 // there is at least one non-inlined virtual function. I would prefer to hide
2853 // the VerifyObject definition behind VERIFY_HEAP.
2854 
2855 void CellSpace::VerifyObject(HeapObject* object) {
2856  CHECK(object->IsCell());
2857 }
2858 
2859 
2860 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2861  CHECK(object->IsPropertyCell());
2862 }
2863 
2864 
2865 // -----------------------------------------------------------------------------
2866 // LargeObjectIterator
2867 
2868 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2869  current_ = space->first_page_;
2870  size_func_ = NULL;
2871 }
2872 
2873 
2874 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2875  HeapObjectCallback size_func) {
2876  current_ = space->first_page_;
2877  size_func_ = size_func;
2878 }
2879 
2880 
2881 HeapObject* LargeObjectIterator::Next() {
2882  if (current_ == NULL) return NULL;
2883 
2884  HeapObject* object = current_->GetObject();
2885  current_ = current_->next_page();
2886  return object;
2887 }
2888 
2889 
2890 // -----------------------------------------------------------------------------
2891 // LargeObjectSpace
2892 static bool ComparePointers(void* key1, void* key2) {
2893  return key1 == key2;
2894 }
2895 
2896 
2897 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2898  intptr_t max_capacity,
2899  AllocationSpace id)
2900  : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2901  max_capacity_(max_capacity),
2902  first_page_(NULL),
2903  size_(0),
2904  page_count_(0),
2905  objects_size_(0),
2906  chunk_map_(ComparePointers, 1024) {}
2907 
2908 
2909 bool LargeObjectSpace::SetUp() {
2910  first_page_ = NULL;
2911  size_ = 0;
2912  maximum_committed_ = 0;
2913  page_count_ = 0;
2914  objects_size_ = 0;
2915  chunk_map_.Clear();
2916  return true;
2917 }
2918 
2919 
2920 void LargeObjectSpace::TearDown() {
2921  while (first_page_ != NULL) {
2922  LargePage* page = first_page_;
2923  first_page_ = first_page_->next_page();
2924  LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2925 
2926  ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2927  heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2928  space, kAllocationActionFree, page->size());
2929  heap()->isolate()->memory_allocator()->Free(page);
2930  }
2931  SetUp();
2932 }
2933 
2934 
2935 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2936  Executability executable) {
2937  // Check if we want to force a GC before growing the old space further.
2938  // If so, fail the allocation.
2939  if (!heap()->always_allocate() &&
2940  heap()->OldGenerationAllocationLimitReached()) {
2941  return Failure::RetryAfterGC(identity());
2942  }
2943 
2944  if (Size() + object_size > max_capacity_) {
2945  return Failure::RetryAfterGC(identity());
2946  }
2947 
2948  LargePage* page = heap()->isolate()->memory_allocator()->
2949  AllocateLargePage(object_size, this, executable);
2950  if (page == NULL) return Failure::RetryAfterGC(identity());
2951  ASSERT(page->area_size() >= object_size);
2952 
2953  size_ += static_cast<int>(page->size());
2954  objects_size_ += object_size;
2955  page_count_++;
2956  page->set_next_page(first_page_);
2957  first_page_ = page;
2958 
2959  if (size_ > maximum_committed_) {
2960  maximum_committed_ = size_;
2961  }
2962 
2963  // Register all MemoryChunk::kAlignment-aligned chunks covered by
2964  // this large page in the chunk map.
2965  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2966  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2967  for (uintptr_t key = base; key <= limit; key++) {
2968  HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2969  static_cast<uint32_t>(key),
2970  true);
2971  ASSERT(entry != NULL);
2972  entry->value = page;
2973  }
2974 
2975  HeapObject* object = page->GetObject();
2976 
2977  if (Heap::ShouldZapGarbage()) {
2978  // Make the object consistent so the heap can be verified in OldSpaceStep.
2979  // We only need to do this in debug builds or if verify_heap is on.
2980  reinterpret_cast<Object**>(object->address())[0] =
2981  heap()->fixed_array_map();
2982  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2983  }
2984 
2985  heap()->incremental_marking()->OldSpaceStep(object_size);
2986  return object;
2987 }
2988 
2989 
2990 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2991  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
2992  size_t size = 0;
2993  LargePage* current = first_page_;
2994  while (current != NULL) {
2995  size += current->CommittedPhysicalMemory();
2996  current = current->next_page();
2997  }
2998  return size;
2999 }
3000 
3001 
3002 // GC support
3003 MaybeObject* LargeObjectSpace::FindObject(Address a) {
3004  LargePage* page = FindPage(a);
3005  if (page != NULL) {
3006  return page->GetObject();
3007  }
3008  return Failure::Exception();
3009 }
3010 
3011 
3012 LargePage* LargeObjectSpace::FindPage(Address a) {
3013  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3014  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3015  static_cast<uint32_t>(key),
3016  false);
3017  if (e != NULL) {
3018  ASSERT(e->value != NULL);
3019  LargePage* page = reinterpret_cast<LargePage*>(e->value);
3020  ASSERT(page->is_valid());
3021  if (page->Contains(a)) {
3022  return page;
3023  }
3024  }
3025  return NULL;
3026 }
3027 
3028 
3029 void LargeObjectSpace::FreeUnmarkedObjects() {
3030  LargePage* previous = NULL;
3031  LargePage* current = first_page_;
3032  while (current != NULL) {
3033  HeapObject* object = current->GetObject();
3034  // Can this large page contain pointers to non-trivial objects? No other
3035  // pointer object is this big.
3036  bool is_pointer_object = object->IsFixedArray();
3037  MarkBit mark_bit = Marking::MarkBitFrom(object);
3038  if (mark_bit.Get()) {
3039  mark_bit.Clear();
3040  Page::FromAddress(object->address())->ResetProgressBar();
3041  Page::FromAddress(object->address())->ResetLiveBytes();
3042  previous = current;
3043  current = current->next_page();
3044  } else {
3045  LargePage* page = current;
3046  // Cut the chunk out from the chunk list.
3047  current = current->next_page();
3048  if (previous == NULL) {
3049  first_page_ = current;
3050  } else {
3051  previous->set_next_page(current);
3052  }
3053 
3054  // Free the chunk.
3055  MarkCompactCollector::ReportDeleteIfNeeded(
3056  object, heap()->isolate());
3057  size_ -= static_cast<int>(page->size());
3058  objects_size_ -= object->Size();
3059  page_count_--;
3060 
3061  // Remove entries belonging to this page.
3062  // Use variable alignment to help pass length check (<= 80 characters)
3063  // of single line in tools/presubmit.py.
3064  const intptr_t alignment = MemoryChunk::kAlignment;
3065  uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
3066  uintptr_t limit = base + (page->size()-1)/alignment;
3067  for (uintptr_t key = base; key <= limit; key++) {
3068  chunk_map_.Remove(reinterpret_cast<void*>(key),
3069  static_cast<uint32_t>(key));
3070  }
3071 
3072  if (is_pointer_object) {
3073  heap()->QueueMemoryChunkForFree(page);
3074  } else {
3075  heap()->isolate()->memory_allocator()->Free(page);
3076  }
3077  }
3078  }
3079  heap()->FreeQueuedChunks();
3080 }
3081 
3082 
3083 bool LargeObjectSpace::Contains(HeapObject* object) {
3084  Address address = object->address();
3085  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3086 
3087  bool owned = (chunk->owner() == this);
3088 
3089  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
3090 
3091  return owned;
3092 }
3093 
3094 
3095 #ifdef VERIFY_HEAP
3096 // We do not assume that the large object iterator works, because it depends
3097 // on the invariants we are checking during verification.
3098 void LargeObjectSpace::Verify() {
3099  for (LargePage* chunk = first_page_;
3100  chunk != NULL;
3101  chunk = chunk->next_page()) {
3102  // Each chunk contains an object that starts at the large object page's
3103  // object area start.
3104  HeapObject* object = chunk->GetObject();
3105  Page* page = Page::FromAddress(object->address());
3106  CHECK(object->address() == page->area_start());
3107 
3108  // The first word should be a map, and we expect all map pointers to be
3109  // in map space.
3110  Map* map = object->map();
3111  CHECK(map->IsMap());
3112  CHECK(heap()->map_space()->Contains(map));
3113 
3114  // We have only code, sequential strings, external strings
3115  // (sequential strings that have been morphed into external
3116  // strings), fixed arrays, and byte arrays in large object space.
3117  CHECK(object->IsCode() || object->IsSeqString() ||
3118  object->IsExternalString() || object->IsFixedArray() ||
3119  object->IsFixedDoubleArray() || object->IsByteArray());
3120 
3121  // The object itself should look OK.
3122  object->Verify();
3123 
3124  // Byte arrays and strings don't have interior pointers.
3125  if (object->IsCode()) {
3126  VerifyPointersVisitor code_visitor;
3127  object->IterateBody(map->instance_type(),
3128  object->Size(),
3129  &code_visitor);
3130  } else if (object->IsFixedArray()) {
3131  FixedArray* array = FixedArray::cast(object);
3132  for (int j = 0; j < array->length(); j++) {
3133  Object* element = array->get(j);
3134  if (element->IsHeapObject()) {
3135  HeapObject* element_object = HeapObject::cast(element);
3136  CHECK(heap()->Contains(element_object));
3137  CHECK(element_object->map()->IsMap());
3138  }
3139  }
3140  }
3141  }
3142 }
3143 #endif
3144 
3145 
3146 #ifdef DEBUG
3147 void LargeObjectSpace::Print() {
3148  LargeObjectIterator it(this);
3149  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3150  obj->Print();
3151  }
3152 }
3153 
3154 
3155 void LargeObjectSpace::ReportStatistics() {
3156  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3157  int num_objects = 0;
3158  ClearHistograms(heap()->isolate());
3159  LargeObjectIterator it(this);
3160  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3161  num_objects++;
3162  CollectHistogramInfo(obj);
3163  }
3164 
3165  PrintF(" number of objects %d, "
3166  "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
3167  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3168 }
3169 
3170 
3171 void LargeObjectSpace::CollectCodeStatistics() {
3172  Isolate* isolate = heap()->isolate();
3173  LargeObjectIterator obj_it(this);
3174  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3175  if (obj->IsCode()) {
3176  Code* code = Code::cast(obj);
3177  isolate->code_kind_statistics()[code->kind()] += code->Size();
3178  }
3179  }
3180 }
3181 
3182 
3183 void Page::Print() {
3184  // Make a best-effort to print the objects in the page.
3185  PrintF("Page@%p in %s\n",
3186  this->address(),
3187  AllocationSpaceName(this->owner()->identity()));
3188  printf(" --------------------------------------\n");
3189  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3190  unsigned mark_size = 0;
3191  for (HeapObject* object = objects.Next();
3192  object != NULL;
3193  object = objects.Next()) {
3194  bool is_marked = Marking::MarkBitFrom(object).Get();
3195  PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3196  if (is_marked) {
3197  mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3198  }
3199  object->ShortPrint();
3200  PrintF("\n");
3201  }
3202  printf(" --------------------------------------\n");
3203  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3204 }
3205 
3206 #endif // DEBUG
3207 
3208 } } // namespace v8::internal
byte * Address
Definition: globals.h:186
static const int kHeaderSize
Definition: objects.h:4653
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
bool CommitRawMemory(Address start, size_t length)
Definition: spaces.cc:248
virtual intptr_t Size()
Definition: spaces.h:2821
#define SLOW_ASSERT(condition)
Definition: checks.h:306
void(* MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
Definition: v8.h:4038
void Allocate(int bytes)
Definition: spaces.h:1849
#define CHECK_EQ(expected, value)
Definition: checks.h:252
intptr_t Concatenate(FreeListCategory *category)
Definition: spaces.cc:2076
void set_next_page(Page *page)
Definition: spaces-inl.h:250
MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory *vm, Address start, size_t commit_size, size_t reserved_size)
Definition: spaces.cc:887
void ZapBlock(Address start, size_t size)
Definition: spaces.cc:797
bool GrowTo(int new_capacity)
Definition: spaces.cc:1587
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1994
void RepairFreeList(Heap *heap)
Definition: spaces.cc:2188
void SetNewLimit(Address limit)
Definition: heap.h:438
#define MSAN_MEMORY_IS_INITIALIZED(p, s)
Definition: msan.h:46
void RepairFreeListsAfterBoot()
Definition: spaces.cc:2578
#define V8PRIxPTR
Definition: globals.h:228
FreeListCategory * medium_list()
Definition: spaces.h:1666
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
Definition: utils.h:217
CodeRange * code_range()
Definition: isolate.h:865
intptr_t Available()
Definition: spaces.h:1783
#define INSTANCE_TYPE_LIST(V)
Definition: objects.h:342
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
Definition: spaces.cc:463
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:2008
bool Contains(Address addr)
Definition: spaces.h:377
friend class PageIterator
Definition: spaces.h:1993
static void ReportDeleteIfNeeded(HeapObject *obj, Isolate *isolate)
void PrintF(const char *format,...)
Definition: v8utils.cc:40
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
bool was_swept_conservatively()
Definition: spaces.h:1883
void RepairLists(Heap *heap)
Definition: spaces.cc:2476
void set_next(FreeListNode *next)
Definition: spaces.cc:2060
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
Definition: spaces.cc:817
Isolate * isolate()
Definition: heap-inl.h:624
bool SetUp(const size_t requested_size)
Definition: spaces.cc:136
bool CommitArea(size_t requested)
Definition: spaces.cc:513
static Smi * FromInt(int value)
Definition: objects-inl.h:1209
virtual intptr_t Waste()
Definition: spaces.h:1797
#define LOG(isolate, Call)
Definition: log.h:86
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2855
const int KB
Definition: globals.h:245
intptr_t EvictFreeListItemsInList(Page *p)
Definition: spaces.cc:2107
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2844
static bool ShouldBeSweptLazily(Page *p)
Definition: spaces.h:1888
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:305
LargeObjectIterator(LargeObjectSpace *space)
Definition: spaces.cc:2868
static HeapObject * cast(Object *obj)
T Max(T a, T b)
Definition: utils.h:227
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:288
void TakeControl(VirtualMemory *from)
Definition: platform.h:481
void ResetAllocationInfo()
Definition: spaces.cc:1359
void SetNewSpacePageFlags(NewSpacePage *chunk)
bool UncommitRawMemory(Address start, size_t length)
Definition: spaces.cc:253
Address space_start()
Definition: spaces.h:2169
intptr_t SizeOfFirstPage()
Definition: spaces.cc:1050
static const intptr_t kPageAlignmentMask
Definition: spaces.h:823
intptr_t available_in_small_free_list_
Definition: spaces.h:736
HeapObjectCallback GcSafeSizeOfOldObjectFunction()
Definition: heap.h:1429
static void Clear(MemoryChunk *chunk)
Definition: spaces-inl.h:43
static Failure * Exception()
Definition: objects-inl.h:1244
PromotionQueue * promotion_queue()
Definition: heap.h:1302
const int kMaxInt
Definition: globals.h:248
AllocationAction
Definition: v8.h:4032
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2620
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
Definition: spaces.cc:728
const char * AllocationSpaceName(AllocationSpace space)
static const int kWriteBarrierCounterGranularity
Definition: spaces.h:391
#define ASSERT(condition)
Definition: checks.h:329
void set_reserved_memory(VirtualMemory *reservation)
Definition: spaces.h:357
void ClearFlag(int flag)
Definition: spaces.h:444
#define ASSERT_GE(v1, v2)
Definition: checks.h:332
MemoryChunk * AllocateChunk(intptr_t reserve_area_size, intptr_t commit_area_size, Executability executable, Space *space)
Definition: spaces.cc:586
void Step(intptr_t allocated, CompletionAction action)
FreeListNode * next()
Definition: spaces.cc:2036
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
Definition: spaces.cc:739
static const int kFlagsOffset
Definition: spaces.h:655
static void UpdateHighWaterMark(Address mark)
Definition: spaces-inl.h:218
void RecordAllocation(HeapObject *obj)
Definition: spaces.cc:1978
NewSpacePage * current_page()
Definition: spaces.h:2228
#define CHECK(condition)
Definition: checks.h:75
#define INCREMENT(type, size, name, camel_name)
MemoryAllocator(Isolate *isolate)
Definition: spaces.cc:277
#define STRING_TYPE_LIST(V)
Definition: objects.h:459
static const int kPageSize
Definition: spaces.h:814
void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value)
CodeRange(Isolate *isolate)
Definition: spaces.cc:127
static Code * cast(Object *obj)
const intptr_t kHeapObjectTagMask
Definition: v8.h:5475
void FreeMemory(VirtualMemory *reservation, Executability executable)
Definition: spaces.cc:321
static bool IsAtEnd(Address addr)
Definition: spaces.h:2080
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.cc:2897
bool CommitMemory(Address addr, size_t size, Executability executable)
Definition: spaces.cc:310
bool ContainsPageFreeListItems(Page *p)
Definition: spaces.cc:2468
AllocationStats accounting_stats_
Definition: spaces.h:1959
void Free(MemoryChunk *chunk)
Definition: spaces.cc:751
void set_size(int value)
Address ReserveAlignedMemory(size_t requested, size_t alignment, VirtualMemory *controller)
Definition: spaces.cc:368
uint8_t byte
Definition: globals.h:185
Executability executable()
Definition: spaces.h:903
NewSpacePage * first_page()
Definition: spaces.h:2227
SlotsBuffer * slots_buffer_
Definition: spaces.h:723
size_t CommittedPhysicalMemory()
Definition: spaces.h:697
void ReleasePage(Page *page, bool unlink)
Definition: spaces.cc:1118
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
Definition: spaces.cc:837
void set_available(int available)
Definition: spaces.h:1572
FreeList(PagedSpace *owner)
Definition: spaces.cc:2202
#define UNREACHABLE()
Definition: checks.h:52
FreeListCategory * small_list()
Definition: spaces.h:1665
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, size_t alignment, Executability executable, VirtualMemory *controller)
Definition: spaces.cc:382
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
int(* HeapObjectCallback)(HeapObject *obj)
Definition: v8globals.h:248
LargePage * FindPage(Address a)
Definition: spaces.cc:3012
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2860
intptr_t non_available_small_blocks_
Definition: spaces.h:740
FreeListNode ** next_address()
Definition: spaces.cc:2049
bool AdvanceSweeper(intptr_t bytes_to_sweep)
Definition: spaces.cc:2583
intptr_t CommittedMemory()
Definition: spaces.h:1742
bool Contains(Address a)
Definition: spaces.h:2427
bool Contains(Address a)
Definition: spaces-inl.h:179
#define OFFSET_OF(type, field)
Definition: globals.h:325
FreeListNode * PickNodeFromList(int *node_size)
Definition: spaces.cc:2139
void SetFlag(int flag)
Definition: spaces.h:440
intptr_t CommittedMemory()
Definition: spaces.h:2461
intptr_t Concatenate(FreeList *free_list)
Definition: spaces.cc:2208
static NewSpacePage * FromAddress(Address address_in_page)
Definition: spaces.h:2089
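NewSpacePage::FromAddress (and the companion FromLimit lookup below) recovers the page header from an arbitrary interior address by clearing the low-order bits, which works because pages are carved out at a fixed power-of-two alignment. A minimal standalone sketch of that masking trick follows; the 1 MB alignment constant and the PageHeader struct are placeholders for illustration, not V8's actual definitions.

#include <cstdint>

// Pages are assumed to start on a power-of-two boundary, so the header for
// any address inside a page is found by clearing the low bits.
static const uintptr_t kPageAlignment = 1 << 20;  // assumed 1 MB for the sketch
static const uintptr_t kPageAlignmentMask = kPageAlignment - 1;

struct PageHeader {
  // In V8 the header (MemoryChunk / NewSpacePage) stores size, flags, owner, ...
  uintptr_t size;
};

static inline PageHeader* PageFromAddress(uintptr_t address_in_page) {
  // Round the address down to the page start, where the header lives.
  return reinterpret_cast<PageHeader*>(address_in_page & ~kPageAlignmentMask);
}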
friend class NewSpacePageIterator
Definition: spaces.h:2298
bool UncommitBlock(Address start, size_t size)
Definition: spaces.cc:790
static Failure * RetryAfterGC()
Definition: objects-inl.h:1255
static int CodePageAreaStartOffset()
Definition: spaces.cc:873
void EvictEvacuationCandidatesFromFreeLists()
Definition: spaces.cc:2616
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:915
PropertyCellSpace * property_cell_space()
Definition: heap.h:643
const int kPointerSize
Definition: globals.h:268
MemoryAllocator * memory_allocator()
Definition: isolate.h:884
bool IsFlagSet(int flag)
Definition: spaces.h:456
static Address & Address_at(Address addr)
Definition: v8memory.h:79
void QueueMemoryChunkForFree(MemoryChunk *chunk)
Definition: heap.cc:7736
static int CodePageGuardSize()
Definition: spaces.cc:868
const Address kZapValue
Definition: v8globals.h:82
const int kHeapObjectTag
Definition: v8.h:5473
bool IsAligned(T value, U alignment)
Definition: utils.h:211
T Remove(int i)
Definition: list-inl.h:125
void InitializeReservedMemory()
Definition: spaces.h:353
virtual intptr_t SizeOfObjects()
Definition: spaces.cc:2568
size_t CommittedPhysicalMemory()
Definition: spaces.cc:984
void decrement_scan_on_scavenge_pages()
Definition: heap.h:1295
intptr_t Available()
Definition: spaces.h:2476
bool Commit(void *address, size_t size, bool is_executable)
intptr_t EvictFreeListItems(Page *p)
Definition: spaces.cc:2451
void DecreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1913
Entry * Lookup(void *key, uint32_t hash, bool insert, AllocationPolicy allocator=AllocationPolicy())
Definition: hashmap.h:131
static const char * Kind2String(Kind kind)
Definition: objects.cc:10803
bool inline_allocation_disabled()
Definition: heap.h:1607
bool WasSweptPrecisely()
Definition: spaces.h:834
int Free(Address start, int size_in_bytes)
Definition: spaces.cc:2226
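The Free, PickNodeFromList and Concatenate entries on this page belong to V8's segregated free list, which keeps separate lists for small, medium, large and huge blocks so allocation can pick a bucket by size and only scan within it. The following self-contained sketch shows the general idea; the bucket thresholds, node layout and class name are invented for illustration and do not match V8's implementation.

#include <cstddef>

// A freed region is reused as a list node (V8 stores size and next pointer
// inside the dead object itself); here it is a plain struct.
struct FreeNode {
  size_t size;
  FreeNode* next;
};

class SegregatedFreeList {
 public:
  void Free(FreeNode* node, size_t size) {
    node->size = size;
    size_t b = BucketFor(size);
    node->next = buckets_[b];
    buckets_[b] = node;
  }

  // Search the bucket for the requested size first, then larger buckets.
  FreeNode* Allocate(size_t size) {
    for (size_t b = BucketFor(size); b < kNumBuckets; ++b) {
      FreeNode** link = &buckets_[b];
      for (FreeNode* node = *link; node != nullptr;
           link = &node->next, node = *link) {
        if (node->size >= size) {
          *link = node->next;  // unlink and hand out this block
          return node;
        }
      }
    }
    return nullptr;  // caller would fall back to a slow path, e.g. a new page
  }

 private:
  static const size_t kNumBuckets = 4;  // small, medium, large, huge

  size_t BucketFor(size_t size) const {
    // Thresholds are arbitrary for this sketch.
    if (size <= 256) return 0;
    if (size <= 2048) return 1;
    if (size <= 16384) return 2;
    return 3;
  }

  FreeNode* buckets_[kNumBuckets] = {nullptr, nullptr, nullptr, nullptr};
};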
void initialize_scan_on_scavenge(bool scan)
Definition: spaces.h:363
OldSpace * old_pointer_space()
Definition: heap.h:638
T RoundUp(T x, intptr_t m)
Definition: utils.h:144
intptr_t available_in_large_free_list_
Definition: spaces.h:738
bool ContainsPageFreeListItemsInList(Page *p)
Definition: spaces.cc:2129
size_t size() const
Definition: spaces.h:595
void IncreaseCapacity(int size)
Definition: spaces.cc:1113
SemiSpaceId id()
Definition: spaces.h:2254
void set_top(FreeListNode *top)
Definition: spaces.h:1562
void RecordPromotion(HeapObject *obj)
Definition: spaces.cc:1986
bool contains(Address address)
Definition: spaces.h:960
OldSpace * code_space()
Definition: heap.h:640
#define V8_PTR_PREFIX
Definition: globals.h:220
void set_prev_page(Page *page)
Definition: spaces-inl.h:256
bool IsPowerOf2(T x)
Definition: utils.h:51
bool WasSwept()
Definition: spaces.h:836
static int CodePageAreaEndOffset()
Definition: spaces.cc:880
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1821
HeapObject * GetObject()
Definition: spaces.h:872
#define CHECK_NE(unexpected, value)
Definition: checks.h:256
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
Definition: spaces-inl.h:163
void set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1352
void FreeQueuedChunks()
Definition: heap.cc:7742
CellSpace * cell_space()
Definition: heap.h:642
#define CHECK_LE(a, b)
Definition: checks.h:263
MaybeObject * FindObject(Address a)
Definition: spaces.cc:3003
FreeListCategory * large_list()
Definition: spaces.h:1667
VirtualMemory reservation_
Definition: spaces.h:712
FreeListCategory * huge_list()
Definition: spaces.h:1668
static const int kObjectStartOffset
Definition: spaces.h:592
void set_prev_page(NewSpacePage *page)
Definition: spaces.h:2065
bool Contains(HeapObject *obj)
Definition: spaces.cc:3083
Space * owner() const
Definition: spaces.h:332
static void Swap(SemiSpace *from, SemiSpace *to)
Definition: spaces.cc:1686
void InitializeAsAnchor(PagedSpace *owner)
Definition: spaces.cc:419
int InitialSemiSpaceSize()
Definition: heap.h:598
static bool ReleaseRegion(void *base, size_t size)
SemiSpaceIterator(NewSpace *space)
Definition: spaces.cc:1786
static const intptr_t kLiveBytesOffset
Definition: spaces.h:570
intptr_t Capacity()
Definition: spaces.h:2455
static bool CommitRegion(void *base, size_t size, bool is_executable)
void set_parallel_sweeping(ParallelSweepingState state)
Definition: spaces.h:490
void set_next_page(NewSpacePage *page)
Definition: spaces.h:2057
void Sort(int(*cmp)(const T *x, const T *y))
Definition: list-inl.h:216
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space)
Definition: spaces.h:2690
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:2099
virtual intptr_t Size()
Definition: spaces.h:1788
Heap * heap() const
Definition: spaces.h:900
IncrementalMarking * incremental_marking()
Definition: heap.h:1781
Page * prev_page()
Definition: spaces-inl.h:244
void IncrementLiveBytes(int by)
Definition: spaces.h:510
#define SET_NAME(name)
void SetUp(Address start, int initial_capacity, int maximum_capacity)
Definition: spaces.cc:1509
void SetTopAndLimit(Address top, Address limit)
Definition: spaces.h:1832
SemiSpace * semi_space()
Definition: spaces.h:2069
static FreeListNode * FromAddress(Address address)
Definition: spaces.h:1504
SemiSpace(Heap *heap, SemiSpaceId semispace)
Definition: spaces.h:2140
NewSpacePage * next_page() const
Definition: spaces.h:2053
bool IsLazySweepingComplete()
Definition: spaces.h:1929
void Free(FreeListNode *node, int size_in_bytes)
Definition: spaces.cc:2178
static bool UncommitRegion(void *base, size_t size)
static const intptr_t kAlignment
Definition: spaces.h:563
void set_owner(Space *space)
Definition: spaces.h:342
void * Remove(void *key, uint32_t hash)
Definition: hashmap.h:162
void RememberUnmappedPage(Address page, bool compacted)
Definition: heap.cc:7790
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, const size_t commit_size, size_t *allocated)
Definition: spaces.cc:211
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:925
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
Definition: spaces.cc:804
LargePage * next_page() const
Definition: spaces.h:876
static const intptr_t kCopyOnFlipFlagsMask
Definition: spaces.h:2046
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
intptr_t RefillFreeLists(PagedSpace *space)
void set_prev_chunk(MemoryChunk *prev)
Definition: spaces.h:328
InstanceType instance_type()
Definition: objects-inl.h:4012
friend class LargeObjectIterator
Definition: spaces.h:2888
bool IsEvacuationCandidate()
Definition: spaces.h:657
static bool ShouldZapGarbage()
Definition: heap.h:1486
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1369
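HeapObject::FromAddress together with kHeapObjectTag reflects V8's pointer tagging: a raw, aligned heap address is turned into a tagged heap-object pointer by adding a small tag in the low bits, and the tag is subtracted again when the raw address is needed. The sketch below shows that round trip; the tag value of 1 follows the usual low-bit convention and should be read as illustrative rather than as V8's definition.

#include <cstdint>

typedef uintptr_t Address;

// Heap object "pointers" carry a low-bit tag so the runtime can tell them
// apart from small integers; the tag value here is an assumption.
static const uintptr_t kTag = 1;

struct TaggedHeapObject;  // opaque in this sketch

static inline TaggedHeapObject* FromAddress(Address addr) {
  // addr must be at least 2-byte aligned so the low bit is free for the tag.
  return reinterpret_cast<TaggedHeapObject*>(addr + kTag);
}

static inline Address ToAddress(TaggedHeapObject* obj) {
  return reinterpret_cast<Address>(obj) - kTag;
}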
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.cc:937
T RoundDown(T x, intptr_t m)
Definition: utils.h:136
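Several entries on this page are the small power-of-two helpers from utils.h (RoundUp, RoundDown, IsAligned, IsPowerOf2). Their effect is the standard alignment arithmetic; the standalone functions below sketch what they compute under the assumption that the alignment argument is a power of two, and are not V8's templated versions.

#include <cstdint>
#include <cassert>

static inline bool IsPowerOf2(uintptr_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

static inline uintptr_t RoundDown(uintptr_t x, uintptr_t m) {
  assert(IsPowerOf2(m));
  return x & ~(m - 1);  // clear the low bits to align downward
}

static inline uintptr_t RoundUp(uintptr_t x, uintptr_t m) {
  return RoundDown(x + m - 1, m);  // bump past the boundary, then align down
}

static inline bool IsAligned(uintptr_t value, uintptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}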
void USE(T)
Definition: globals.h:341
Counters * counters()
Definition: isolate.h:859
static FixedArray * cast(Object *obj)
void InsertAfter(MemoryChunk *other)
Definition: spaces.cc:562
void set_next_page(LargePage *page)
Definition: spaces.h:880
void Print(const v8::FunctionCallbackInfo< v8::Value > &args)
void UpdateInlineAllocationLimit(int size_in_bytes)
Definition: spaces.cc:1371
MapSpace * map_space()
Definition: heap.h:641
Page * next_page()
Definition: spaces-inl.h:238
ObjectSpace
Definition: v8.h:4019
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:39
static size_t AllocateAlignment()
intptr_t write_barrier_counter_
Definition: spaces.h:725
void set_age_mark(Address mark)
Definition: spaces.cc:1715
static int CodePageGuardStartOffset()
Definition: spaces.cc:861
NewSpacePage * prev_page() const
Definition: spaces.h:2061
HeapObject * obj
void CreateFillerObjectAt(Address addr, int size)
Definition: heap.cc:4005
SkipList * skip_list()
Definition: spaces.h:663
FreeListNode * end() const
Definition: spaces.h:1567
intptr_t unswept_free_bytes_
Definition: spaces.h:1979
bool CommitBlock(Address start, size_t size, Executability executable)
Definition: spaces.cc:776
AllocationInfo allocation_info_
Definition: spaces.h:1968
static const intptr_t kAllocatedThreshold
void FreeRawMemory(Address buf, size_t length)
Definition: spaces.cc:258
FreeListNode * top() const
Definition: spaces.h:1558
Executability executable()
Definition: spaces.h:606
void ResetFreeListStatistics()
Definition: spaces.cc:1104
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
Definition: spaces.cc:1220
static bool IsFreeListNode(HeapObject *object)
Definition: spaces-inl.h:355
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1576
MUST_USE_RESULT MaybeObject * FindObject(Address addr)
Definition: spaces.cc:996
void DeleteArray(T *array)
Definition: allocation.h:91
static bool IsAtStart(Address addr)
Definition: spaces.h:2075
T Min(T a, T b)
Definition: utils.h:234
bool EnsureSweeperProgress(intptr_t size_in_bytes)
Definition: spaces.cc:2632
void OldSpaceStep(intptr_t allocated)
intptr_t available_in_medium_free_list_
Definition: spaces.h:737
bool ShrinkTo(int new_capacity)
Definition: spaces.cc:1623
void set_end(FreeListNode *end)
Definition: spaces.h:1568
HeapObjectIterator(PagedSpace *space)
Definition: spaces.cc:42
void ResetFreeListStatistics()
Definition: spaces.cc:719
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2935
static intptr_t CommitPageSize()
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
void SetFlags(intptr_t flags, intptr_t mask)
Definition: spaces.h:463
SkipList * skip_list_
Definition: spaces.h:724
MemoryChunk * prev_chunk() const
Definition: spaces.h:320
intptr_t available_in_huge_free_list_
Definition: spaces.h:739
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:324
static JSObject * cast(Object *obj)
const int kCodeZapValue
Definition: v8globals.h:91
VirtualMemory * reserved_memory()
Definition: spaces.h:349
SlotsBuffer * slots_buffer()
Definition: spaces.h:671
OldSpace * old_data_space()
Definition: heap.h:639
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:2242
intptr_t available()
Definition: spaces.h:1629
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1769
void AddAll(const List< T, AllocationPolicy > &other, AllocationPolicy allocator=AllocationPolicy())
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.cc:826
void ObtainFreeListStatistics(Page *p, SizeStats *sizes)
Definition: spaces.cc:1096
MUST_USE_RESULT HeapObject * Allocate(int size_in_bytes)
Definition: spaces.cc:2376
AllocationSpace identity()
Definition: spaces.h:906
void Free()
Definition: list.h:64
bool Uncommit(void *address, size_t size)
virtual MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:2651
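The AllocationInfo, SetTopAndLimit and SlowAllocateRaw entries describe bump-pointer allocation in a paged space: the fast path advances a top pointer toward a limit, and only when the request would cross the limit does allocation fall back to a slow path (free list, sweeping help, or a new page). The compressed sketch below shows that fast/slow split; the class name and the empty slow path are placeholders, not V8's implementation.

#include <cstdint>

typedef uintptr_t Address;

// Linear (bump-pointer) allocation state for the current page.
struct AllocationInfo {
  Address top;    // next free byte
  Address limit;  // end of the linear allocation area
};

class PagedSpaceSketch {
 public:
  // Fast path: bump `top` if the object fits before `limit`.
  Address AllocateRaw(int size_in_bytes) {
    if (info_.top + size_in_bytes <= info_.limit) {
      Address result = info_.top;
      info_.top += size_in_bytes;
      return result;
    }
    return SlowAllocateRaw(size_in_bytes);
  }

 private:
  // Placeholder slow path; the real one consults the free list, helps the
  // sweeper finish, or expands the space with a fresh page.
  Address SlowAllocateRaw(int /*size_in_bytes*/) { return 0; }

  AllocationInfo info_ = {0, 0};
};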
MemoryChunk * next_chunk() const
Definition: spaces.h:316