v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
spaces.h
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_SPACES_H_
29 #define V8_SPACES_H_
30 
31 #include "allocation.h"
32 #include "hashmap.h"
33 #include "list.h"
34 #include "log.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 class Isolate;
40 
41 // -----------------------------------------------------------------------------
42 // Heap structures:
43 //
44 // A JS heap consists of a young generation, an old generation, and a large
45 // object space. The young generation is divided into two semispaces. A
46 // scavenger implements Cheney's copying algorithm. The old generation is
47 // separated into a map space and an old object space. The map space contains
48 // all (and only) map objects; the rest of the old objects go into the old space.
49 // The old generation is collected by a mark-sweep-compact collector.
50 //
51 // The semispaces of the young generation are contiguous. The old and map
52 // spaces consist of a list of pages. A page has a page header and an object
53 // area.
54 //
55 // There is a separate large object space for objects larger than
56 // Page::kMaxHeapObjectSize, so that they do not have to move during
57 // collection. The large object space is paged. Pages in large object space
58 // may be larger than the page size.
59 //
60 // A store-buffer based write barrier is used to keep track of intergenerational
61 // references. See store-buffer.h.
62 //
63 // During scavenges and mark-sweep collections we sometimes (after a store
64 // buffer overflow) iterate intergenerational pointers without decoding heap
65 // object maps so if the page belongs to old pointer space or large object
66 // space it is essential to guarantee that the page does not contain any
67 // garbage pointers to new space: every pointer aligned word which satisfies
68 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
69 // new space. Thus objects in old pointer and large object spaces should have a
70 // special layout (e.g. no bare integer fields). This requirement does not
71 // apply to map space which is iterated in a special fashion. However we still
72 // require pointer fields of dead maps to be cleaned.
73 //
74 // To enable lazy cleaning of old space pages we can mark chunks of the page
75 // as being garbage. Garbage sections are marked with a special map. These
76 // sections are skipped when scanning the page, even if we are otherwise
77 // scanning without regard for object boundaries. Garbage sections are chained
78 // together to form a free list after a GC. Garbage sections created outside
79 // of GCs by object truncation etc. may not be in the free list chain. Very
80 // small free spaces are ignored, they need only be cleaned of bogus pointers
81 // into new space.
82 //
83 // Each page may have up to one special garbage section. The start of this
84 // section is denoted by the top field in the space. The end of the section
85 // is denoted by the limit field in the space. This special garbage section
86 // is not marked with a free space map in the data. The point of this section
87 // is to enable linear allocation without having to constantly update the byte
88 // array every time the top field is updated and a new object is created. The
89 // special garbage section is not in the chain of garbage sections.
90 //
91 // Since the top and limit fields are in the space, not the page, only one page
92 // has a special garbage section, and if the top and limit are equal then there
93 // is no special garbage section.
94 
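// The "special garbage section" described above is simply the linear
// allocation area: allocation bumps the space's top pointer until it reaches
// limit, and only then falls back to the free list. As an illustrative
// sketch (not part of the original header; the real fast path is
// PagedSpace::AllocateLinearly(), declared further down):
//
//   Address current_top = allocation_info.top;
//   Address new_top = current_top + size_in_bytes;
//   if (new_top > allocation_info.limit) return NULL;   // take the slow path
//   allocation_info.top = new_top;                       // bump the pointer
//   return HeapObject::FromAddress(current_top);
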
95 // Some assertion macros used in the debugging mode.
96 
97 #define ASSERT_PAGE_ALIGNED(address) \
98  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
99 
100 #define ASSERT_OBJECT_ALIGNED(address) \
101  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
102 
103 #define ASSERT_OBJECT_SIZE(size) \
104  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
105 
106 #define ASSERT_PAGE_OFFSET(offset) \
107  ASSERT((Page::kObjectStartOffset <= offset) \
108  && (offset <= Page::kPageSize))
109 
110 #define ASSERT_MAP_PAGE_INDEX(index) \
111  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
112 
113 
114 class PagedSpace;
115 class MemoryAllocator;
116 class AllocationInfo;
117 class Space;
118 class FreeList;
119 class MemoryChunk;
120 
121 class MarkBit {
122  public:
123  typedef uint32_t CellType;
124 
125  inline MarkBit(CellType* cell, CellType mask, bool data_only)
126  : cell_(cell), mask_(mask), data_only_(data_only) { }
127 
128  inline CellType* cell() { return cell_; }
129  inline CellType mask() { return mask_; }
130 
131 #ifdef DEBUG
132  bool operator==(const MarkBit& other) {
133  return cell_ == other.cell_ && mask_ == other.mask_;
134  }
135 #endif
136 
137  inline void Set() { *cell_ |= mask_; }
138  inline bool Get() { return (*cell_ & mask_) != 0; }
139  inline void Clear() { *cell_ &= ~mask_; }
140 
141  inline bool data_only() { return data_only_; }
142 
143  inline MarkBit Next() {
144  CellType new_mask = mask_ << 1;
145  if (new_mask == 0) {
146  return MarkBit(cell_ + 1, 1, data_only_);
147  } else {
148  return MarkBit(cell_, new_mask, data_only_);
149  }
150  }
151 
152  private:
153  CellType* cell_;
154  CellType mask_;
155  // This boolean indicates that the object is in a data-only space with no
156  // pointers. This enables some optimizations when marking.
157  // It is expected that this field is inlined and turned into control flow
158  // at the place where the MarkBit object is created.
159  bool data_only_;
160 };
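// Illustrative sketch (not part of the original header): a MarkBit names one
// bit as a (cell, mask) pair, so setting, testing and clearing are single
// bitwise operations, and Next() advances to the following bit, stepping into
// the next 32-bit cell when the mask overflows:
//
//   MarkBit::CellType cells[2] = { 0, 0 };
//   MarkBit bit(&cells[0], 1u << 31, false);
//   bit.Set();                  // cells[0] == 0x80000000
//   MarkBit next = bit.Next();  // next.cell() == &cells[1], next.mask() == 1
//   next.Set();                 // cells[1] == 1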
161 
162 
163 // Bitmap is a sequence of cells each containing fixed number of bits.
164 class Bitmap {
165  public:
166  static const uint32_t kBitsPerCell = 32;
167  static const uint32_t kBitsPerCellLog2 = 5;
168  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
169  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
170  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
171 
172  static const size_t kLength =
173  (1 << kPageSizeBits) >> (kPointerSizeLog2);
174 
175  static const size_t kSize =
176  (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
177 
178 
179  static int CellsForLength(int length) {
180  return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
181  }
182 
183  int CellsCount() {
184  return CellsForLength(kLength);
185  }
186 
187  static int SizeFor(int cells_count) {
188  return sizeof(MarkBit::CellType) * cells_count;
189  }
190 
191  INLINE(static uint32_t IndexToCell(uint32_t index)) {
192  return index >> kBitsPerCellLog2;
193  }
194 
195  INLINE(static uint32_t CellToIndex(uint32_t index)) {
196  return index << kBitsPerCellLog2;
197  }
198 
199  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
200  return (index + kBitIndexMask) & ~kBitIndexMask;
201  }
202 
203  INLINE(MarkBit::CellType* cells()) {
204  return reinterpret_cast<MarkBit::CellType*>(this);
205  }
206 
207  INLINE(Address address()) {
208  return reinterpret_cast<Address>(this);
209  }
210 
211  INLINE(static Bitmap* FromAddress(Address addr)) {
212  return reinterpret_cast<Bitmap*>(addr);
213  }
214 
215  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
216  MarkBit::CellType mask = 1 << (index & kBitIndexMask);
217  MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
218  return MarkBit(cell, mask, data_only);
219  }
220 
221  static inline void Clear(MemoryChunk* chunk);
222 
223  static void PrintWord(uint32_t word, uint32_t himask = 0) {
224  for (uint32_t mask = 1; mask != 0; mask <<= 1) {
225  if ((mask & himask) != 0) PrintF("[");
226  PrintF((mask & word) ? "1" : "0");
227  if ((mask & himask) != 0) PrintF("]");
228  }
229  }
230 
231  class CellPrinter {
232  public:
233  CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
234 
235  void Print(uint32_t pos, uint32_t cell) {
236  if (cell == seq_type) {
237  seq_length++;
238  return;
239  }
240 
241  Flush();
242 
243  if (IsSeq(cell)) {
244  seq_start = pos;
245  seq_length = 0;
246  seq_type = cell;
247  return;
248  }
249 
250  PrintF("%d: ", pos);
251  PrintWord(cell);
252  PrintF("\n");
253  }
254 
255  void Flush() {
256  if (seq_length > 0) {
257  PrintF("%d: %dx%d\n",
258  seq_start,
259  seq_type == 0 ? 0 : 1,
260  seq_length * kBitsPerCell);
261  seq_length = 0;
262  }
263  }
264 
265  static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
266 
267  private:
268  uint32_t seq_start;
269  uint32_t seq_type;
270  uint32_t seq_length;
271  };
272 
273  void Print() {
274  CellPrinter printer;
275  for (int i = 0; i < CellsCount(); i++) {
276  printer.Print(i, cells()[i]);
277  }
278  printer.Flush();
279  PrintF("\n");
280  }
281 
282  bool IsClean() {
283  for (int i = 0; i < CellsCount(); i++) {
284  if (cells()[i] != 0) {
285  return false;
286  }
287  }
288  return true;
289  }
290 };
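// Worked example (not part of the original header): with kBitsPerCell == 32,
// mark-bit index 37 resolves to cell 37 >> kBitsPerCellLog2 == 1 and mask
// 1 << (37 & kBitIndexMask) == 1 << 5:
//
//   Bitmap* bitmap = Bitmap::FromAddress(bitmap_start);  // bitmap_start: placeholder
//   MarkBit bit = bitmap->MarkBitFromIndex(37);
//   // bit.cell() == bitmap->cells() + 1 and bit.mask() == 1 << 5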
291 
292 
293 class SkipList;
294 class SlotsBuffer;
295 
296 // MemoryChunk represents a memory region owned by a specific space.
297 // It is divided into the header and the body. Chunk start is always
298 // 1MB aligned. Start of the body is aligned so it can accommodate
299 // any heap object.
300 class MemoryChunk {
301  public:
302  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
303  static MemoryChunk* FromAddress(Address a) {
304  return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
305  }
306 
307  // Only works for addresses in pointer spaces, not data or code spaces.
308  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
309 
310  Address address() { return reinterpret_cast<Address>(this); }
311 
312  bool is_valid() { return address() != NULL; }
313 
314  MemoryChunk* next_chunk() const { return next_chunk_; }
315  MemoryChunk* prev_chunk() const { return prev_chunk_; }
316 
317  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
318  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
319 
320  Space* owner() const {
321  if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
322  kFailureTag) {
323  return reinterpret_cast<Space*>(owner_ - kFailureTag);
324  } else {
325  return NULL;
326  }
327  }
328 
329  void set_owner(Space* space) {
330  ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
331  owner_ = reinterpret_cast<Address>(space) + kFailureTag;
332  ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
333  kFailureTag);
334  }
335 
336  VirtualMemory* reserved_memory() {
337  return &reservation_;
338  }
339 
340  void InitializeReservedMemory() {
341  reservation_.Reset();
342  }
343 
344  void set_reserved_memory(VirtualMemory* reservation) {
345  ASSERT_NOT_NULL(reservation);
346  reservation_.TakeControl(reservation);
347  }
348 
349  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
350  void initialize_scan_on_scavenge(bool scan) {
351  if (scan) {
352  SetFlag(SCAN_ON_SCAVENGE);
353  } else {
354  ClearFlag(SCAN_ON_SCAVENGE);
355  }
356  }
357  inline void set_scan_on_scavenge(bool scan);
358 
359  int store_buffer_counter() { return store_buffer_counter_; }
360  void set_store_buffer_counter(int counter) {
361  store_buffer_counter_ = counter;
362  }
363 
364  bool Contains(Address addr) {
365  return addr >= area_start() && addr < area_end();
366  }
367 
368  // Checks whether addr can be a limit of addresses in this page.
369  // It's a limit if it's in the page, or if it's just after the
370  // last byte of the page.
371  bool ContainsLimit(Address addr) {
372  return addr >= area_start() && addr <= area_end();
373  }
374 
375  // Every n write barrier invocations we go to runtime even though
376  // we could have handled it in generated code. This lets us check
377  // whether we have hit the limit and should do some more marking.
378  static const int kWriteBarrierCounterGranularity = 500;
379 
386  IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
387  IN_TO_SPACE, // All pages in new space have one of these two set.
392 
393  // Pages swept precisely can be iterated, hitting only the live objects.
394  // Whereas those swept conservatively cannot be iterated over. Both flags
395  // indicate that marking bits have been cleared by the sweeper, otherwise
396  // marking bits are still intact.
399 
400  // Last flag, keep at bottom.
402  };
403 
404 
407 
410 
411  static const int kEvacuationCandidateMask =
412  1 << EVACUATION_CANDIDATE;
413 
414  static const int kSkipEvacuationSlotsRecordingMask =
415  (1 << EVACUATION_CANDIDATE) |
416  (1 << RESCAN_ON_EVACUATION) |
417  (1 << IN_FROM_SPACE) |
418  (1 << IN_TO_SPACE);
419 
420 
421  void SetFlag(int flag) {
422  flags_ |= static_cast<uintptr_t>(1) << flag;
423  }
424 
425  void ClearFlag(int flag) {
426  flags_ &= ~(static_cast<uintptr_t>(1) << flag);
427  }
428 
429  void SetFlagTo(int flag, bool value) {
430  if (value) {
431  SetFlag(flag);
432  } else {
433  ClearFlag(flag);
434  }
435  }
436 
437  bool IsFlagSet(int flag) {
438  return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
439  }
440 
441  // Set or clear multiple flags at a time. The flags in the mask
442  // are set to the value in "flags", the rest retain the current value
443  // in flags_.
444  void SetFlags(intptr_t flags, intptr_t mask) {
445  flags_ = (flags_ & ~mask) | (flags & mask);
446  }
447 
448  // Return all current flags.
449  intptr_t GetFlags() { return flags_; }
450 
451  // Manage live byte count (count of bytes known to be live,
452  // because they are marked black).
453  void ResetLiveBytes() {
454  if (FLAG_gc_verbose) {
455  PrintF("ResetLiveBytes:%p:%x->0\n",
456  static_cast<void*>(this), live_byte_count_);
457  }
458  live_byte_count_ = 0;
459  }
460  void IncrementLiveBytes(int by) {
461  if (FLAG_gc_verbose) {
462  printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
463  static_cast<void*>(this), live_byte_count_,
464  ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
465  live_byte_count_ + by);
466  }
467  live_byte_count_ += by;
468  ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
469  }
470  int LiveBytes() {
471  ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
472  return live_byte_count_;
473  }
474 
475  int write_barrier_counter() {
476  return static_cast<int>(write_barrier_counter_);
477  }
478 
479  void set_write_barrier_counter(int counter) {
480  write_barrier_counter_ = counter;
481  }
482 
483 
484  static void IncrementLiveBytesFromGC(Address address, int by) {
485  MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
486  }
487 
488  static void IncrementLiveBytesFromMutator(Address address, int by);
489 
490  static const intptr_t kAlignment =
491  (static_cast<uintptr_t>(1) << kPageSizeBits);
492 
493  static const intptr_t kAlignmentMask = kAlignment - 1;
494 
495  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
496 
497  static const intptr_t kLiveBytesOffset =
501 
503 
504  static const size_t kWriteBarrierCounterOffset =
506 
508 
509  static const int kBodyOffset =
511 
512  // The start offset of the object area in a page. Aligned to both maps and
513  // code alignment to be suitable for both. Also aligned to 32 words because
514  // the marking bitmap is arranged in 32 bit chunks.
515  static const int kObjectStartAlignment = 32 * kPointerSize;
516  static const int kObjectStartOffset = kBodyOffset - 1 +
517  (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
518 
519  size_t size() const { return size_; }
520 
521  void set_size(size_t size) {
522  size_ = size;
523  }
524 
525  void SetArea(Address area_start, Address area_end) {
526  area_start_ = area_start;
527  area_end_ = area_end;
528  }
529 
530  Executability executable() {
531  return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
532  }
533 
534  bool ContainsOnlyData() {
535  return IsFlagSet(CONTAINS_ONLY_DATA);
536  }
537 
538  bool InNewSpace() {
539  return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
540  }
541 
542  bool InToSpace() {
543  return IsFlagSet(IN_TO_SPACE);
544  }
545 
546  bool InFromSpace() {
547  return IsFlagSet(IN_FROM_SPACE);
548  }
549 
550  // ---------------------------------------------------------------------
551  // Markbits support
552 
553  inline Bitmap* markbits() {
554  return Bitmap::FromAddress(address() + kHeaderSize);
555  }
556 
557  void PrintMarkbits() { markbits()->Print(); }
558 
559  inline uint32_t AddressToMarkbitIndex(Address addr) {
560  return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
561  }
562 
563  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
564  const intptr_t offset =
565  reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
566 
567  return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
568  }
569 
570  inline Address MarkbitIndexToAddress(uint32_t index) {
571  return this->address() + (index << kPointerSizeLog2);
572  }
573 
574  void InsertAfter(MemoryChunk* other);
575  void Unlink();
576 
577  inline Heap* heap() { return heap_; }
578 
579  static const int kFlagsOffset = kPointerSize * 3;
580 
581  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
582 
583  bool ShouldSkipEvacuationSlotRecording() {
584  return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
585  }
586 
587  inline SkipList* skip_list() {
588  return skip_list_;
589  }
590 
591  inline void set_skip_list(SkipList* skip_list) {
592  skip_list_ = skip_list;
593  }
594 
595  inline SlotsBuffer* slots_buffer() {
596  return slots_buffer_;
597  }
598 
599  inline SlotsBuffer** slots_buffer_address() {
600  return &slots_buffer_;
601  }
602 
606  }
607 
611  }
612 
613  Address area_start() { return area_start_; }
614  Address area_end() { return area_end_; }
615  int area_size() {
616  return static_cast<int>(area_end() - area_start());
617  }
618 
619  protected:
620  MemoryChunk* next_chunk_;
621  MemoryChunk* prev_chunk_;
622  size_t size_;
623  intptr_t flags_;
624 
625  // Start and end of allocatable memory on this chunk.
626  Address area_start_;
627  Address area_end_;
628 
629  // If the chunk needs to remember its memory reservation, it is stored here.
630  VirtualMemory reservation_;
631  // The identity of the owning space. This is tagged as a failure pointer, but
632  // no failure can be in an object, so this can be distinguished from any entry
633  // in a fixed array.
634  Address owner_;
635  Heap* heap_;
636  // Used by the store buffer to keep track of which pages to mark scan-on-
637  // scavenge.
638  int store_buffer_counter_;
639  // Count of bytes marked black on page.
640  int live_byte_count_;
641  SlotsBuffer* slots_buffer_;
642  SkipList* skip_list_;
643  intptr_t write_barrier_counter_;
644 
645  static MemoryChunk* Initialize(Heap* heap,
646  Address base,
647  size_t size,
648  Address area_start,
649  Address area_end,
650  Executability executable,
651  Space* owner);
652 
653  friend class MemoryAllocator;
654 };
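// Illustrative sketch (not part of the original header): chunks are
// kAlignment (1 MB) aligned, so the owning chunk of any interior pointer is
// recovered by masking the low bits, after which per-chunk state is read and
// written through the accessors above:
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);  // addr: placeholder
//   if (chunk->Contains(addr) && !chunk->InNewSpace()) {
//     chunk->IncrementLiveBytes(object_size_in_bytes);    // size: placeholder
//   }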
655 
656 
658 
659 
660 // -----------------------------------------------------------------------------
661 // A page is a memory chunk of size 1 MB. Large object pages may be larger.
662 //
663 // The only way to get a page pointer is by calling factory methods:
664 // Page* p = Page::FromAddress(addr); or
665 // Page* p = Page::FromAllocationTop(top);
666 class Page : public MemoryChunk {
667  public:
668  // Returns the page containing a given address. The address ranges
669  // from [page_addr .. page_addr + kPageSize[
670  // This only works if the object is in fact in a page. See also MemoryChunk::
671  // FromAddress() and FromAnyAddress().
672  INLINE(static Page* FromAddress(Address a)) {
673  return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
674  }
675 
676  // Returns the page containing an allocation top. Because an allocation
677  // top address can be the upper bound of the page, we need to subtract
678 // kPointerSize from it first. The address ranges from
679  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
680  INLINE(static Page* FromAllocationTop(Address top)) {
681  Page* p = FromAddress(top - kPointerSize);
682  return p;
683  }
684 
685  // Returns the next page in the chain of pages owned by a space.
686  inline Page* next_page();
687  inline Page* prev_page();
688  inline void set_next_page(Page* page);
689  inline void set_prev_page(Page* page);
690 
691  // Checks whether an address is page aligned.
692  static bool IsAlignedToPageSize(Address a) {
693  return 0 == (OffsetFrom(a) & kPageAlignmentMask);
694  }
695 
696  // Returns the offset of a given address to this page.
697  int Offset(Address a) {
698  int offset = static_cast<int>(a - address());
699  return offset;
700  }
701 
702  // Returns the address for a given offset in this page.
703  Address OffsetToAddress(int offset) {
704  ASSERT_PAGE_OFFSET(offset);
705  return address() + offset;
706  }
707 
708  // ---------------------------------------------------------------------
709 
710  // Page size in bytes. This must be a multiple of the OS page size.
711  static const int kPageSize = 1 << kPageSizeBits;
712 
713  // Object area size in bytes.
714  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
715 
716  // Maximum object size that fits in a page.
717  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
718 
719  // Page size mask.
720  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
721 
722  inline void ClearGCFields();
723 
724  static inline Page* Initialize(Heap* heap,
725  MemoryChunk* chunk,
726  Executability executable,
727  PagedSpace* owner);
728 
729  void InitializeAsAnchor(PagedSpace* owner);
730 
731  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
732  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
733  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
734 
735  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
736  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
737 
738  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
739  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
740 
741 #ifdef DEBUG
742  void Print();
743 #endif // DEBUG
744 
745  friend class MemoryAllocator;
746 };
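// Illustrative sketch (not part of the original header): both factories mask
// off the low kPageSizeBits of an address; FromAllocationTop() first steps
// back one word so that an allocation top pointing exactly at the page end
// still maps to the page it belongs to:
//
//   Page* p = Page::FromAddress(addr);                // addr: placeholder
//   Page* q = Page::FromAllocationTop(p->area_end());
//   // q == p, because area_end() is a valid allocation limit of this page.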
747 
748 
750 
751 
752 class LargePage : public MemoryChunk {
753  public:
754  HeapObject* GetObject() {
755  return HeapObject::FromAddress(area_start());
756  }
757 
758  inline LargePage* next_page() const {
759  return static_cast<LargePage*>(next_chunk());
760  }
761 
762  inline void set_next_page(LargePage* page) {
763  set_next_chunk(page);
764  }
765  private:
766  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
767 
768  friend class MemoryAllocator;
769 };
770 
772 
773 // ----------------------------------------------------------------------------
774 // Space is the abstract superclass for all allocation spaces.
775 class Space : public Malloced {
776  public:
777  Space(Heap* heap, AllocationSpace id, Executability executable)
778  : heap_(heap), id_(id), executable_(executable) {}
779 
780  virtual ~Space() {}
781 
782  Heap* heap() const { return heap_; }
783 
784  // Does the space need executable memory?
785  Executability executable() { return executable_; }
786 
787  // Identity used in error reporting.
788  AllocationSpace identity() { return id_; }
789 
790  // Returns allocated size.
791  virtual intptr_t Size() = 0;
792 
793  // Returns size of objects. Can differ from the allocated size
794  // (e.g. see LargeObjectSpace).
795  virtual intptr_t SizeOfObjects() { return Size(); }
796 
797  virtual int RoundSizeDownToObjectAlignment(int size) {
798  if (id_ == CODE_SPACE) {
799  return RoundDown(size, kCodeAlignment);
800  } else {
801  return RoundDown(size, kPointerSize);
802  }
803  }
804 
805 #ifdef DEBUG
806  virtual void Print() = 0;
807 #endif
808 
809  private:
810  Heap* heap_;
811  AllocationSpace id_;
812  Executability executable_;
813 };
814 
815 
816 // ----------------------------------------------------------------------------
817 // All heap objects containing executable code (code objects) must be allocated
818 // from a 2 GB range of memory, so that they can call each other using 32-bit
819 // displacements. This happens automatically on 32-bit platforms, where 32-bit
820 // displacements cover the entire 4GB virtual address space. On 64-bit
821 // platforms, we support this using the CodeRange object, which reserves and
822 // manages a range of virtual memory.
823 class CodeRange {
824  public:
825  explicit CodeRange(Isolate* isolate);
827 
828  // Reserves a range of virtual memory, but does not commit any of it.
829  // Can only be called once, at heap initialization time.
830  // Returns false on failure.
831  bool SetUp(const size_t requested_size);
832 
833  // Frees the range of virtual memory, and frees the data structures used to
834  // manage it.
835  void TearDown();
836 
837  bool exists() { return this != NULL && code_range_ != NULL; }
838  bool contains(Address address) {
839  if (this == NULL || code_range_ == NULL) return false;
840  Address start = static_cast<Address>(code_range_->address());
841  return start <= address && address < start + code_range_->size();
842  }
843 
844  // Allocates a chunk of memory from the large-object portion of
845  // the code range. On platforms with no separate code range, should
846  // not be called.
847  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
848  size_t* allocated);
849  void FreeRawMemory(Address buf, size_t length);
850 
851  private:
852  Isolate* isolate_;
853 
854  // The reserved range of virtual memory that all code objects are put in.
855  VirtualMemory* code_range_;
856  // Plain old data class, just a struct plus a constructor.
857  class FreeBlock {
858  public:
859  FreeBlock(Address start_arg, size_t size_arg)
860  : start(start_arg), size(size_arg) {
862  ASSERT(size >= static_cast<size_t>(Page::kPageSize));
863  }
864  FreeBlock(void* start_arg, size_t size_arg)
865  : start(static_cast<Address>(start_arg)), size(size_arg) {
867  ASSERT(size >= static_cast<size_t>(Page::kPageSize));
868  }
869 
870  Address start;
871  size_t size;
872  };
873 
874  // Freed blocks of memory are added to the free list. When the allocation
875  // list is exhausted, the free list is sorted and merged to make the new
876  // allocation list.
877  List<FreeBlock> free_list_;
878  // Memory is allocated from the free blocks on the allocation list.
879  // The block at current_allocation_block_index_ is the current block.
880  List<FreeBlock> allocation_list_;
881  int current_allocation_block_index_;
882 
883  // Finds a block on the allocation list that contains at least the
884  // requested amount of memory. If none is found, sorts and merges
885  // the existing free memory blocks, and searches again.
886  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
887  void GetNextAllocationBlock(size_t requested);
888  // Compares the start addresses of two free blocks.
889  static int CompareFreeBlockAddress(const FreeBlock* left,
890  const FreeBlock* right);
891 
892  DISALLOW_COPY_AND_ASSIGN(CodeRange);
893 };
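// Illustrative sketch (not part of the original header): keeping every code
// object inside one reserved range guarantees that any caller/callee pair is
// reachable with a 32-bit displacement, which a client can check roughly as:
//
//   CodeRange* range = isolate->code_range();          // isolate: placeholder
//   if (range->exists() && range->contains(target)) {
//     // a rel32 call from code in the range can reach `target`
//   }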
894 
895 
896 class SkipList {
897  public:
898  SkipList() {
899  Clear();
900  }
901 
902  void Clear() {
903  for (int idx = 0; idx < kSize; idx++) {
904  starts_[idx] = reinterpret_cast<Address>(-1);
905  }
906  }
907 
908  Address StartFor(Address addr) {
909  return starts_[RegionNumber(addr)];
910  }
911 
912  void AddObject(Address addr, int size) {
913  int start_region = RegionNumber(addr);
914  int end_region = RegionNumber(addr + size - kPointerSize);
915  for (int idx = start_region; idx <= end_region; idx++) {
916  if (starts_[idx] > addr) starts_[idx] = addr;
917  }
918  }
919 
920  static inline int RegionNumber(Address addr) {
921  return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
922  }
923 
924  static void Update(Address addr, int size) {
925  Page* page = Page::FromAddress(addr);
926  SkipList* list = page->skip_list();
927  if (list == NULL) {
928  list = new SkipList();
929  page->set_skip_list(list);
930  }
931 
932  list->AddObject(addr, size);
933  }
934 
935  private:
936  static const int kRegionSizeLog2 = 13;
937  static const int kRegionSize = 1 << kRegionSizeLog2;
938  static const int kSize = Page::kPageSize / kRegionSize;
939 
940  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
941 
942  Address starts_[kSize];
943 };
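// Illustrative sketch (not part of the original header): a skip list divides
// a page into 8 KB regions (kRegionSizeLog2 == 13) and remembers, per region,
// the lowest address of an object overlapping it, so a scan of a region can
// start at an object boundary:
//
//   SkipList::Update(object_address, object_size);     // placeholders
//   int region = SkipList::RegionNumber(object_address);
//   // starts_[region] <= object_address now holds.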
944 
945 
946 // ----------------------------------------------------------------------------
947 // A space acquires chunks of memory from the operating system. The memory
948 // allocator allocates and deallocates pages for the paged heap spaces and large
949 // pages for large object space.
950 //
951 // Each space has to manage its own pages.
952 //
953 class MemoryAllocator {
954  public:
955  explicit MemoryAllocator(Isolate* isolate);
956 
957  // Initializes its internal bookkeeping structures.
958  // Max capacity of the total space and executable memory limit.
959  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
960 
961  void TearDown();
962 
963  Page* AllocatePage(
964  intptr_t size, PagedSpace* owner, Executability executable);
965 
966  LargePage* AllocateLargePage(
967  intptr_t object_size, Space* owner, Executability executable);
968 
969  void Free(MemoryChunk* chunk);
970 
971  // Returns the maximum available bytes of heaps.
972  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
973 
974  // Returns allocated spaces in bytes.
975  intptr_t Size() { return size_; }
976 
977  // Returns the maximum available executable bytes of heaps.
978  intptr_t AvailableExecutable() {
979  if (capacity_executable_ < size_executable_) return 0;
980  return capacity_executable_ - size_executable_;
981  }
982 
983  // Returns allocated executable spaces in bytes.
984  intptr_t SizeExecutable() { return size_executable_; }
985 
986  // Returns maximum available bytes that the old space can have.
987  intptr_t MaxAvailable() {
988  return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
989  }
990 
991 #ifdef DEBUG
992  // Reports statistic info of the space.
993  void ReportStatistics();
994 #endif
995 
996  MemoryChunk* AllocateChunk(intptr_t body_size,
997  Executability executable,
998  Space* space);
999 
1000  Address ReserveAlignedMemory(size_t requested,
1001  size_t alignment,
1002  VirtualMemory* controller);
1003  Address AllocateAlignedMemory(size_t requested,
1004  size_t alignment,
1005  Executability executable,
1006  VirtualMemory* controller);
1007 
1008  void FreeMemory(VirtualMemory* reservation, Executability executable);
1009  void FreeMemory(Address addr, size_t size, Executability executable);
1010 
1011  // Commit a contiguous block of memory from the initial chunk. Assumes that
1012  // the address is not NULL, the size is greater than zero, and that the
1013  // block is contained in the initial chunk. Returns true if it succeeded
1014  // and false otherwise.
1015  bool CommitBlock(Address start, size_t size, Executability executable);
1016 
1017  // Uncommit a contiguous block of memory [start..(start+size)[.
1018  // start is not NULL, the size is greater than zero, and the
1019  // block is contained in the initial chunk. Returns true if it succeeded
1020  // and false otherwise.
1021  bool UncommitBlock(Address start, size_t size);
1022 
1023  // Zaps a contiguous block of memory [start..(start+size)[ thus
1024  // filling it up with a recognizable non-NULL bit pattern.
1025  void ZapBlock(Address start, size_t size);
1026 
1027  void PerformAllocationCallback(ObjectSpace space,
1028  AllocationAction action,
1029  size_t size);
1030 
1031  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1032  ObjectSpace space,
1033  AllocationAction action);
1034 
1036  MemoryAllocationCallback callback);
1037 
1039  MemoryAllocationCallback callback);
1040 
1041  static int CodePageGuardStartOffset();
1042 
1043  static int CodePageGuardSize();
1044 
1045  static int CodePageAreaStartOffset();
1046 
1047  static int CodePageAreaEndOffset();
1048 
1049  static int CodePageAreaSize() {
1050  return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1051  }
1052 
1054  Address start,
1055  size_t size);
1056 
1057  private:
1058  Isolate* isolate_;
1059 
1060  // Maximum space size in bytes.
1061  size_t capacity_;
1062  // Maximum subset of capacity_ that can be executable
1063  size_t capacity_executable_;
1064 
1065  // Allocated space size in bytes.
1066  size_t size_;
1067  // Allocated executable space size in bytes.
1068  size_t size_executable_;
1069 
1070  struct MemoryAllocationCallbackRegistration {
1071  MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1072  ObjectSpace space,
1073  AllocationAction action)
1074  : callback(callback), space(space), action(action) {
1075  }
1076  MemoryAllocationCallback callback;
1077  ObjectSpace space;
1078  AllocationAction action;
1079  };
1080 
1081  // A list of callbacks that are triggered when memory is allocated or freed.
1082  List<MemoryAllocationCallbackRegistration>
1083  memory_allocation_callbacks_;
1084 
1085  // Initializes pages in a chunk. Returns the first page address.
1086  // This function and GetChunkId() are provided for the mark-compact
1087  // collector to rebuild page headers in the from space, which is
1088  // used as a marking stack and its page headers are destroyed.
1089  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1090  PagedSpace* owner);
1091 
1092  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1093 };
1094 
1095 
1096 // -----------------------------------------------------------------------------
1097 // Interface for heap object iterator to be implemented by all object space
1098 // object iterators.
1099 //
1100 // NOTE: The space-specific object iterators also implement their own next()
1101 // method, which is used to avoid using virtual functions when
1102 // iterating a specific space.
1103 
1104 class ObjectIterator : public Malloced {
1105  public:
1106  virtual ~ObjectIterator() { }
1107 
1108  virtual HeapObject* next_object() = 0;
1109 };
1110 
1111 
1112 // -----------------------------------------------------------------------------
1113 // Heap object iterator in new/old/map spaces.
1114 //
1115 // A HeapObjectIterator iterates objects from the bottom of the given space
1116 // to its top or from the bottom of the given page to its top.
1117 //
1118 // If objects are allocated in the page during iteration the iterator may
1119 // or may not iterate over those objects. The caller must create a new
1120 // iterator in order to be sure to visit these new objects.
1121 class HeapObjectIterator: public ObjectIterator {
1122  public:
1123  // Creates a new object iterator in a given space.
1124  // If the size function is not given, the iterator calls the default
1125  // Object::Size().
1126  explicit HeapObjectIterator(PagedSpace* space);
1127  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
1128  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1129 
1130  // Advance to the next object, skipping free spaces and other fillers and
1131  // skipping the special garbage section of which there is one per space.
1132  // Returns NULL when the iteration has ended.
1133  inline HeapObject* Next() {
1134  do {
1135  HeapObject* next_obj = FromCurrentPage();
1136  if (next_obj != NULL) return next_obj;
1137  } while (AdvanceToNextPage());
1138  return NULL;
1139  }
1140 
1141  virtual HeapObject* next_object() {
1142  return Next();
1143  }
1144 
1145  private:
1146  enum PageMode { kOnePageOnly, kAllPagesInSpace };
1147 
1148  Address cur_addr_; // Current iteration point.
1149  Address cur_end_; // End iteration point.
1150  HeapObjectCallback size_func_; // Size function or NULL.
1151  PagedSpace* space_;
1152  PageMode page_mode_;
1153 
1154  // Fast (inlined) path of next().
1155  inline HeapObject* FromCurrentPage();
1156 
1157  // Slow path of next(), goes into the next page. Returns false if the
1158  // iteration has ended.
1159  bool AdvanceToNextPage();
1160 
1161  // Initializes fields.
1162  inline void Initialize(PagedSpace* owner,
1163  Address start,
1164  Address end,
1165  PageMode mode,
1166  HeapObjectCallback size_func);
1167 };
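// Typical usage, as an illustrative sketch (not part of the original header);
// any PagedSpace can be iterated this way:
//
//   HeapObjectIterator it(some_paged_space);           // placeholder space
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // visit obj; fillers and the linear allocation area are skipped.
//   }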
1168 
1169 
1170 // -----------------------------------------------------------------------------
1171 // A PageIterator iterates the pages in a paged space.
1172 
1173 class PageIterator BASE_EMBEDDED {
1174  public:
1175  explicit inline PageIterator(PagedSpace* space);
1176 
1177  inline bool has_next();
1178  inline Page* next();
1179 
1180  private:
1181  PagedSpace* space_;
1182  Page* prev_page_; // Previous page returned.
1183  // Next page that will be returned. Cached here so that we can use this
1184  // iterator for operations that deallocate pages.
1185  Page* next_page_;
1186 };
1187 
1188 
1189 // -----------------------------------------------------------------------------
1190 // A space has a circular list of pages. The next page can be accessed via
1191 // Page::next_page() call.
1192 
1193 // An abstraction of allocation and relocation pointers in a page-structured
1194 // space.
1195 class AllocationInfo {
1196  public:
1197  AllocationInfo() : top(NULL), limit(NULL) {
1198  }
1199 
1200  Address top; // Current allocation top.
1201  Address limit; // Current allocation limit.
1202 
1203 #ifdef DEBUG
1204  bool VerifyPagedAllocation() {
1205  return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
1206  && (top <= limit);
1207  }
1208 #endif
1209 };
1210 
1211 
1212 // An abstraction of the accounting statistics of a page-structured space.
1213 // The 'capacity' of a space is the number of object-area bytes (i.e., not
1214 // including page bookkeeping structures) currently in the space. The 'size'
1215 // of a space is the number of allocated bytes, the 'waste' in the space is
1216 // the number of bytes that are not allocated and not available to
1217 // allocation without reorganizing the space via a GC (e.g. small blocks due
1218 // to internal fragmentation, top of page areas in map space), and the bytes
1219 // 'available' is the number of unallocated bytes that are not waste. The
1220 // capacity is the sum of size, waste, and available.
1221 //
1222 // The stats are only set by functions that ensure they stay balanced. These
1223 // functions increase or decrease one of the non-capacity stats in
1224 // conjunction with capacity, or else they always balance increases and
1225 // decreases to the non-capacity stats.
1226 class AllocationStats BASE_EMBEDDED {
1227  public:
1228  AllocationStats() { Clear(); }
1229 
1230  // Zero out all the allocation statistics (i.e., no capacity).
1231  void Clear() {
1232  capacity_ = 0;
1233  size_ = 0;
1234  waste_ = 0;
1235  }
1236 
1237  void ClearSizeWaste() {
1238  size_ = capacity_;
1239  waste_ = 0;
1240  }
1241 
1242  // Reset the allocation statistics (i.e., available = capacity with no
1243  // wasted or allocated bytes).
1244  void Reset() {
1245  size_ = 0;
1246  waste_ = 0;
1247  }
1248 
1249  // Accessors for the allocation statistics.
1250  intptr_t Capacity() { return capacity_; }
1251  intptr_t Size() { return size_; }
1252  intptr_t Waste() { return waste_; }
1253 
1254  // Grow the space by adding available bytes. They are initially marked as
1255  // being in use (part of the size), but will normally be immediately freed,
1256  // putting them on the free list and removing them from size_.
1257  void ExpandSpace(int size_in_bytes) {
1258  capacity_ += size_in_bytes;
1259  size_ += size_in_bytes;
1260  ASSERT(size_ >= 0);
1261  }
1262 
1263  // Shrink the space by removing available bytes. Since shrinking is done
1264  // during sweeping, bytes have been marked as being in use (part of the size)
1265  // and are hereby freed.
1266  void ShrinkSpace(int size_in_bytes) {
1267  capacity_ -= size_in_bytes;
1268  size_ -= size_in_bytes;
1269  ASSERT(size_ >= 0);
1270  }
1271 
1272  // Allocate from available bytes (available -> size).
1273  void AllocateBytes(intptr_t size_in_bytes) {
1274  size_ += size_in_bytes;
1275  ASSERT(size_ >= 0);
1276  }
1277 
1278  // Free allocated bytes, making them available (size -> available).
1279  void DeallocateBytes(intptr_t size_in_bytes) {
1280  size_ -= size_in_bytes;
1281  ASSERT(size_ >= 0);
1282  }
1283 
1284  // Waste free bytes (available -> waste).
1285  void WasteBytes(int size_in_bytes) {
1286  size_ -= size_in_bytes;
1287  waste_ += size_in_bytes;
1288  ASSERT(size_ >= 0);
1289  }
1290 
1291  private:
1292  intptr_t capacity_;
1293  intptr_t size_;
1294  intptr_t waste_;
1295 };
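// Illustrative sketch (not part of the original header): the intended
// invariant is capacity == size + waste + available, so the mutators are
// applied in balanced pairs (byte counts below are placeholders):
//
//   AllocationStats stats;
//   stats.ExpandSpace(area_bytes);      // capacity and size both grow
//   stats.DeallocateBytes(free_bytes);  // freed bytes become available
//   stats.AllocateBytes(object_bytes);  // available bytes become size again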
1296 
1297 
1298 // -----------------------------------------------------------------------------
1299 // Free lists for old object spaces
1300 //
1301 // Free-list nodes are free blocks in the heap. They look like heap objects
1302 // (free-list node pointers have the heap object tag, and they have a map like
1303 // a heap object). They have a size and a next pointer. The next pointer is
1304 // the raw address of the next free list node (or NULL).
1305 class FreeListNode: public HeapObject {
1306  public:
1307  // Obtain a free-list node from a raw address. This is not a cast because
1308  // it does not check nor require that the first word at the address is a map
1309  // pointer.
1310  static FreeListNode* FromAddress(Address address) {
1311  return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1312  }
1313 
1314  static inline bool IsFreeListNode(HeapObject* object);
1315 
1316  // Set the size in bytes, which can be read with HeapObject::Size(). This
1317  // function also writes a map to the first word of the block so that it
1318  // looks like a heap object to the garbage collector and heap iteration
1319  // functions.
1320  void set_size(Heap* heap, int size_in_bytes);
1321 
1322  // Accessors for the next field.
1323  inline FreeListNode* next();
1324  inline FreeListNode** next_address();
1325  inline void set_next(FreeListNode* next);
1326 
1327  inline void Zap();
1328 
1329  static inline FreeListNode* cast(MaybeObject* maybe) {
1330  ASSERT(!maybe->IsFailure());
1331  return reinterpret_cast<FreeListNode*>(maybe);
1332  }
1333 
1334  private:
1335  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1336 
1337  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1338 };
1339 
1340 
1341 // The free list for the old space. The free list is organized in such a way
1342 // as to encourage objects allocated around the same time to be near each
1343 // other. The normal way to allocate is intended to be by bumping a 'top'
1344 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1345 // find a new space to allocate from. This is done with the free list, which
1346 // is divided up into rough categories to cut down on waste. Having finer
1347 // categories would scatter allocation more.
1348 
1349 // The old space free list is organized in categories.
1350 // 1-31 words: Such small free areas are discarded for efficiency reasons.
1351 // They can be reclaimed by the compactor. However the distance between top
1352 // and limit may be this small.
1353 // 32-255 words: There is a list of spaces this large. It is used for top and
1354 // limit when the object we need to allocate is 1-31 words in size. These
1355 // spaces are called small.
1356 // 256-2047 words: There is a list of spaces this large. It is used for top and
1357 // limit when the object we need to allocate is 32-255 words in size. These
1358 // spaces are called medium.
1359 // 2048-16383 words: There is a list of spaces this large. It is used for top
1360 // and limit when the object we need to allocate is 256-2047 words in size.
1361 // These spaces are called large.
1362 // At least 16384 words. This list is for objects of 2048 words or larger.
1363 // Empty pages are added to this list. These spaces are called huge.
1364 class FreeList BASE_EMBEDDED {
1365  public:
1366  explicit FreeList(PagedSpace* owner);
1367 
1368  // Clear the free list.
1369  void Reset();
1370 
1371  // Return the number of bytes available on the free list.
1372  intptr_t available() { return available_; }
1373 
1374  // Place a node on the free list. The block of size 'size_in_bytes'
1375  // starting at 'start' is placed on the free list. The return value is the
1376  // number of bytes that have been lost due to internal fragmentation by
1377  // freeing the block. Bookkeeping information will be written to the block,
1378  // i.e., its contents will be destroyed. The start address should be word
1379  // aligned, and the size should be a non-zero multiple of the word size.
1380  int Free(Address start, int size_in_bytes);
1381 
1382  // Allocate a block of size 'size_in_bytes' from the free list. The block
1383  // is uninitialized. A failure is returned if no block is available. The
1384  // number of bytes lost to fragmentation is returned in the output parameter
1385  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1386  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1387 
1388 #ifdef DEBUG
1389  void Zap();
1390  static intptr_t SumFreeList(FreeListNode* node);
1391  static int FreeListLength(FreeListNode* cur);
1392  intptr_t SumFreeLists();
1393  bool IsVeryLong();
1394 #endif
1395 
1396  // Used after booting the VM.
1397  void RepairLists(Heap* heap);
1398 
1399  struct SizeStats {
1400  intptr_t Total() {
1401  return small_size_ + medium_size_ + large_size_ + huge_size_;
1402  }
1403 
1404  intptr_t small_size_;
1405  intptr_t medium_size_;
1406  intptr_t large_size_;
1407  intptr_t huge_size_;
1408  };
1409 
1410  void CountFreeListItems(Page* p, SizeStats* sizes);
1411 
1412  intptr_t EvictFreeListItems(Page* p);
1413 
1414  private:
1415  // The size range of blocks, in bytes.
1416  static const int kMinBlockSize = 3 * kPointerSize;
1417  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1418 
1419  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
1420 
1421  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1422 
1423  PagedSpace* owner_;
1424  Heap* heap_;
1425 
1426  // Total available bytes in all blocks on this free list.
1427  int available_;
1428 
1429  static const int kSmallListMin = 0x20 * kPointerSize;
1430  static const int kSmallListMax = 0xff * kPointerSize;
1431  static const int kMediumListMax = 0x7ff * kPointerSize;
1432  static const int kLargeListMax = 0x3fff * kPointerSize;
1433  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1434  static const int kMediumAllocationMax = kSmallListMax;
1435  static const int kLargeAllocationMax = kMediumListMax;
1436  FreeListNode* small_list_;
1437  FreeListNode* medium_list_;
1438  FreeListNode* large_list_;
1439  FreeListNode* huge_list_;
1440 
1441  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1442 };
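// Illustrative sketch (not part of the original header): the limits above
// (kSmallListMin = 0x20 words, kSmallListMax = 0xff words, kMediumListMax =
// 0x7ff words, kLargeListMax = 0x3fff words) place a freed block in exactly
// one category; a hypothetical classifier would read:
//
//   const char* CategoryFor(int size_in_bytes) {
//     if (size_in_bytes < 0x20 * kPointerSize) return "discarded (too small)";
//     if (size_in_bytes <= 0xff * kPointerSize) return "small";
//     if (size_in_bytes <= 0x7ff * kPointerSize) return "medium";
//     if (size_in_bytes <= 0x3fff * kPointerSize) return "large";
//     return "huge";
//   }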
1443 
1444 
1445 class PagedSpace : public Space {
1446  public:
1447  // Creates a space with a maximum capacity, and an id.
1448  PagedSpace(Heap* heap,
1449  intptr_t max_capacity,
1450  AllocationSpace id,
1451  Executability executable);
1452 
1453  virtual ~PagedSpace() {}
1454 
1455  // Set up the space using the given address range of virtual memory (from
1456  // the memory allocator's initial chunk) if possible. If the block of
1457  // addresses is not big enough to contain a single page-aligned page, a
1458  // fresh chunk will be allocated.
1459  bool SetUp();
1460 
1461  // Returns true if the space has been successfully set up and not
1462  // subsequently torn down.
1463  bool HasBeenSetUp();
1464 
1465  // Cleans up the space, frees all pages in this space except those belonging
1466  // to the initial chunk, uncommits addresses in the initial chunk.
1467  void TearDown();
1468 
1469  // Checks whether an object/address is in this space.
1470  inline bool Contains(Address a);
1471  bool Contains(HeapObject* o) { return Contains(o->address()); }
1472 
1473  // Given an address occupied by a live object, return that object if it is
1474  // in this space, or Failure::Exception() if it is not. The implementation
1475  // iterates over objects in the page containing the address, the cost is
1476  // linear in the number of objects in the page. It may be slow.
1477  MUST_USE_RESULT MaybeObject* FindObject(Address addr);
1478 
1479  // During boot the free_space_map is created, and afterwards we may need
1480  // to write it into the free list nodes that were already created.
1481  virtual void RepairFreeListsAfterBoot();
1482 
1483  // Prepares for a mark-compact GC.
1484  virtual void PrepareForMarkCompact();
1485 
1486  // Current capacity without growing (Size() + Available()).
1487  intptr_t Capacity() { return accounting_stats_.Capacity(); }
1488 
1489  // Total amount of memory committed for this space. For paged
1490  // spaces this equals the capacity.
1491  intptr_t CommittedMemory() { return Capacity(); }
1492 
1493  // Sets the capacity, the available space and the wasted space to zero.
1494  // The stats are rebuilt during sweeping by adding each page to the
1495  // capacity and the size when it is encountered. As free spaces are
1496  // discovered during the sweeping they are subtracted from the size and added
1497  // to the available and wasted totals.
1498  void ClearStats() {
1499  accounting_stats_.ClearSizeWaste();
1500  }
1501 
1502  // Available bytes without growing. These are the bytes on the free list.
1503  // The bytes in the linear allocation area are not included in this total
1504  // because updating the stats would slow down allocation. New pages are
1505  // immediately added to the free list so they show up here.
1506  intptr_t Available() { return free_list_.available(); }
1507 
1508  // Allocated bytes in this space. Garbage bytes that were not found due to
1509  // lazy sweeping are counted as being allocated! The bytes in the current
1510  // linear allocation area (between top and limit) are also counted here.
1511  virtual intptr_t Size() { return accounting_stats_.Size(); }
1512 
1513  // As size, but the bytes in lazily swept pages are estimated and the bytes
1514  // in the current linear allocation area are not included.
1515  virtual intptr_t SizeOfObjects() {
1517  return Size() - unswept_free_bytes_ - (limit() - top());
1518  }
1519 
1520  // Wasted bytes in this space. These are just the bytes that were thrown away
1521  // due to being too small to use for allocation. They do not include the
1522  // free bytes that were not found at all due to lazy sweeping.
1523  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1524 
1525  // Returns the allocation pointer in this space.
1526  Address top() { return allocation_info_.top; }
1527  Address limit() { return allocation_info_.limit; }
1528 
1529  // Allocate the requested number of bytes in the space if possible, return a
1530  // failure object if not.
1531  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1532 
1533  virtual bool ReserveSpace(int bytes);
1534 
1535  // Give a block of memory to the space's free list. It might be added to
1536  // the free list or accounted as waste.
1537  // If add_to_freelist is false then just accounting stats are updated and
1538  // no attempt to add area to free list is made.
1539  int Free(Address start, int size_in_bytes) {
1540  int wasted = free_list_.Free(start, size_in_bytes);
1541  accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1542  return size_in_bytes - wasted;
1543  }
1544 
1545  void ResetFreeList() {
1546  free_list_.Reset();
1547  }
1548 
1549  // Set space allocation info.
1551  ASSERT(top == limit ||
1552  Page::FromAddress(top) == Page::FromAddress(limit - 1));
1553  allocation_info_.top = top;
1554  allocation_info_.limit = limit;
1555  }
1556 
1557  void Allocate(int bytes) {
1558  accounting_stats_.AllocateBytes(bytes);
1559  }
1560 
1561  void IncreaseCapacity(int size) {
1562  accounting_stats_.ExpandSpace(size);
1563  }
1564 
1565  // Releases an unused page and shrinks the space.
1566  void ReleasePage(Page* page);
1567 
1568  // Releases all of the unused pages.
1569  void ReleaseAllUnusedPages();
1570 
1571  // The dummy page that anchors the linked list of pages.
1572  Page* anchor() { return &anchor_; }
1573 
1574 #ifdef VERIFY_HEAP
1575  // Verify integrity of this space.
1576  virtual void Verify(ObjectVisitor* visitor);
1577 
1578  // Overridden by subclasses to verify space-specific object
1579  // properties (e.g., only maps or free-list nodes are in map space).
1580  virtual void VerifyObject(HeapObject* obj) {}
1581 #endif
1582 
1583 #ifdef DEBUG
1584  // Print meta info and objects in this space.
1585  virtual void Print();
1586 
1587  // Reports statistics for the space
1588  void ReportStatistics();
1589 
1590  // Report code object related statistics
1591  void CollectCodeStatistics();
1592  static void ReportCodeStatistics();
1593  static void ResetCodeStatistics();
1594 #endif
1595 
1598 
1599  // Evacuation candidates are swept by evacuator. Needs to return a valid
1600  // result before _and_ after evacuation has finished.
1601  static bool ShouldBeSweptLazily(Page* p) {
1602  return !p->IsEvacuationCandidate() &&
1603  !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1604  !p->WasSweptPrecisely();
1605  }
1606 
1607  void SetPagesToSweep(Page* first) {
1608  ASSERT(unswept_free_bytes_ == 0);
1609  if (first == &anchor_) first = NULL;
1610  first_unswept_page_ = first;
1611  }
1612 
1613  void IncrementUnsweptFreeBytes(intptr_t by) {
1614  unswept_free_bytes_ += by;
1615  }
1616 
1617  void IncreaseUnsweptFreeBytes(Page* p) {
1618  ASSERT(ShouldBeSweptLazily(p));
1619  unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1620  }
1621 
1622  void DecreaseUnsweptFreeBytes(Page* p) {
1623  ASSERT(ShouldBeSweptLazily(p));
1624  unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1625  }
1626 
1627  bool AdvanceSweeper(intptr_t bytes_to_sweep);
1628 
1629  bool IsSweepingComplete() {
1630  return !first_unswept_page_->is_valid();
1631  }
1632 
1633  Page* FirstPage() { return anchor_.next_page(); }
1634  Page* LastPage() { return anchor_.prev_page(); }
1635 
1636  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
1637  free_list_.CountFreeListItems(p, sizes);
1638  }
1639 
1641 
1642  bool CanExpand();
1643 
1644  // Returns the number of total pages in this space.
1645  int CountTotalPages();
1646 
1647  // Return size of allocatable area on a page in this space.
1648  inline int AreaSize() {
1649  return area_size_;
1650  }
1651 
1652  protected:
1653  int area_size_;
1654 
1655  // Maximum capacity of this space.
1656  intptr_t max_capacity_;
1657 
1658  intptr_t SizeOfFirstPage();
1659 
1660  // Accounting information for this space.
1661  AllocationStats accounting_stats_;
1662 
1663  // The dummy page that anchors the double linked list of pages.
1664  Page anchor_;
1665 
1666  // The space's free list.
1667  FreeList free_list_;
1668 
1669  // Normal allocation information.
1670  AllocationInfo allocation_info_;
1671 
1672  // Bytes of each page that cannot be allocated. Possibly non-zero
1673  // for pages in spaces with only fixed-size objects. Always zero
1674  // for pages in spaces with variable sized objects (those pages are
1675  // padded with free-list nodes).
1677 
1679 
1680  // The first page to be swept when the lazy sweeper advances. Is set
1681  // to NULL when all pages have been swept.
1682  Page* first_unswept_page_;
1683 
1684  // The number of free bytes which could be reclaimed by advancing the
1685  // lazy sweeper. This is only an estimation because lazy sweeping is
1686  // done conservatively.
1687  intptr_t unswept_free_bytes_;
1688 
1689  // Expands the space by allocating a fixed number of pages. Returns false if
1690  // it cannot allocate requested number of pages from OS, or if the hard heap
1691  // size limit has been hit.
1692  bool Expand();
1693 
1694  // Generic fast case allocation function that tries linear allocation at the
1695  // address denoted by top in allocation_info_.
1696  inline HeapObject* AllocateLinearly(int size_in_bytes);
1697 
1698  // Slow path of AllocateRaw. This function is space-dependent.
1699  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1700 
1701  friend class PageIterator;
1702 };
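// Illustrative sketch (not part of the original header): the allocation and
// sweeping entry points above combine roughly as follows (identifiers are
// placeholders):
//
//   MaybeObject* result = space->AllocateRaw(size_in_bytes);
//   // On failure (space exhausted) a retry-after-GC failure is returned.
//
//   int wasted = space->Free(block_start, block_size);
//   // The block is put on the free list; `wasted` bytes were too small to use.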
1703 
1704 
1705 class NumberAndSizeInfo BASE_EMBEDDED {
1706  public:
1707  NumberAndSizeInfo() : number_(0), bytes_(0) {}
1708 
1709  int number() const { return number_; }
1710  void increment_number(int num) { number_ += num; }
1711 
1712  int bytes() const { return bytes_; }
1713  void increment_bytes(int size) { bytes_ += size; }
1714 
1715  void clear() {
1716  number_ = 0;
1717  bytes_ = 0;
1718  }
1719 
1720  private:
1721  int number_;
1722  int bytes_;
1723 };
1724 
1725 
1726 // HistogramInfo class for recording a single "bar" of a histogram. This
1727 // class is used for collecting statistics to print to the log file.
1728 class HistogramInfo: public NumberAndSizeInfo {
1729  public:
1730  HistogramInfo() : NumberAndSizeInfo() {}
1731 
1732  const char* name() { return name_; }
1733  void set_name(const char* name) { name_ = name; }
1734 
1735  private:
1736  const char* name_;
1737 };
1738 
1739 
1743 };
1744 
1745 
1746 class SemiSpace;
1747 
1748 
1749 class NewSpacePage : public MemoryChunk {
1750  public:
1751  // GC related flags copied from from-space to to-space when
1752  // flipping semispaces.
1753  static const intptr_t kCopyOnFlipFlagsMask =
1754  (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1755  (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1756  (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1757 
1759 
1760  inline NewSpacePage* next_page() const {
1761  return static_cast<NewSpacePage*>(next_chunk());
1762  }
1763 
1764  inline void set_next_page(NewSpacePage* page) {
1765  set_next_chunk(page);
1766  }
1767 
1768  inline NewSpacePage* prev_page() const {
1769  return static_cast<NewSpacePage*>(prev_chunk());
1770  }
1771 
1772  inline void set_prev_page(NewSpacePage* page) {
1773  set_prev_chunk(page);
1774  }
1775 
1776  SemiSpace* semi_space() {
1777  return reinterpret_cast<SemiSpace*>(owner());
1778  }
1779 
1780  bool is_anchor() { return !this->InNewSpace(); }
1781 
1782  static bool IsAtStart(Address addr) {
1783  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1784  == kObjectStartOffset;
1785  }
1786 
1787  static bool IsAtEnd(Address addr) {
1788  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1789  }
1790 
1791  Address address() {
1792  return reinterpret_cast<Address>(this);
1793  }
1794 
1795  // Finds the NewSpacePage containing the given address.
1796  static inline NewSpacePage* FromAddress(Address address_in_page) {
1797  Address page_start =
1798  reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
1799  ~Page::kPageAlignmentMask);
1800  NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
1801  return page;
1802  }
1803 
1804  // Find the page for a limit address. A limit address is either an address
1805  // inside a page, or the address right after the last byte of a page.
1806  static inline NewSpacePage* FromLimit(Address address_limit) {
1807  return NewSpacePage::FromAddress(address_limit - 1);
1808  }
1809 
1810  private:
1811  // Create a NewSpacePage object that is only used as anchor
1812  // for the doubly-linked list of real pages.
1813  explicit NewSpacePage(SemiSpace* owner) {
1814  InitializeAsAnchor(owner);
1815  }
1816 
1817  static NewSpacePage* Initialize(Heap* heap,
1818  Address start,
1819  SemiSpace* semi_space);
1820 
1821  // Initialize a fake NewSpacePage used as a sentinel at the ends
1822  // of a doubly-linked list of real NewSpacePages.
1823  // Only uses the prev/next links, and sets flags to not be in new-space.
1824  void InitializeAsAnchor(SemiSpace* owner);
1825 
1826  friend class SemiSpace;
1827  friend class SemiSpaceIterator;
1828 };
1829 
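// Illustrative sketch (not part of spaces.h): the address arithmetic above in
// use. New-space pages are page-aligned, so masking off the low offset bits of
// any interior address recovers the NewSpacePage header, and a limit (one past
// the last byte of a page) maps back to that same page via FromLimit().
//
//   Address addr = obj->address();                   // hypothetical object
//   NewSpacePage* page = NewSpacePage::FromAddress(addr);
//   ASSERT(!page->is_anchor());
//   ASSERT(NewSpacePage::FromLimit(page->area_end()) == page);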
1830 
1831 // -----------------------------------------------------------------------------
1832 // SemiSpace in young generation
1833 //
1834 // A semispace is a contiguous chunk of memory holding page-like memory
1835 // chunks. The mark-compact collector uses the memory of the first page in
1836 // the from space as a marking stack when tracing live objects.
1837 
1838 class SemiSpace : public Space {
1839  public:
1840  // Constructor.
1841  SemiSpace(Heap* heap, SemiSpaceId semispace)
1842  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1843  start_(NULL),
1844  age_mark_(NULL),
1845  id_(semispace),
1846  anchor_(this),
1847  current_page_(NULL) { }
1848 
1849  // Sets up the semispace using the given chunk.
1850  void SetUp(Address start, int initial_capacity, int maximum_capacity);
1851 
1852  // Tear down the space. Heap memory was not allocated by the space, so it
1853  // is not deallocated here.
1854  void TearDown();
1855 
1856  // True if the space has been set up but not torn down.
1857  bool HasBeenSetUp() { return start_ != NULL; }
1858 
1859  // Grow the semispace to the new capacity. The new capacity
1860  // requested must be larger than the current capacity and less than
1861  // the maximum capacity.
1862  bool GrowTo(int new_capacity);
1863 
1864  // Shrinks the semispace to the new capacity. The new capacity
1865  // requested must be more than the amount of used memory in the
1866  // semispace and less than the current capacity.
1867  bool ShrinkTo(int new_capacity);
1868 
1869  // Returns the start address of the first page of the space.
1870  Address space_start() {
1871  ASSERT(anchor_.next_page() != &anchor_);
1872  return anchor_.next_page()->area_start();
1873  }
1874 
1875  // Returns the start address of the current page of the space.
1876  Address page_low() {
1877  return current_page_->area_start();
1878  }
1879 
1880  // Returns one past the end address of the space.
1881  Address space_end() {
1882  return anchor_.prev_page()->area_end();
1883  }
1884 
1885  // Returns one past the end address of the current page of the space.
1886  Address page_high() {
1887  return current_page_->area_end();
1888  }
1889 
1890  bool AdvancePage() {
1891  NewSpacePage* next_page = current_page_->next_page();
1892  if (next_page == anchor()) return false;
1893  current_page_ = next_page;
1894  return true;
1895  }
1896 
1897  // Resets the space to using the first page.
1898  void Reset();
1899 
1900  // Age mark accessors.
1901  Address age_mark() { return age_mark_; }
1902  void set_age_mark(Address mark);
1903 
1904  // True if the address is in the address range of this semispace (not
1905  // necessarily below the allocation pointer).
1906  bool Contains(Address a) {
1907  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1908  == reinterpret_cast<uintptr_t>(start_);
1909  }
1910 
1911  // True if the object is a heap object in the address range of this
1912  // semispace (not necessarily below the allocation pointer).
1913  bool Contains(Object* o) {
1914  return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1915  }
1916 
1917  // If we don't have these here then SemiSpace will be abstract. However
1918  // they should never be called.
1919  virtual intptr_t Size() {
1920  UNREACHABLE();
1921  return 0;
1922  }
1923 
1924  virtual bool ReserveSpace(int bytes) {
1925  UNREACHABLE();
1926  return false;
1927  }
1928 
1929  bool is_committed() { return committed_; }
1930  bool Commit();
1931  bool Uncommit();
1932 
1933  NewSpacePage* first_page() { return anchor_.next_page(); }
1934  NewSpacePage* current_page() { return current_page_; }
1935 
1936 #ifdef VERIFY_HEAP
1937  virtual void Verify();
1938 #endif
1939 
1940 #ifdef DEBUG
1941  virtual void Print();
1942  // Validate a range of addresses in a SemiSpace.
1943  // The "from" address must be on a page prior to the "to" address,
1944  // in the linked page order, or it must be earlier on the same page.
1945  static void AssertValidRange(Address from, Address to);
1946 #else
1947  // Do nothing.
1948  inline static void AssertValidRange(Address from, Address to) {}
1949 #endif
1950 
1951  // Returns the current capacity of the semi space.
1952  int Capacity() { return capacity_; }
1953 
1954  // Returns the maximum capacity of the semi space.
1955  int MaximumCapacity() { return maximum_capacity_; }
1956 
1957  // Returns the initial capacity of the semi space.
1958  int InitialCapacity() { return initial_capacity_; }
1959 
1960  SemiSpaceId id() { return id_; }
1961 
1962  static void Swap(SemiSpace* from, SemiSpace* to);
1963 
1964  private:
1965  // Flips the semispace between being from-space and to-space.
1966  // Copies the flags into the masked positions on all pages in the space.
1967  void FlipPages(intptr_t flags, intptr_t flag_mask);
1968 
1969  NewSpacePage* anchor() { return &anchor_; }
1970 
1971  // The current and maximum capacity of the space.
1972  int capacity_;
1973  int maximum_capacity_;
1974  int initial_capacity_;
1975 
1976  // The start address of the space.
1977  Address start_;
1978  // Used to govern object promotion during mark-compact collection.
1979  Address age_mark_;
1980 
1981  // Masks and comparison values to test for containment in this semispace.
1982  uintptr_t address_mask_;
1983  uintptr_t object_mask_;
1984  uintptr_t object_expected_;
1985 
1986  bool committed_;
1987  SemiSpaceId id_;
1988 
1989  NewSpacePage anchor_;
1990  NewSpacePage* current_page_;
1991 
1992  friend class SemiSpaceIterator;
1993  friend class NewSpacePageIterator;
1994  public:
1995  TRACK_MEMORY("SemiSpace")
1996 };
1997 
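// Illustrative sketch (not part of spaces.h): why SemiSpace::Contains() is a
// single mask-and-compare. Because a semispace is one contiguous,
// size-aligned block, clearing the low bits of an address yields the block's
// start if and only if the address lies inside it. Hypothetical numbers:
//
//   uintptr_t start = 0x3f800000;                    // start_ (4 MB aligned)
//   uintptr_t mask  = ~(uintptr_t)((4 << 20) - 1);   // address_mask_
//   uintptr_t a     = 0x3f9234a8;                    // an address to test
//   bool contained  = ((a & mask) == start);         // true here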
1998 
1999 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2000 // semispace of the heap's new space. It iterates over the objects in the
2001 // semispace from a given start address (defaulting to the bottom of the
2002 // semispace) to the top of the semispace. New objects allocated after the
2003 // iterator is created are not iterated.
2004 class SemiSpaceIterator : public ObjectIterator {
2005  public:
2006  // Create an iterator over the objects in the given space. If no start
2007  // address is given, the iterator starts from the bottom of the space. If
2008  // no size function is given, the iterator calls Object::Size().
2009 
2010  // Iterate over all of allocated to-space.
2011  explicit SemiSpaceIterator(NewSpace* space);
2012  // Iterate over all of allocated to-space, with a custom size function.
2013  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2014  // Iterate over part of allocated to-space, from start to the end
2015  // of allocation.
2016  SemiSpaceIterator(NewSpace* space, Address start);
2017  // Iterate from one address to another in the same semi-space.
2018  SemiSpaceIterator(Address from, Address to);
2019 
2020  HeapObject* Next() {
2021  if (current_ == limit_) return NULL;
2022  if (NewSpacePage::IsAtEnd(current_)) {
2023  NewSpacePage* page = NewSpacePage::FromLimit(current_);
2024  page = page->next_page();
2025  ASSERT(!page->is_anchor());
2026  current_ = page->area_start();
2027  if (current_ == limit_) return NULL;
2028  }
2029 
2030  HeapObject* object = HeapObject::FromAddress(current_);
2031  int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2032 
2033  current_ += size;
2034  return object;
2035  }
2036 
2037  // Implementation of the ObjectIterator functions.
2038  virtual HeapObject* next_object() { return Next(); }
2039 
2040  private:
2041  void Initialize(Address start,
2042  Address end,
2043  HeapObjectCallback size_func);
2044 
2045  // The current iteration point.
2046  Address current_;
2047  // The end of iteration.
2048  Address limit_;
2049  // The callback function.
2050  HeapObjectCallback size_func_;
2051 };
2052 
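// Illustrative sketch (not part of spaces.h): walking every object in the
// active semispace with the iterator above (assumes a Heap* named heap).
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
//     // ... visit object; objects allocated after this point are skipped ...
//   }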
2053 
2054 // -----------------------------------------------------------------------------
2055 // A PageIterator iterates the pages in a semi-space.
2056 class NewSpacePageIterator BASE_EMBEDDED {
2057  public:
2058  // Make an iterator that runs over all pages in to-space.
2059  explicit inline NewSpacePageIterator(NewSpace* space);
2060 
2061  // Make an iterator that runs over all pages in the given semispace,
2062  // even those not used in allocation.
2063  explicit inline NewSpacePageIterator(SemiSpace* space);
2064 
2065  // Make an iterator that iterates from the page containing start
2066  // to the page that contains limit in the same semispace.
2067  inline NewSpacePageIterator(Address start, Address limit);
2068 
2069  inline bool has_next();
2070  inline NewSpacePage* next();
2071 
2072  private:
2073  NewSpacePage* prev_page_; // Previous page returned.
2074  // Next page that will be returned. Cached here so that we can use this
2075  // iterator for operations that deallocate pages.
2076  NewSpacePage* next_page_;
2077  // Last page returned.
2078  NewSpacePage* last_page_;
2079 };
2080 
2081 
2082 // -----------------------------------------------------------------------------
2083 // The young generation space.
2084 //
2085 // The new space consists of a contiguous pair of semispaces. It simply
2086 // forwards most functions to the appropriate semispace.
2087 
2088 class NewSpace : public Space {
2089  public:
2090  // Constructor.
2091  explicit NewSpace(Heap* heap)
2092  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2093  to_space_(heap, kToSpace),
2094  from_space_(heap, kFromSpace),
2095  reservation_(),
2096  inline_allocation_limit_step_(0) {}
2097 
2098  // Sets up the new space using the given chunk.
2099  bool SetUp(int reserved_semispace_size_, int max_semispace_size);
2100 
2101  // Tears down the space. Heap memory was not allocated by the space, so it
2102  // is not deallocated here.
2103  void TearDown();
2104 
2105  // True if the space has been set up but not torn down.
2106  bool HasBeenSetUp() {
2107  return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2108  }
2109 
2110  // Flip the pair of spaces.
2111  void Flip();
2112 
2113  // Grow the capacity of the semispaces. Assumes that they are not at
2114  // their maximum capacity.
2115  void Grow();
2116 
2117  // Shrink the capacity of the semispaces.
2118  void Shrink();
2119 
2120  // True if the address or object lies in the address range of either
2121  // semispace (not necessarily below the allocation pointer).
2122  bool Contains(Address a) {
2123  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2124  == reinterpret_cast<uintptr_t>(start_);
2125  }
2126 
2127  bool Contains(Object* o) {
2128  Address a = reinterpret_cast<Address>(o);
2129  return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2130  }
2131 
2132  // Return the allocated bytes in the active semispace.
2133  virtual intptr_t Size() {
2134  return pages_used_ * NewSpacePage::kAreaSize +
2135  static_cast<int>(top() - to_space_.page_low());
2136  }
2137 
2138  // The same, but returning an int. We have to have the one that returns
2139  // intptr_t because it is inherited, but if we know we are dealing with the
2140  // new space, which can't get as big as the other spaces, then this is useful:
2141  int SizeAsInt() { return static_cast<int>(Size()); }
2142 
2143  // Return the current capacity of a semispace.
2144  intptr_t EffectiveCapacity() {
2145  SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2146  return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2147  }
2148 
2149  // Return the current capacity of a semispace.
2150  intptr_t Capacity() {
2151  ASSERT(to_space_.Capacity() == from_space_.Capacity());
2152  return to_space_.Capacity();
2153  }
2154 
2155  // Return the total amount of memory committed for new space.
2156  intptr_t CommittedMemory() {
2157  if (from_space_.is_committed()) return 2 * Capacity();
2158  return Capacity();
2159  }
2160 
2161  // Return the available bytes without growing.
2162  intptr_t Available() {
2163  return Capacity() - Size();
2164  }
2165 
2166  // Return the maximum capacity of a semispace.
2167  int MaximumCapacity() {
2168  ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2169  return to_space_.MaximumCapacity();
2170  }
2171 
2172  // Returns the initial capacity of a semispace.
2173  int InitialCapacity() {
2174  ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2175  return to_space_.InitialCapacity();
2176  }
2177 
2178  // Return the address of the allocation pointer in the active semispace.
2179  Address top() {
2180  ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
2181  return allocation_info_.top;
2182  }
2183  // Return the address of the first object in the active semispace.
2184  Address bottom() { return to_space_.space_start(); }
2185 
2186  // Get the age mark of the inactive semispace.
2187  Address age_mark() { return from_space_.age_mark(); }
2188  // Set the age mark in the active semispace.
2189  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2190 
2191  // The start address of the space and a bit mask. Anding an address in the
2192  // new space with the mask will result in the start address.
2193  Address start() { return start_; }
2194  uintptr_t mask() { return address_mask_; }
2195 
2196  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2197  ASSERT(Contains(addr));
2198  ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2199  IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2200  return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2201  }
2202 
2203  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2204  return reinterpret_cast<Address>(index << kPointerSizeLog2);
2205  }
2206 
2207  // The allocation top and limit addresses.
2208  Address* allocation_top_address() { return &allocation_info_.top; }
2209  Address* allocation_limit_address() { return &allocation_info_.limit; }
2210 
2211  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
2212 
2213  // Reset the allocation pointer to the beginning of the active semispace.
2214  void ResetAllocationInfo();
2215 
2216  void LowerInlineAllocationLimit(intptr_t step) {
2217  inline_allocation_limit_step_ = step;
2218  if (step == 0) {
2219  allocation_info_.limit = to_space_.page_high();
2220  } else {
2221  allocation_info_.limit = Min(
2222  allocation_info_.top + inline_allocation_limit_step_,
2223  allocation_info_.limit);
2224  }
2225  top_on_previous_step_ = allocation_info_.top;
2226  }
2227 
2228  // Get the extent of the inactive semispace (for use as a marking stack,
2229  // or to zap it). Notice: space-addresses are not necessarily on the
2230  // same page, so FromSpaceStart() might be above FromSpaceEnd().
2231  Address FromSpacePageLow() { return from_space_.page_low(); }
2232  Address FromSpacePageHigh() { return from_space_.page_high(); }
2233  Address FromSpaceStart() { return from_space_.space_start(); }
2234  Address FromSpaceEnd() { return from_space_.space_end(); }
2235 
2236  // Get the extent of the active semispace's pages' memory.
2237  Address ToSpaceStart() { return to_space_.space_start(); }
2238  Address ToSpaceEnd() { return to_space_.space_end(); }
2239 
2240  inline bool ToSpaceContains(Address address) {
2241  return to_space_.Contains(address);
2242  }
2243  inline bool FromSpaceContains(Address address) {
2244  return from_space_.Contains(address);
2245  }
2246 
2247  // True if the object is a heap object in the address range of the
2248  // respective semispace (not necessarily below the allocation pointer of the
2249  // semispace).
2250  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2251  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2252 
2253  // Try to switch the active semispace to a new, empty page.
2254  // Returns false if this isn't possible or reasonable (i.e., there
2255  // are no pages, or the current page is already empty), or true
2256  // if successful.
2257  bool AddFreshPage();
2258 
2259  virtual bool ReserveSpace(int bytes);
2260 
2261  // Resizes a sequential string which must be the most recent thing that was
2262  // allocated in new space.
2263  template <typename StringType>
2264  inline void ShrinkStringAtAllocationBoundary(String* string, int len);
2265 
2266 #ifdef VERIFY_HEAP
2267  // Verify the active semispace.
2268  virtual void Verify();
2269 #endif
2270 
2271 #ifdef DEBUG
2272  // Print the active semispace.
2273  virtual void Print() { to_space_.Print(); }
2274 #endif
2275 
2276  // Iterates the active semispace to collect statistics.
2277  void CollectStatistics();
2278  // Reports previously collected statistics of the active semispace.
2279  void ReportStatistics();
2280  // Clears previously collected statistics.
2281  void ClearHistograms();
2282 
2283  // Record the allocation or promotion of a heap object. Note that we don't
2284  // record every single allocation, but only those that happen in the
2285  // to space during a scavenge GC.
2286  void RecordAllocation(HeapObject* obj);
2287  void RecordPromotion(HeapObject* obj);
2288 
2289  // Return whether the operation succeeded.
2290  bool CommitFromSpaceIfNeeded() {
2291  if (from_space_.is_committed()) return true;
2292  return from_space_.Commit();
2293  }
2294 
2295  bool UncommitFromSpace() {
2296  if (!from_space_.is_committed()) return true;
2297  return from_space_.Uncommit();
2298  }
2299 
2300  inline intptr_t inline_allocation_limit_step() {
2301  return inline_allocation_limit_step_;
2302  }
2303 
2304  SemiSpace* active_space() { return &to_space_; }
2305 
2306  private:
2307  // Update allocation info to match the current to-space page.
2308  void UpdateAllocationInfo();
2309 
2310  Address chunk_base_;
2311  uintptr_t chunk_size_;
2312 
2313  // The semispaces.
2314  SemiSpace to_space_;
2315  SemiSpace from_space_;
2316  VirtualMemory reservation_;
2317  int pages_used_;
2318 
2319  // Start address and bit mask for containment testing.
2320  Address start_;
2321  uintptr_t address_mask_;
2322  uintptr_t object_mask_;
2323  uintptr_t object_expected_;
2324 
2325  // Allocation pointer and limit for normal allocation and allocation during
2326  // mark-compact collection.
2327  AllocationInfo allocation_info_;
2328 
2329  // When incremental marking is active we will set allocation_info_.limit
2330  // to be lower than actual limit and then will gradually increase it
2331  // in steps to guarantee that we do incremental marking steps even
2332  // when all allocation is performed from inlined generated code.
2333  intptr_t inline_allocation_limit_step_;
2334 
2335  Address top_on_previous_step_;
2336 
2337  HistogramInfo* allocated_histogram_;
2338  HistogramInfo* promoted_histogram_;
2339 
2340  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
2341 
2342  friend class SemiSpaceIterator;
2343 
2344  public:
2345  TRACK_MEMORY("NewSpace")
2346 };
2347 
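// Illustrative sketch (not part of spaces.h): how the inline allocation limit
// step interacts with incremental marking. Pulling the limit below the real
// end of the page forces inlined allocation to take the slow path
// periodically, which is where incremental marking steps are performed.
// The step size below is hypothetical.
//
//   NewSpace* new_space = heap->new_space();
//   new_space->LowerInlineAllocationLimit(1024);
//   // Generated code now bails out to the slow path roughly every 1 KB of
//   // allocation, even though Available() may still be large:
//   intptr_t head_room = new_space->Available();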
2348 
2349 // -----------------------------------------------------------------------------
2350 // Old object space (excluding map objects)
2351 
2352 class OldSpace : public PagedSpace {
2353  public:
2354  // Creates an old space object with a given maximum capacity.
2355  // The constructor does not allocate pages from OS.
2356  OldSpace(Heap* heap,
2357  intptr_t max_capacity,
2358  AllocationSpace id,
2359  Executability executable)
2360  : PagedSpace(heap, max_capacity, id, executable) {
2361  page_extra_ = 0;
2362  }
2363 
2364  // The limit of allocation for a page in this space.
2365  virtual Address PageAllocationLimit(Page* page) {
2366  return page->area_end();
2367  }
2368 
2369  public:
2370  TRACK_MEMORY("OldSpace")
2371 };
2372 
2373 
2374 // For contiguous spaces, top should be in the space (or at the end) and limit
2375 // should be the end of the space.
2376 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2377  SLOW_ASSERT((space).page_low() <= (info).top \
2378  && (info).top <= (space).page_high() \
2379  && (info).limit <= (space).page_high())
2380 
2381 
2382 // -----------------------------------------------------------------------------
2383 // Old space for objects of a fixed size
2384 
2385 class FixedSpace : public PagedSpace {
2386  public:
2387  FixedSpace(Heap* heap,
2388  intptr_t max_capacity,
2389  AllocationSpace id,
2390  int object_size_in_bytes,
2391  const char* name)
2392  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2393  object_size_in_bytes_(object_size_in_bytes),
2394  name_(name) {
2395  page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2396  }
2397 
2398  // The limit of allocation for a page in this space.
2399  virtual Address PageAllocationLimit(Page* page) {
2400  return page->area_end() - page_extra_;
2401  }
2402 
2403  int object_size_in_bytes() { return object_size_in_bytes_; }
2404 
2405  // Prepares for a mark-compact GC.
2406  virtual void PrepareForMarkCompact();
2407 
2408  private:
2409  // The size of objects in this space.
2410  int object_size_in_bytes_;
2411 
2412  // The name of this space.
2413  const char* name_;
2414 };
2415 
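// Illustrative sketch (not part of spaces.h): the role of page_extra_ in a
// fixed-size space. The tail of each page's object area that cannot hold one
// more whole object is unusable, so the per-page allocation limit is pulled in
// by that remainder. The object size below is hypothetical.
//
//   int object_size = 88;
//   int extra       = Page::kNonCodeObjectAreaSize % object_size;
//   // PageAllocationLimit(page) == page->area_end() - extra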
2416 
2417 // -----------------------------------------------------------------------------
2418 // Old space for all map objects
2419 
2420 class MapSpace : public FixedSpace {
2421  public:
2422  // Creates a map space object with a maximum capacity.
2423  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2424  : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
2425  max_map_space_pages_(kMaxMapPageIndex - 1) {
2426  }
2427 
2428  // Given an index, returns the page address.
2429  // TODO(1600): this limit is artificial just to keep code compilable
2430  static const int kMaxMapPageIndex = 1 << 16;
2431 
2432  virtual int RoundSizeDownToObjectAlignment(int size) {
2433  if (IsPowerOf2(Map::kSize)) {
2434  return RoundDown(size, Map::kSize);
2435  } else {
2436  return (size / Map::kSize) * Map::kSize;
2437  }
2438  }
2439 
2440  protected:
2441  virtual void VerifyObject(HeapObject* obj);
2442 
2443  private:
2444  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2445 
2446  // Do map space compaction if there is a page gap.
2447  int CompactionThreshold() {
2448  return kMapsPerPage * (max_map_space_pages_ - 1);
2449  }
2450 
2451  const int max_map_space_pages_;
2452 
2453  public:
2454  TRACK_MEMORY("MapSpace")
2455 };
2456 
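// Illustrative sketch (not part of spaces.h): RoundSizeDownToObjectAlignment()
// truncates a byte count to a whole number of fixed-size objects. With a
// hypothetical Map::kSize of 88 bytes:
//
//   int size    = 1000;
//   int rounded = (size / 88) * 88;                  // == 968, i.e. 11 maps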
2457 
2458 // -----------------------------------------------------------------------------
2459 // Old space for all global object property cell objects
2460 
2461 class CellSpace : public FixedSpace {
2462  public:
2463  // Creates a property cell space object with a maximum capacity.
2464  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2465  : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2466  {}
2467 
2468  virtual int RoundSizeDownToObjectAlignment(int size) {
2469  if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
2470  return RoundDown(size, JSGlobalPropertyCell::kSize);
2471  } else {
2472  return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
2473  }
2474  }
2475 
2476  protected:
2477  virtual void VerifyObject(HeapObject* obj);
2478 
2479  public:
2480  TRACK_MEMORY("CellSpace")
2481 };
2482 
2483 
2484 // -----------------------------------------------------------------------------
2485 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2486 // the large object space. A large object is allocated from OS heap with
2487 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2488 // A large object always starts at offset Page::kObjectStartOffset within its page.
2489 // Large objects do not move during garbage collections.
2490 
2491 class LargeObjectSpace : public Space {
2492  public:
2493  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2494  virtual ~LargeObjectSpace() {}
2495 
2496  // Initializes internal data structures.
2497  bool SetUp();
2498 
2499  // Releases internal resources, frees objects in this space.
2500  void TearDown();
2501 
2502  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2503  if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2504  return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2505  }
2506 
2507  // Shared implementation of AllocateRaw, AllocateRawCode and
2508  // AllocateRawFixedArray.
2509  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2510  Executability executable);
2511 
2512  // Available bytes for objects in this space.
2513  inline intptr_t Available();
2514 
2515  virtual intptr_t Size() {
2516  return size_;
2517  }
2518 
2519  virtual intptr_t SizeOfObjects() {
2520  return objects_size_;
2521  }
2522 
2523  intptr_t CommittedMemory() {
2524  return Size();
2525  }
2526 
2527  int PageCount() {
2528  return page_count_;
2529  }
2530 
2531  // Finds an object for a given address, returns Failure::Exception()
2532  // if it is not found. The function iterates through all objects in this
2533  // space, so it may be slow.
2534  MaybeObject* FindObject(Address a);
2535 
2536  // Finds a large object page containing the given address, returns NULL
2537  // if such a page doesn't exist.
2538  LargePage* FindPage(Address a);
2539 
2540  // Frees unmarked objects.
2541  void FreeUnmarkedObjects();
2542 
2543  // Checks whether a heap object is in this space; O(1).
2544  bool Contains(HeapObject* obj);
2545 
2546  // Checks whether the space is empty.
2547  bool IsEmpty() { return first_page_ == NULL; }
2548 
2549  // See the comments for ReserveSpace in the Space class. This has to be
2550  // called after ReserveSpace has been called on the paged spaces, since they
2551  // may use some memory, leaving less for large objects.
2552  virtual bool ReserveSpace(int bytes);
2553 
2554  LargePage* first_page() { return first_page_; }
2555 
2556 #ifdef VERIFY_HEAP
2557  virtual void Verify();
2558 #endif
2559 
2560 #ifdef DEBUG
2561  virtual void Print();
2562  void ReportStatistics();
2563  void CollectCodeStatistics();
2564 #endif
2565  // Checks whether an address is in the object area in this space. It
2566  // iterates all objects in the space. May be slow.
2567  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2568 
2569  private:
2570  intptr_t max_capacity_;
2571  // The head of the linked list of large object chunks.
2572  LargePage* first_page_;
2573  intptr_t size_; // allocated bytes
2574  int page_count_; // number of chunks
2575  intptr_t objects_size_; // size of objects
2576  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2577  HashMap chunk_map_;
2578 
2579  friend class LargeObjectIterator;
2580 
2581  public:
2582  TRACK_MEMORY("LargeObjectSpace")
2583 };
2584 
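// Illustrative sketch (not part of spaces.h): ObjectSizeFor() in action. Each
// large-object chunk reserves Page::kPageSize + Page::kObjectStartOffset bytes
// of overhead, so only the remainder of the chunk is available for the object.
//
//   intptr_t chunk_size  = 3 * Page::kPageSize;      // hypothetical chunk
//   intptr_t object_size = LargeObjectSpace::ObjectSizeFor(chunk_size);
//   // object_size == chunk_size - Page::kPageSize - Page::kObjectStartOffset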
2585 
2586 class LargeObjectIterator: public ObjectIterator {
2587  public:
2588  explicit LargeObjectIterator(LargeObjectSpace* space);
2589  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2590 
2591  HeapObject* Next();
2592 
2593  // implementation of ObjectIterator.
2594  virtual HeapObject* next_object() { return Next(); }
2595 
2596  private:
2597  LargePage* current_;
2598  HeapObjectCallback size_func_;
2599 };
2600 
2601 
2602 // Iterates over the chunks (pages and large object pages) that can contain
2603 // pointers to new space.
2604 class PointerChunkIterator BASE_EMBEDDED {
2605  public:
2606  inline explicit PointerChunkIterator(Heap* heap);
2607 
2608  // Return NULL when the iterator is done.
2609  MemoryChunk* next() {
2610  switch (state_) {
2611  case kOldPointerState: {
2612  if (old_pointer_iterator_.has_next()) {
2613  return old_pointer_iterator_.next();
2614  }
2615  state_ = kMapState;
2616  // Fall through.
2617  }
2618  case kMapState: {
2619  if (map_iterator_.has_next()) {
2620  return map_iterator_.next();
2621  }
2622  state_ = kLargeObjectState;
2623  // Fall through.
2624  }
2625  case kLargeObjectState: {
2626  HeapObject* heap_object;
2627  do {
2628  heap_object = lo_iterator_.Next();
2629  if (heap_object == NULL) {
2630  state_ = kFinishedState;
2631  return NULL;
2632  }
2633  // Fixed arrays are the only pointer-containing objects in large
2634  // object space.
2635  } while (!heap_object->IsFixedArray());
2636  MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2637  return answer;
2638  }
2639  case kFinishedState:
2640  return NULL;
2641  default:
2642  break;
2643  }
2644  UNREACHABLE();
2645  return NULL;
2646  }
2647 
2648 
2649  private:
2650  enum State {
2651  kOldPointerState,
2652  kMapState,
2653  kLargeObjectState,
2654  kFinishedState
2655  };
2656  State state_;
2657  PageIterator old_pointer_iterator_;
2658  PageIterator map_iterator_;
2659  LargeObjectIterator lo_iterator_;
2660 };
2661 
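// Illustrative sketch (not part of spaces.h): visiting every chunk that may
// contain pointers into new space (assumes a Heap* named heap).
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... scan the chunk's slots for references to new space ...
//   }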
2662 
2663 #ifdef DEBUG
2664 struct CommentStatistic {
2665  const char* comment;
2666  int size;
2667  int count;
2668  void Clear() {
2669  comment = NULL;
2670  size = 0;
2671  count = 0;
2672  }
2673  // Must be small, since an iteration is used for lookup.
2674  static const int kMaxComments = 64;
2675 };
2676 #endif
2677 
2678 
2679 } } // namespace v8::internal
2680 
2681 #endif // V8_SPACES_H_