v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
spaces.h
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_SPACES_H_
29 #define V8_SPACES_H_
30 
31 #include "allocation.h"
32 #include "hashmap.h"
33 #include "list.h"
34 #include "log.h"
35 #include "platform/mutex.h"
36 #include "v8utils.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 class Isolate;
42 
43 // -----------------------------------------------------------------------------
44 // Heap structures:
45 //
46 // A JS heap consists of a young generation, an old generation, and a large
47 // object space. The young generation is divided into two semispaces. A
48 // scavenger implements Cheney's copying algorithm. The old generation is
49 // separated into a map space and an old object space. The map space contains
50 // all (and only) map objects, the rest of old objects go into the old space.
51 // The old generation is collected by a mark-sweep-compact collector.
52 //
53 // The semispaces of the young generation are contiguous. The old and map
54 // spaces each consist of a list of pages. A page has a page header and an object
55 // area.
56 //
57 // There is a separate large object space for objects larger than
58 // Page::kMaxHeapObjectSize, so that they do not have to move during
59 // collection. The large object space is paged. Pages in large object space
60 // may be larger than the page size.
61 //
62 // A store-buffer based write barrier is used to keep track of intergenerational
63 // references. See store-buffer.h.
64 //
65 // During scavenges and mark-sweep collections we sometimes (after a store
66 // buffer overflow) iterate intergenerational pointers without decoding heap
67 // object maps so if the page belongs to old pointer space or large object
68 // space it is essential to guarantee that the page does not contain any
69 // garbage pointers to new space: every pointer aligned word which satisfies
70 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
71 // new space. Thus objects in old pointer and large object spaces should have a
72 // special layout (e.g. no bare integer fields). This requirement does not
73 // apply to map space which is iterated in a special fashion. However we still
74 // require pointer fields of dead maps to be cleaned.
75 //
76 // To enable lazy cleaning of old space pages we can mark chunks of the page
77 // as being garbage. Garbage sections are marked with a special map. These
78 // sections are skipped when scanning the page, even if we are otherwise
79 // scanning without regard for object boundaries. Garbage sections are chained
80 // together to form a free list after a GC. Garbage sections created outside
81 // of GCs by object truncation etc. may not be in the free list chain. Very
82 // small free spaces are ignored; they need only be cleaned of bogus pointers
83 // into new space.
84 //
85 // Each page may have up to one special garbage section. The start of this
86 // section is denoted by the top field in the space. The end of the section
87 // is denoted by the limit field in the space. This special garbage section
88 // is not marked with a free space map in the data. The point of this section
89 // is to enable linear allocation without having to constantly update the byte
90 // array every time the top field is updated and a new object is created. The
91 // special garbage section is not in the chain of garbage sections.
92 //
93 // Since the top and limit fields are in the space, not the page, only one page
94 // has a special garbage section, and if the top and limit are equal then there
95 // is no special garbage section.
96 
97 // Some assertion macros used in the debugging mode.
98 
99 #define ASSERT_PAGE_ALIGNED(address) \
100  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
101 
102 #define ASSERT_OBJECT_ALIGNED(address) \
103  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
104 
105 #define ASSERT_OBJECT_SIZE(size) \
106  ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
107 
108 #define ASSERT_PAGE_OFFSET(offset) \
109  ASSERT((Page::kObjectStartOffset <= offset) \
110  && (offset <= Page::kPageSize))
111 
112 #define ASSERT_MAP_PAGE_INDEX(index) \
113  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
114 
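As an illustrative aside (not part of spaces.h): the alignment macros above reduce to simple mask checks. A minimal sketch, assuming 1 MB pages (kPageSizeBits == 20, as described in the comments above); the constant names here are local to the example.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPageSize = uintptr_t(1) << 20;   // assumed 1 MB pages
  const uintptr_t kPageAlignmentMask = kPageSize - 1;
  uintptr_t chunk = 0x40100000;                     // hypothetical page-aligned base
  assert((chunk & kPageAlignmentMask) == 0);        // what ASSERT_PAGE_ALIGNED checks
  assert(((chunk + 8) & kPageAlignmentMask) != 0);  // an interior address is not aligned
  return 0;
}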
115 
116 class PagedSpace;
117 class MemoryAllocator;
118 class AllocationInfo;
119 class Space;
120 class FreeList;
121 class MemoryChunk;
122 
123 class MarkBit {
124  public:
125  typedef uint32_t CellType;
126 
127  inline MarkBit(CellType* cell, CellType mask, bool data_only)
128  : cell_(cell), mask_(mask), data_only_(data_only) { }
129 
130  inline CellType* cell() { return cell_; }
131  inline CellType mask() { return mask_; }
132 
133 #ifdef DEBUG
134  bool operator==(const MarkBit& other) {
135  return cell_ == other.cell_ && mask_ == other.mask_;
136  }
137 #endif
138 
139  inline void Set() { *cell_ |= mask_; }
140  inline bool Get() { return (*cell_ & mask_) != 0; }
141  inline void Clear() { *cell_ &= ~mask_; }
142 
143  inline bool data_only() { return data_only_; }
144 
145  inline MarkBit Next() {
146  CellType new_mask = mask_ << 1;
147  if (new_mask == 0) {
148  return MarkBit(cell_ + 1, 1, data_only_);
149  } else {
150  return MarkBit(cell_, new_mask, data_only_);
151  }
152  }
153 
154  private:
155  CellType* cell_;
156  CellType mask_;
157  // This boolean indicates that the object is in a data-only space with no
158  // pointers. This enables some optimizations when marking.
159  // It is expected that this field is inlined and turned into control flow
160  // at the place where the MarkBit object is created.
161  bool data_only_;
162 };
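As an illustrative aside (not part of spaces.h): a MarkBit addresses one bit through a (cell, mask) pair, and Next() moves to the following 32-bit cell when the mask shifts out of range. A minimal standalone sketch of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t cells[2] = {0, 0};
  uint32_t* cell = &cells[0];
  uint32_t mask = uint32_t(1) << 31;   // last bit of the first cell

  *cell |= mask;                       // MarkBit::Set()
  assert((*cell & mask) != 0);         // MarkBit::Get()

  // MarkBit::Next(): shifting the mask to zero advances to the next cell.
  uint32_t next_mask = mask << 1;
  if (next_mask == 0) { cell = cell + 1; next_mask = 1; }
  assert(cell == &cells[1] && next_mask == 1);
  return 0;
}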
163 
164 
165 // Bitmap is a sequence of cells each containing fixed number of bits.
166 class Bitmap {
167  public:
168  static const uint32_t kBitsPerCell = 32;
169  static const uint32_t kBitsPerCellLog2 = 5;
170  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
171  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
172  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
173 
174  static const size_t kLength =
175  (1 << kPageSizeBits) >> (kPointerSizeLog2);
176 
177  static const size_t kSize =
178  (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
179 
180 
181  static int CellsForLength(int length) {
182  return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
183  }
184 
185  int CellsCount() {
186  return CellsForLength(kLength);
187  }
188 
189  static int SizeFor(int cells_count) {
190  return sizeof(MarkBit::CellType) * cells_count;
191  }
192 
193  INLINE(static uint32_t IndexToCell(uint32_t index)) {
194  return index >> kBitsPerCellLog2;
195  }
196 
197  INLINE(static uint32_t CellToIndex(uint32_t index)) {
198  return index << kBitsPerCellLog2;
199  }
200 
201  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
202  return (index + kBitIndexMask) & ~kBitIndexMask;
203  }
204 
205  INLINE(MarkBit::CellType* cells()) {
206  return reinterpret_cast<MarkBit::CellType*>(this);
207  }
208 
209  INLINE(Address address()) {
210  return reinterpret_cast<Address>(this);
211  }
212 
213  INLINE(static Bitmap* FromAddress(Address addr)) {
214  return reinterpret_cast<Bitmap*>(addr);
215  }
216 
217  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
218  MarkBit::CellType mask = 1 << (index & kBitIndexMask);
219  MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
220  return MarkBit(cell, mask, data_only);
221  }
222 
223  static inline void Clear(MemoryChunk* chunk);
224 
225  static void PrintWord(uint32_t word, uint32_t himask = 0) {
226  for (uint32_t mask = 1; mask != 0; mask <<= 1) {
227  if ((mask & himask) != 0) PrintF("[");
228  PrintF((mask & word) ? "1" : "0");
229  if ((mask & himask) != 0) PrintF("]");
230  }
231  }
232 
233  class CellPrinter {
234  public:
235  CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
236 
237  void Print(uint32_t pos, uint32_t cell) {
238  if (cell == seq_type) {
239  seq_length++;
240  return;
241  }
242 
243  Flush();
244 
245  if (IsSeq(cell)) {
246  seq_start = pos;
247  seq_length = 0;
248  seq_type = cell;
249  return;
250  }
251 
252  PrintF("%d: ", pos);
253  PrintWord(cell);
254  PrintF("\n");
255  }
256 
257  void Flush() {
258  if (seq_length > 0) {
259  PrintF("%d: %dx%d\n",
260  seq_start,
261  seq_type == 0 ? 0 : 1,
262  seq_length * kBitsPerCell);
263  seq_length = 0;
264  }
265  }
266 
267  static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
268 
269  private:
270  uint32_t seq_start;
271  uint32_t seq_type;
272  uint32_t seq_length;
273  };
274 
275  void Print() {
276  CellPrinter printer;
277  for (int i = 0; i < CellsCount(); i++) {
278  printer.Print(i, cells()[i]);
279  }
280  printer.Flush();
281  PrintF("\n");
282  }
283 
284  bool IsClean() {
285  for (int i = 0; i < CellsCount(); i++) {
286  if (cells()[i] != 0) {
287  return false;
288  }
289  }
290  return true;
291  }
292 };
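As an illustrative aside (not part of spaces.h): the Bitmap helpers map a mark-bit index to a 32-bit cell and a bit mask within it. A minimal sketch of IndexToCell, the mask computation used by MarkBitFromIndex, and CellAlignIndex:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBitsPerCellLog2 = 5;
  const uint32_t kBitIndexMask = 31;
  uint32_t index = 70;                               // arbitrary mark-bit index
  uint32_t cell_index = index >> kBitsPerCellLog2;   // Bitmap::IndexToCell
  uint32_t mask = uint32_t(1) << (index & kBitIndexMask);
  assert(cell_index == 2 && mask == (uint32_t(1) << 6));

  // CellAlignIndex rounds an index up to the next cell boundary.
  uint32_t aligned = (index + kBitIndexMask) & ~kBitIndexMask;
  assert(aligned == 96);
  return 0;
}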
293 
294 
295 class SkipList;
296 class SlotsBuffer;
297 
298 // MemoryChunk represents a memory region owned by a specific space.
299 // It is divided into the header and the body. Chunk start is always
300 // 1MB aligned. Start of the body is aligned so it can accommodate
301 // any heap object.
302 class MemoryChunk {
303  public:
304  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
305  static MemoryChunk* FromAddress(Address a) {
306  return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
307  }
308 
309  // Only works for addresses in pointer spaces, not data or code spaces.
310  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
311 
312  Address address() { return reinterpret_cast<Address>(this); }
313 
314  bool is_valid() { return address() != NULL; }
315 
316  MemoryChunk* next_chunk() const {
317  return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
318  }
319 
320  MemoryChunk* prev_chunk() const {
321  return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
322  }
323 
324  void set_next_chunk(MemoryChunk* next) {
325  Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
326  }
327 
328  void set_prev_chunk(MemoryChunk* prev) {
329  Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
330  }
331 
332  Space* owner() const {
333  if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
334  kFailureTag) {
335  return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
336  kFailureTag);
337  } else {
338  return NULL;
339  }
340  }
341 
342  void set_owner(Space* space) {
343  ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
344  owner_ = reinterpret_cast<Address>(space) + kFailureTag;
345  ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
346  kFailureTag);
347  }
348 
349  VirtualMemory* reserved_memory() {
350  return &reservation_;
351  }
352 
353  void InitializeReservedMemory() {
354  reservation_.Reset();
355  }
356 
357  void set_reserved_memory(VirtualMemory* reservation) {
358  ASSERT_NOT_NULL(reservation);
359  reservation_.TakeControl(reservation);
360  }
361 
362  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
363  void initialize_scan_on_scavenge(bool scan) {
364  if (scan) {
365  SetFlag(SCAN_ON_SCAVENGE);
366  } else {
367  ClearFlag(SCAN_ON_SCAVENGE);
368  }
369  }
370  inline void set_scan_on_scavenge(bool scan);
371 
372  int store_buffer_counter() { return store_buffer_counter_; }
373  void set_store_buffer_counter(int counter) {
374  store_buffer_counter_ = counter;
375  }
376 
377  bool Contains(Address addr) {
378  return addr >= area_start() && addr < area_end();
379  }
380 
381  // Checks whether addr can be a limit of addresses in this page.
382  // It's a limit if it's in the page, or if it's just after the
383  // last byte of the page.
384  bool ContainsLimit(Address addr) {
385  return addr >= area_start() && addr <= area_end();
386  }
387 
388  // Every n write barrier invocations we go to runtime even though
389  // we could have handled it in generated code. This lets us check
390  // whether we have hit the limit and should do some more marking.
391  static const int kWriteBarrierCounterGranularity = 500;
392 
392 
393  enum MemoryChunkFlags {
394  IS_EXECUTABLE,
395  ABOUT_TO_BE_FREED,
396  POINTERS_TO_HERE_ARE_INTERESTING,
397  POINTERS_FROM_HERE_ARE_INTERESTING,
398  SCAN_ON_SCAVENGE,
399  IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
400  IN_TO_SPACE, // All pages in new space have one of these two set.
401  NEW_SPACE_BELOW_AGE_MARK,
402  CONTAINS_ONLY_DATA,
403  EVACUATION_CANDIDATE,
404  RESCAN_ON_EVACUATION,
405 
406  // Pages swept precisely can be iterated, hitting only the live objects.
407  // Whereas those swept conservatively cannot be iterated over. Both flags
408  // indicate that marking bits have been cleared by the sweeper; otherwise
409  // marking bits are still intact.
410  WAS_SWEPT_PRECISELY,
411  WAS_SWEPT_CONSERVATIVELY,
412 
413  // Large objects can have a progress bar in their page header. These objects
414  // are scanned in increments and will be kept black while being scanned.
415  // Even if the mutator writes to them they will be kept black and a white
416  // to grey transition is performed in the value.
417  HAS_PROGRESS_BAR,
418 
419  // Last flag, keep at bottom.
420  NUM_MEMORY_CHUNK_FLAGS
421  };
422 
423 
424  static const int kPointersToHereAreInterestingMask =
425  1 << POINTERS_TO_HERE_ARE_INTERESTING;
426 
427  static const int kPointersFromHereAreInterestingMask =
428  1 << POINTERS_FROM_HERE_ARE_INTERESTING;
429 
430  static const int kEvacuationCandidateMask =
431  1 << EVACUATION_CANDIDATE;
432 
433  static const int kSkipEvacuationSlotsRecordingMask =
434  (1 << EVACUATION_CANDIDATE) |
435  (1 << RESCAN_ON_EVACUATION) |
436  (1 << IN_FROM_SPACE) |
437  (1 << IN_TO_SPACE);
438 
439 
440  void SetFlag(int flag) {
441  flags_ |= static_cast<uintptr_t>(1) << flag;
442  }
443 
444  void ClearFlag(int flag) {
445  flags_ &= ~(static_cast<uintptr_t>(1) << flag);
446  }
447 
448  void SetFlagTo(int flag, bool value) {
449  if (value) {
450  SetFlag(flag);
451  } else {
452  ClearFlag(flag);
453  }
454  }
455 
456  bool IsFlagSet(int flag) {
457  return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
458  }
459 
460  // Set or clear multiple flags at a time. The flags in the mask
461  // are set to the value in "flags", the rest retain the current value
462  // in flags_.
463  void SetFlags(intptr_t flags, intptr_t mask) {
464  flags_ = (flags_ & ~mask) | (flags & mask);
465  }
466 
467  // Return all current flags.
468  intptr_t GetFlags() { return flags_; }
469 
470 
471  // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
472  // sweeping must not be performed on that page.
473  // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
474  // page and will not touch the page memory anymore.
475  // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
476  // sweeper thread.
477  // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
478  enum ParallelSweepingState {
479  PARALLEL_SWEEPING_DONE,
480  PARALLEL_SWEEPING_FINALIZE,
481  PARALLEL_SWEEPING_IN_PROGRESS,
482  PARALLEL_SWEEPING_PENDING
483  };
484 
485  ParallelSweepingState parallel_sweeping() {
486  return static_cast<ParallelSweepingState>(
487  Acquire_Load(&parallel_sweeping_));
488  }
489 
490  void set_parallel_sweeping(ParallelSweepingState state) {
491  Release_Store(&parallel_sweeping_, state);
492  }
493 
494  bool TryParallelSweeping() {
495  return Acquire_CompareAndSwap(&parallel_sweeping_,
496  PARALLEL_SWEEPING_PENDING,
497  PARALLEL_SWEEPING_IN_PROGRESS) ==
498  PARALLEL_SWEEPING_PENDING;
499  }
500 
501  // Manage live byte count (count of bytes known to be live,
502  // because they are marked black).
503  void ResetLiveBytes() {
504  if (FLAG_gc_verbose) {
505  PrintF("ResetLiveBytes:%p:%x->0\n",
506  static_cast<void*>(this), live_byte_count_);
507  }
508  live_byte_count_ = 0;
509  }
510  void IncrementLiveBytes(int by) {
511  if (FLAG_gc_verbose) {
512  printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
513  static_cast<void*>(this), live_byte_count_,
514  ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
515  live_byte_count_ + by);
516  }
517  live_byte_count_ += by;
518  ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
519  }
520  int LiveBytes() {
521  ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
522  return live_byte_count_;
523  }
524 
525  int write_barrier_counter() {
526  return static_cast<int>(write_barrier_counter_);
527  }
528 
529  void set_write_barrier_counter(int counter) {
530  write_barrier_counter_ = counter;
531  }
532 
533  int progress_bar() {
534  ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
535  return progress_bar_;
536  }
537 
538  void set_progress_bar(int progress_bar) {
539  ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
540  progress_bar_ = progress_bar;
541  }
542 
543  void ResetProgressBar() {
544  if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
545  set_progress_bar(0);
546  ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
547  }
548  }
549 
550  bool IsLeftOfProgressBar(Object** slot) {
551  Address slot_address = reinterpret_cast<Address>(slot);
552  ASSERT(slot_address > this->address());
553  return (slot_address - (this->address() + kObjectStartOffset)) <
554  progress_bar();
555  }
556 
557  static void IncrementLiveBytesFromGC(Address address, int by) {
558  MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
559  }
560 
561  static void IncrementLiveBytesFromMutator(Address address, int by);
562 
563  static const intptr_t kAlignment =
564  (static_cast<uintptr_t>(1) << kPageSizeBits);
565 
566  static const intptr_t kAlignmentMask = kAlignment - 1;
567 
568  static const intptr_t kSizeOffset = 0;
569 
570  static const intptr_t kLiveBytesOffset =
571  kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
572  kPointerSize + kPointerSize +
573  kPointerSize + kPointerSize + kPointerSize + kIntSize;
574 
575  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
576 
577  static const size_t kWriteBarrierCounterOffset =
578  kSlotsBufferOffset + kPointerSize + kPointerSize;
579 
580  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
581  kIntSize + kIntSize + kPointerSize +
582  5 * kPointerSize +
583  kPointerSize + kPointerSize;
584 
585  static const int kBodyOffset =
586  CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
587 
588  // The start offset of the object area in a page. Aligned to both maps and
589  // code alignment to be suitable for both. Also aligned to 32 words because
590  // the marking bitmap is arranged in 32 bit chunks.
591  static const int kObjectStartAlignment = 32 * kPointerSize;
592  static const int kObjectStartOffset = kBodyOffset - 1 +
593  (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
594 
595  size_t size() const { return size_; }
596 
597  void set_size(size_t size) {
598  size_ = size;
599  }
600 
601  void SetArea(Address area_start, Address area_end) {
602  area_start_ = area_start;
603  area_end_ = area_end;
604  }
605 
606  Executability executable() {
607  return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
608  }
609 
610  bool ContainsOnlyData() {
611  return IsFlagSet(CONTAINS_ONLY_DATA);
612  }
613 
614  bool InNewSpace() {
615  return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
616  }
617 
618  bool InToSpace() {
619  return IsFlagSet(IN_TO_SPACE);
620  }
621 
622  bool InFromSpace() {
623  return IsFlagSet(IN_FROM_SPACE);
624  }
625 
626  // ---------------------------------------------------------------------
627  // Markbits support
628 
629  inline Bitmap* markbits() {
630  return Bitmap::FromAddress(address() + kHeaderSize);
631  }
632 
633  void PrintMarkbits() { markbits()->Print(); }
634 
635  inline uint32_t AddressToMarkbitIndex(Address addr) {
636  return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
637  }
638 
639  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
640  const intptr_t offset =
641  reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
642 
643  return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
644  }
645 
646  inline Address MarkbitIndexToAddress(uint32_t index) {
647  return this->address() + (index << kPointerSizeLog2);
648  }
649 
650  void InsertAfter(MemoryChunk* other);
651  void Unlink();
652 
653  inline Heap* heap() { return heap_; }
654 
655  static const int kFlagsOffset = kPointerSize;
656 
657  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
658 
659  bool ShouldSkipEvacuationSlotRecording() {
660  return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
661  }
662 
663  inline SkipList* skip_list() {
664  return skip_list_;
665  }
666 
667  inline void set_skip_list(SkipList* skip_list) {
668  skip_list_ = skip_list;
669  }
670 
671  inline SlotsBuffer* slots_buffer() {
672  return slots_buffer_;
673  }
674 
675  inline SlotsBuffer** slots_buffer_address() {
676  return &slots_buffer_;
677  }
678 
679  void MarkEvacuationCandidate() {
680  ASSERT(slots_buffer_ == NULL);
681  SetFlag(EVACUATION_CANDIDATE);
682  }
683 
684  void ClearEvacuationCandidate() {
685  ASSERT(slots_buffer_ == NULL);
686  ClearFlag(EVACUATION_CANDIDATE);
687  }
688 
689  Address area_start() { return area_start_; }
690  Address area_end() { return area_end_; }
691  int area_size() {
692  return static_cast<int>(area_end() - area_start());
693  }
694  bool CommitArea(size_t requested);
695 
696  // Approximate amount of physical memory committed for this chunk.
697  size_t CommittedPhysicalMemory() {
698  return high_water_mark_;
699  }
700 
701  static inline void UpdateHighWaterMark(Address mark);
702 
703  protected:
704  size_t size_;
705  intptr_t flags_;
706 
707  // Start and end of allocatable memory on this chunk.
708  Address area_start_;
709  Address area_end_;
710 
711  // If the chunk needs to remember its memory reservation, it is stored here.
712  VirtualMemory reservation_;
713  // The identity of the owning space. This is tagged as a failure pointer, but
714  // no failure can be in an object, so this can be distinguished from any entry
715  // in a fixed array.
716  Address owner_;
717  Heap* heap_;
718  // Used by the store buffer to keep track of which pages to mark scan-on-
719  // scavenge.
720  int store_buffer_counter_;
721  // Count of bytes marked black on page.
722  int live_byte_count_;
723  SlotsBuffer* slots_buffer_;
724  SkipList* skip_list_;
725  intptr_t write_barrier_counter_;
726  // Used by the incremental marker to keep track of the scanning progress in
727  // large objects that have a progress bar and are scanned in increments.
728  int progress_bar_;
729  // Assuming the initial allocation on a page is sequential,
730  // count highest number of bytes ever allocated on the page.
731  intptr_t high_water_mark_;
732 
733  AtomicWord parallel_sweeping_;
734 
735  // PagedSpace free-list statistics.
736  intptr_t available_in_small_free_list_;
737  intptr_t available_in_medium_free_list_;
738  intptr_t available_in_large_free_list_;
739  intptr_t available_in_huge_free_list_;
740  intptr_t non_available_small_blocks_;
741 
742  static MemoryChunk* Initialize(Heap* heap,
743  Address base,
744  size_t size,
745  Address area_start,
746  Address area_end,
747  Executability executable,
748  Space* owner);
749 
750  private:
751  // next_chunk_ holds a pointer of type MemoryChunk
752  AtomicWord next_chunk_;
753  // prev_chunk_ holds a pointer of type MemoryChunk
754  AtomicWord prev_chunk_;
755 
756  friend class MemoryAllocator;
757 };
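As an illustrative aside (not part of spaces.h): MemoryChunk keeps its per-page state in a single flag word. A minimal sketch of the SetFlag / IsFlagSet / SetFlags(flags, mask) semantics, where the mask selects which bits are overwritten; the flag indices here are arbitrary example values.

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t flags = 0;
  const int kFlagA = 3, kFlagB = 7;                      // hypothetical flag indices

  flags |= uintptr_t(1) << kFlagA;                       // SetFlag(kFlagA)
  assert((flags & (uintptr_t(1) << kFlagA)) != 0);       // IsFlagSet(kFlagA)

  uintptr_t mask = (uintptr_t(1) << kFlagA) | (uintptr_t(1) << kFlagB);
  uintptr_t new_bits = uintptr_t(1) << kFlagB;           // set B, clear A
  flags = (flags & ~mask) | (new_bits & mask);           // SetFlags(new_bits, mask)
  assert((flags & (uintptr_t(1) << kFlagA)) == 0);
  assert((flags & (uintptr_t(1) << kFlagB)) != 0);
  return 0;
}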
758 
759 
761 
762 
763 // -----------------------------------------------------------------------------
764 // A page is a memory chunk of size 1MB. Large object pages may be larger.
765 //
766 // The only way to get a page pointer is by calling factory methods:
767 // Page* p = Page::FromAddress(addr); or
768 // Page* p = Page::FromAllocationTop(top);
769 class Page : public MemoryChunk {
770  public:
771  // Returns the page containing a given address. The address ranges
772  // from [page_addr .. page_addr + kPageSize[
773  // This only works if the object is in fact in a page. See also MemoryChunk::
774  // FromAddress() and FromAnyAddress().
775  INLINE(static Page* FromAddress(Address a)) {
776  return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
777  }
778 
779  // Returns the page containing an allocation top. Because an allocation
780  // top address can be the upper bound of the page, we need to subtract
781  // it with kPointerSize first. The address ranges from
782  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
783  INLINE(static Page* FromAllocationTop(Address top)) {
784  Page* p = FromAddress(top - kPointerSize);
785  return p;
786  }
787 
788  // Returns the next page in the chain of pages owned by a space.
789  inline Page* next_page();
790  inline Page* prev_page();
791  inline void set_next_page(Page* page);
792  inline void set_prev_page(Page* page);
793 
794  // Checks whether an address is page aligned.
795  static bool IsAlignedToPageSize(Address a) {
796  return 0 == (OffsetFrom(a) & kPageAlignmentMask);
797  }
798 
799  // Returns the offset of a given address to this page.
800  INLINE(int Offset(Address a)) {
801  int offset = static_cast<int>(a - address());
802  return offset;
803  }
804 
805  // Returns the address for a given offset in this page.
806  Address OffsetToAddress(int offset) {
807  ASSERT_PAGE_OFFSET(offset);
808  return address() + offset;
809  }
810 
811  // ---------------------------------------------------------------------
812 
813  // Page size in bytes. This must be a multiple of the OS page size.
814  static const int kPageSize = 1 << kPageSizeBits;
815 
816  // Maximum object size that fits in a page. Objects larger than that size
817  // are allocated in large object space and are never moved in memory. This
818  // also applies to new space allocation, since objects are never migrated
819  // from new space to large object space. Takes double alignment into account.
820  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
821 
822  // Page size mask.
823  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
824 
825  inline void ClearGCFields();
826 
827  static inline Page* Initialize(Heap* heap,
828  MemoryChunk* chunk,
829  Executability executable,
830  PagedSpace* owner);
831 
832  void InitializeAsAnchor(PagedSpace* owner);
833 
834  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
835  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
836  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
837 
838  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
839  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
840 
841  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
842  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
843 
844  void ResetFreeListStatistics();
845 
846 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
847  type name() { return name##_; } \
848  void set_##name(type name) { name##_ = name; } \
849  void add_##name(type name) { name##_ += name; }
850 
851  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
852  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
853  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
854  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
855  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
856 
857 #undef FRAGMENTATION_STATS_ACCESSORS
858 
859 #ifdef DEBUG
860  void Print();
861 #endif // DEBUG
862 
863  friend class MemoryAllocator;
864 };
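As an illustrative aside (not part of spaces.h): Page::FromAddress simply masks off the low page-offset bits of an interior address, and Page::FromAllocationTop first steps back one word so that a top equal to the page end still maps to the page it belongs to. A minimal sketch, assuming 1 MB pages and a hypothetical page base address:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPageSize = uintptr_t(1) << 20;   // assumed 1 MB pages
  const uintptr_t kMask = kPageSize - 1;
  uintptr_t page = 0x40100000;                      // hypothetical page start
  uintptr_t interior = page + 0x1234;
  assert((interior & ~kMask) == page);              // Page::FromAddress

  uintptr_t top = page + kPageSize;                 // allocation top at the page end
  assert(((top - sizeof(void*)) & ~kMask) == page); // Page::FromAllocationTop
  return 0;
}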
865 
866 
868 
869 
870 class LargePage : public MemoryChunk {
871  public:
872  HeapObject* GetObject() {
873  return HeapObject::FromAddress(area_start());
874  }
875 
876  inline LargePage* next_page() const {
877  return static_cast<LargePage*>(next_chunk());
878  }
879 
880  inline void set_next_page(LargePage* page) {
881  set_next_chunk(page);
882  }
883  private:
884  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
885 
886  friend class MemoryAllocator;
887 };
888 
890 
891 // ----------------------------------------------------------------------------
892 // Space is the abstract superclass for all allocation spaces.
893 class Space : public Malloced {
894  public:
895  Space(Heap* heap, AllocationSpace id, Executability executable)
896  : heap_(heap), id_(id), executable_(executable) {}
897 
898  virtual ~Space() {}
899 
900  Heap* heap() const { return heap_; }
901 
902  // Does the space need executable memory?
903  Executability executable() { return executable_; }
904 
905  // Identity used in error reporting.
906  AllocationSpace identity() { return id_; }
907 
908  // Returns allocated size.
909  virtual intptr_t Size() = 0;
910 
911  // Returns size of objects. Can differ from the allocated size
912  // (e.g. see LargeObjectSpace).
913  virtual intptr_t SizeOfObjects() { return Size(); }
914 
915  virtual int RoundSizeDownToObjectAlignment(int size) {
916  if (id_ == CODE_SPACE) {
917  return RoundDown(size, kCodeAlignment);
918  } else {
919  return RoundDown(size, kPointerSize);
920  }
921  }
922 
923 #ifdef DEBUG
924  virtual void Print() = 0;
925 #endif
926 
927  private:
928  Heap* heap_;
929  AllocationSpace id_;
930  Executability executable_;
931 };
932 
933 
934 // ----------------------------------------------------------------------------
935 // All heap objects containing executable code (code objects) must be allocated
936 // from a 2 GB range of memory, so that they can call each other using 32-bit
937 // displacements. This happens automatically on 32-bit platforms, where 32-bit
938 // displacements cover the entire 4GB virtual address space. On 64-bit
939 // platforms, we support this using the CodeRange object, which reserves and
940 // manages a range of virtual memory.
941 class CodeRange {
942  public:
943  explicit CodeRange(Isolate* isolate);
945 
946  // Reserves a range of virtual memory, but does not commit any of it.
947  // Can only be called once, at heap initialization time.
948  // Returns false on failure.
949  bool SetUp(const size_t requested_size);
950 
951  // Frees the range of virtual memory, and frees the data structures used to
952  // manage it.
953  void TearDown();
954 
955  bool exists() { return this != NULL && code_range_ != NULL; }
956  Address start() {
957  if (this == NULL || code_range_ == NULL) return NULL;
958  return static_cast<Address>(code_range_->address());
959  }
960  bool contains(Address address) {
961  if (this == NULL || code_range_ == NULL) return false;
962  Address start = static_cast<Address>(code_range_->address());
963  return start <= address && address < start + code_range_->size();
964  }
965 
966  // Allocates a chunk of memory from the large-object portion of
967  // the code range. On platforms with no separate code range, should
968  // not be called.
969  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
970  const size_t commit_size,
971  size_t* allocated);
972  bool CommitRawMemory(Address start, size_t length);
973  bool UncommitRawMemory(Address start, size_t length);
974  void FreeRawMemory(Address buf, size_t length);
975 
976  private:
977  Isolate* isolate_;
978 
979  // The reserved range of virtual memory that all code objects are put in.
980  VirtualMemory* code_range_;
981  // Plain old data class, just a struct plus a constructor.
982  class FreeBlock {
983  public:
984  FreeBlock(Address start_arg, size_t size_arg)
985  : start(start_arg), size(size_arg) {
987  ASSERT(size >= static_cast<size_t>(Page::kPageSize));
988  }
989  FreeBlock(void* start_arg, size_t size_arg)
990  : start(static_cast<Address>(start_arg)), size(size_arg) {
992  ASSERT(size >= static_cast<size_t>(Page::kPageSize));
993  }
994 
995  Address start;
996  size_t size;
997  };
998 
999  // Freed blocks of memory are added to the free list. When the allocation
1000  // list is exhausted, the free list is sorted and merged to make the new
1001  // allocation list.
1002  List<FreeBlock> free_list_;
1003  // Memory is allocated from the free blocks on the allocation list.
1004  // The block at current_allocation_block_index_ is the current block.
1005  List<FreeBlock> allocation_list_;
1006  int current_allocation_block_index_;
1007 
1008  // Finds a block on the allocation list that contains at least the
1009  // requested amount of memory. If none is found, sorts and merges
1010  // the existing free memory blocks, and searches again.
1011  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
1012  void GetNextAllocationBlock(size_t requested);
1013  // Compares the start addresses of two free blocks.
1014  static int CompareFreeBlockAddress(const FreeBlock* left,
1015  const FreeBlock* right);
1016 
1017  DISALLOW_COPY_AND_ASSIGN(CodeRange);
1018 };
1019 
1020 
1021 class SkipList {
1022  public:
1023  SkipList() {
1024  Clear();
1025  }
1026 
1027  void Clear() {
1028  for (int idx = 0; idx < kSize; idx++) {
1029  starts_[idx] = reinterpret_cast<Address>(-1);
1030  }
1031  }
1032 
1033  Address StartFor(Address addr) {
1034  return starts_[RegionNumber(addr)];
1035  }
1036 
1037  void AddObject(Address addr, int size) {
1038  int start_region = RegionNumber(addr);
1039  int end_region = RegionNumber(addr + size - kPointerSize);
1040  for (int idx = start_region; idx <= end_region; idx++) {
1041  if (starts_[idx] > addr) starts_[idx] = addr;
1042  }
1043  }
1044 
1045  static inline int RegionNumber(Address addr) {
1046  return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1047  }
1048 
1049  static void Update(Address addr, int size) {
1050  Page* page = Page::FromAddress(addr);
1051  SkipList* list = page->skip_list();
1052  if (list == NULL) {
1053  list = new SkipList();
1054  page->set_skip_list(list);
1055  }
1056 
1057  list->AddObject(addr, size);
1058  }
1059 
1060  private:
1061  static const int kRegionSizeLog2 = 13;
1062  static const int kRegionSize = 1 << kRegionSizeLog2;
1063  static const int kSize = Page::kPageSize / kRegionSize;
1064 
1065  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1066 
1067  Address starts_[kSize];
1068 };
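As an illustrative aside (not part of spaces.h): with 8 KB regions (kRegionSizeLog2 == 13) the skip list maps an object's page offset to a region number, and AddObject records the object start in every region the object touches. A minimal sketch of that arithmetic, again assuming 1 MB pages and a hypothetical page base:

#include <cassert>
#include <cstdint>

int main() {
  const int kRegionSizeLog2 = 13;
  const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // 1 MB pages
  uintptr_t page = 0x40100000;                                    // hypothetical page
  uintptr_t addr = page + 3 * 8192 + 100;                         // inside region 3

  int region = int((addr & kPageAlignmentMask) >> kRegionSizeLog2);
  assert(region == 3);                                            // SkipList::RegionNumber

  // An object spanning region boundaries updates every region it touches.
  int end_region = int(((addr + 16384 - sizeof(void*)) & kPageAlignmentMask)
                       >> kRegionSizeLog2);
  assert(end_region == 5);
  return 0;
}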
1069 
1070 
1071 // ----------------------------------------------------------------------------
1072 // A space acquires chunks of memory from the operating system. The memory
1073 // allocator allocates and deallocates pages for the paged heap spaces and large
1074 // pages for large object space.
1075 //
1076 // Each space has to manage its own pages.
1077 //
1078 class MemoryAllocator {
1079  public:
1080  explicit MemoryAllocator(Isolate* isolate);
1081 
1082  // Initializes its internal bookkeeping structures.
1083  // Max capacity of the total space and executable memory limit.
1084  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
1085 
1086  void TearDown();
1087 
1088  Page* AllocatePage(
1089  intptr_t size, PagedSpace* owner, Executability executable);
1090 
1091  LargePage* AllocateLargePage(
1092  intptr_t object_size, Space* owner, Executability executable);
1093 
1094  void Free(MemoryChunk* chunk);
1095 
1096  // Returns the maximum available bytes of heaps.
1097  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
1098 
1099  // Returns allocated spaces in bytes.
1100  intptr_t Size() { return size_; }
1101 
1102  // Returns the maximum available executable bytes of heaps.
1103  intptr_t AvailableExecutable() {
1104  if (capacity_executable_ < size_executable_) return 0;
1105  return capacity_executable_ - size_executable_;
1106  }
1107 
1108  // Returns allocated executable spaces in bytes.
1109  intptr_t SizeExecutable() { return size_executable_; }
1110 
1111  // Returns maximum available bytes that the old space can have.
1112  intptr_t MaxAvailable() {
1113  return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
1114  }
1115 
1116  // Returns an indication of whether a pointer is in a space that has
1117  // been allocated by this MemoryAllocator.
1118  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
1119  return address < lowest_ever_allocated_ ||
1120  address >= highest_ever_allocated_;
1121  }
1122 
1123 #ifdef DEBUG
1124  // Reports statistic info of the space.
1125  void ReportStatistics();
1126 #endif
1127 
1128  // Returns a MemoryChunk in which the memory region from commit_area_size to
1129  // reserve_area_size of the chunk area is reserved but not committed, it
1130  // could be committed later by calling MemoryChunk::CommitArea.
1131  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1132  intptr_t commit_area_size,
1133  Executability executable,
1134  Space* space);
1135 
1136  Address ReserveAlignedMemory(size_t requested,
1137  size_t alignment,
1138  VirtualMemory* controller);
1139  Address AllocateAlignedMemory(size_t reserve_size,
1140  size_t commit_size,
1141  size_t alignment,
1142  Executability executable,
1143  VirtualMemory* controller);
1144 
1145  bool CommitMemory(Address addr, size_t size, Executability executable);
1146 
1147  void FreeMemory(VirtualMemory* reservation, Executability executable);
1148  void FreeMemory(Address addr, size_t size, Executability executable);
1149 
1150  // Commit a contiguous block of memory from the initial chunk. Assumes that
1151  // the address is not NULL, the size is greater than zero, and that the
1152  // block is contained in the initial chunk. Returns true if it succeeded
1153  // and false otherwise.
1154  bool CommitBlock(Address start, size_t size, Executability executable);
1155 
1156  // Uncommit a contiguous block of memory [start..(start+size)[.
1157  // start is not NULL, the size is greater than zero, and the
1158  // block is contained in the initial chunk. Returns true if it succeeded
1159  // and false otherwise.
1160  bool UncommitBlock(Address start, size_t size);
1161 
1162  // Zaps a contiguous block of memory [start..(start+size)[ thus
1163  // filling it up with a recognizable non-NULL bit pattern.
1164  void ZapBlock(Address start, size_t size);
1165 
1166  void PerformAllocationCallback(ObjectSpace space,
1167  AllocationAction action,
1168  size_t size);
1169 
1170  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1171  ObjectSpace space,
1172  AllocationAction action);
1173 
1174  void RemoveMemoryAllocationCallback(
1175  MemoryAllocationCallback callback);
1176 
1177  bool MemoryAllocationCallbackRegistered(
1178  MemoryAllocationCallback callback);
1179 
1180  static int CodePageGuardStartOffset();
1181 
1182  static int CodePageGuardSize();
1183 
1184  static int CodePageAreaStartOffset();
1185 
1186  static int CodePageAreaEndOffset();
1187 
1188  static int CodePageAreaSize() {
1189  return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1190  }
1191 
1192  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1193  Address start,
1194  size_t commit_size,
1195  size_t reserved_size);
1196 
1197  private:
1198  Isolate* isolate_;
1199 
1200  // Maximum space size in bytes.
1201  size_t capacity_;
1202  // Maximum subset of capacity_ that can be executable
1203  size_t capacity_executable_;
1204 
1205  // Allocated space size in bytes.
1206  size_t size_;
1207  // Allocated executable space size in bytes.
1208  size_t size_executable_;
1209 
1210  // We keep the lowest and highest addresses allocated as a quick way
1211  // of determining that pointers are outside the heap. The estimate is
1212  // conservative, i.e. not all addresses in 'allocated' space are allocated
1213  // to our heap. The range is [lowest, highest[, inclusive on the low end
1214  // and exclusive on the high end.
1215  void* lowest_ever_allocated_;
1216  void* highest_ever_allocated_;
1217 
1218  struct MemoryAllocationCallbackRegistration {
1219  MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1220  ObjectSpace space,
1221  AllocationAction action)
1222  : callback(callback), space(space), action(action) {
1223  }
1224  MemoryAllocationCallback callback;
1225  ObjectSpace space;
1226  AllocationAction action;
1227  };
1228 
1229  // A list of callbacks that are triggered when memory is allocated or freed.
1230  List<MemoryAllocationCallbackRegistration>
1231  memory_allocation_callbacks_;
1232 
1233  // Initializes pages in a chunk. Returns the first page address.
1234  // This function and GetChunkId() are provided for the mark-compact
1235  // collector to rebuild page headers in the from space, which is
1236  // used as a marking stack and its page headers are destroyed.
1237  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1238  PagedSpace* owner);
1239 
1240  void UpdateAllocatedSpaceLimits(void* low, void* high) {
1241  lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
1242  highest_ever_allocated_ = Max(highest_ever_allocated_, high);
1243  }
1244 
1245  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1246 };
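As an illustrative aside (not part of spaces.h): the IsOutsideAllocatedSpace check only needs the lowest and highest addresses ever handed out, so it is deliberately conservative. A minimal sketch with hypothetical chunk addresses:

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t lowest = UINTPTR_MAX, highest = 0;

  // UpdateAllocatedSpaceLimits after two chunk allocations.
  auto update = [&](uintptr_t low, uintptr_t high) {
    if (low < lowest) lowest = low;
    if (high > highest) highest = high;
  };
  update(0x40100000, 0x40200000);
  update(0x40400000, 0x40500000);

  // IsOutsideAllocatedSpace: the tracked range is [lowest, highest[.
  auto outside = [&](uintptr_t a) { return a < lowest || a >= highest; };
  assert(outside(0x3FF00000));
  assert(!outside(0x40300000));   // inside the range even though never allocated
  return 0;
}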
1247 
1248 
1249 // -----------------------------------------------------------------------------
1250 // Interface for heap object iterator to be implemented by all object space
1251 // object iterators.
1252 //
1253 // NOTE: The space-specific object iterators also implement their own next()
1254 // method, which is used to avoid virtual function calls when iterating a
1255 // specific space.
1256 
1257 class ObjectIterator : public Malloced {
1258  public:
1259  virtual ~ObjectIterator() { }
1260 
1261  virtual HeapObject* next_object() = 0;
1262 };
1263 
1264 
1265 // -----------------------------------------------------------------------------
1266 // Heap object iterator in new/old/map spaces.
1267 //
1268 // A HeapObjectIterator iterates objects from the bottom of the given space
1269 // to its top or from the bottom of the given page to its top.
1270 //
1271 // If objects are allocated in the page during iteration the iterator may
1272 // or may not iterate over those objects. The caller must create a new
1273 // iterator in order to be sure to visit these new objects.
1274 class HeapObjectIterator: public ObjectIterator {
1275  public:
1276  // Creates a new object iterator in a given space.
1277  // If the size function is not given, the iterator calls the default
1278  // Object::Size().
1279  explicit HeapObjectIterator(PagedSpace* space);
1280  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
1281  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1282 
1283  // Advance to the next object, skipping free spaces and other fillers and
1284  // skipping the special garbage section of which there is one per space.
1285  // Returns NULL when the iteration has ended.
1286  inline HeapObject* Next() {
1287  do {
1288  HeapObject* next_obj = FromCurrentPage();
1289  if (next_obj != NULL) return next_obj;
1290  } while (AdvanceToNextPage());
1291  return NULL;
1292  }
1293 
1294  virtual HeapObject* next_object() {
1295  return Next();
1296  }
1297 
1298  private:
1299  enum PageMode { kOnePageOnly, kAllPagesInSpace };
1300 
1301  Address cur_addr_; // Current iteration point.
1302  Address cur_end_; // End iteration point.
1303  HeapObjectCallback size_func_; // Size function or NULL.
1304  PagedSpace* space_;
1305  PageMode page_mode_;
1306 
1307  // Fast (inlined) path of next().
1308  inline HeapObject* FromCurrentPage();
1309 
1310  // Slow path of next(), goes into the next page. Returns false if the
1311  // iteration has ended.
1312  bool AdvanceToNextPage();
1313 
1314  // Initializes fields.
1315  inline void Initialize(PagedSpace* owner,
1316  Address start,
1317  Address end,
1318  PageMode mode,
1319  HeapObjectCallback size_func);
1320 };
1321 
1322 
1323 // -----------------------------------------------------------------------------
1324 // A PageIterator iterates the pages in a paged space.
1325 
1326 class PageIterator BASE_EMBEDDED {
1327  public:
1328  explicit inline PageIterator(PagedSpace* space);
1329 
1330  inline bool has_next();
1331  inline Page* next();
1332 
1333  private:
1334  PagedSpace* space_;
1335  Page* prev_page_; // Previous page returned.
1336  // Next page that will be returned. Cached here so that we can use this
1337  // iterator for operations that deallocate pages.
1338  Page* next_page_;
1339 };
1340 
1341 
1342 // -----------------------------------------------------------------------------
1343 // A space has a circular list of pages. The next page can be accessed via
1344 // Page::next_page() call.
1345 
1346 // An abstraction of allocation and relocation pointers in a page-structured
1347 // space.
1348 class AllocationInfo {
1349  public:
1350  AllocationInfo() : top_(NULL), limit_(NULL) {
1351  }
1352 
1353  INLINE(void set_top(Address top)) {
1354  SLOW_ASSERT(top == NULL ||
1355  (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
1356  top_ = top;
1357  }
1358 
1359  INLINE(Address top()) const {
1360  SLOW_ASSERT(top_ == NULL ||
1361  (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
1362  return top_;
1363  }
1364 
1365  INLINE(Address* top_address()) {
1366  return &top_;
1367  }
1368 
1369  INLINE(void set_limit(Address limit)) {
1370  SLOW_ASSERT(limit == NULL ||
1371  (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
1372  limit_ = limit;
1373  }
1374 
1375  INLINE(Address limit()) const {
1376  SLOW_ASSERT(limit_ == NULL ||
1377  (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
1378  return limit_;
1379  }
1380 
1381  INLINE(Address* limit_address()) {
1382  return &limit_;
1383  }
1384 
1385 #ifdef DEBUG
1386  bool VerifyPagedAllocation() {
1387  return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
1388  && (top_ <= limit_);
1389  }
1390 #endif
1391 
1392  private:
1393  // Current allocation top.
1394  Address top_;
1395  // Current allocation limit.
1396  Address limit_;
1397 };
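As an illustrative aside (not part of spaces.h): AllocationInfo supports the bump-pointer allocation pattern, where objects are carved off at top until top would pass limit and the linear area has to be refilled. A minimal self-contained sketch of that pattern (the names are local to the example, not V8's API):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct LinearArea {
  uintptr_t top;
  uintptr_t limit;
};

// Returns the object address, or 0 if the linear area is exhausted.
uintptr_t AllocateRaw(LinearArea* area, size_t size_in_bytes) {
  uintptr_t object = area->top;
  if (area->limit - area->top < size_in_bytes) return 0;
  area->top += size_in_bytes;
  return object;
}

int main() {
  LinearArea area = {0x1000, 0x1040};     // 64 bytes of linear space
  assert(AllocateRaw(&area, 48) == 0x1000);
  assert(AllocateRaw(&area, 32) == 0);    // would pass limit -> refill needed
  return 0;
}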
1398 
1399 
1400 // An abstraction of the accounting statistics of a page-structured space.
1401 // The 'capacity' of a space is the number of object-area bytes (i.e., not
1402 // including page bookkeeping structures) currently in the space. The 'size'
1403 // of a space is the number of allocated bytes, the 'waste' in the space is
1404 // the number of bytes that are not allocated and not available to
1405 // allocation without reorganizing the space via a GC (e.g. small blocks due
1406 // to internal fragmentation, top of page areas in map space), and the bytes
1407 // 'available' is the number of unallocated bytes that are not waste. The
1408 // capacity is the sum of size, waste, and available.
1409 //
1410 // The stats are only set by functions that ensure they stay balanced. These
1411 // functions increase or decrease one of the non-capacity stats in
1412 // conjunction with capacity, or else they always balance increases and
1413 // decreases to the non-capacity stats.
1414 class AllocationStats BASE_EMBEDDED {
1415  public:
1416  AllocationStats() { Clear(); }
1417 
1418  // Zero out all the allocation statistics (i.e., no capacity).
1419  void Clear() {
1420  capacity_ = 0;
1421  max_capacity_ = 0;
1422  size_ = 0;
1423  waste_ = 0;
1424  }
1425 
1426  void ClearSizeWaste() {
1427  size_ = capacity_;
1428  waste_ = 0;
1429  }
1430 
1431  // Reset the allocation statistics (i.e., available = capacity with no
1432  // wasted or allocated bytes).
1433  void Reset() {
1434  size_ = 0;
1435  waste_ = 0;
1436  }
1437 
1438  // Accessors for the allocation statistics.
1439  intptr_t Capacity() { return capacity_; }
1440  intptr_t MaxCapacity() { return max_capacity_; }
1441  intptr_t Size() { return size_; }
1442  intptr_t Waste() { return waste_; }
1443 
1444  // Grow the space by adding available bytes. They are initially marked as
1445  // being in use (part of the size), but will normally be immediately freed,
1446  // putting them on the free list and removing them from size_.
1447  void ExpandSpace(int size_in_bytes) {
1448  capacity_ += size_in_bytes;
1449  size_ += size_in_bytes;
1450  if (capacity_ > max_capacity_) {
1451  max_capacity_ = capacity_;
1452  }
1453  ASSERT(size_ >= 0);
1454  }
1455 
1456  // Shrink the space by removing available bytes. Since shrinking is done
1457  // during sweeping, bytes have been marked as being in use (part of the size)
1458  // and are hereby freed.
1459  void ShrinkSpace(int size_in_bytes) {
1460  capacity_ -= size_in_bytes;
1461  size_ -= size_in_bytes;
1462  ASSERT(size_ >= 0);
1463  }
1464 
1465  // Allocate from available bytes (available -> size).
1466  void AllocateBytes(intptr_t size_in_bytes) {
1467  size_ += size_in_bytes;
1468  ASSERT(size_ >= 0);
1469  }
1470 
1471  // Free allocated bytes, making them available (size -> available).
1472  void DeallocateBytes(intptr_t size_in_bytes) {
1473  size_ -= size_in_bytes;
1474  ASSERT(size_ >= 0);
1475  }
1476 
1477  // Waste free bytes (available -> waste).
1478  void WasteBytes(int size_in_bytes) {
1479  size_ -= size_in_bytes;
1480  waste_ += size_in_bytes;
1481  ASSERT(size_ >= 0);
1482  }
1483 
1484  private:
1485  intptr_t capacity_;
1486  intptr_t max_capacity_;
1487  intptr_t size_;
1488  intptr_t waste_;
1489 };
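As an illustrative aside (not part of spaces.h): AllocationStats keeps capacity == size + waste + available, and every mutator adjusts the counters in balanced pairs. A minimal numeric sketch of that invariant:

#include <cassert>
#include <cstdint>

int main() {
  intptr_t capacity = 0, size = 0, waste = 0;

  // ExpandSpace: a new page is added; its bytes start out counted as size.
  capacity += 4096; size += 4096;
  // DeallocateBytes: sweeping returns most of it to the free list (available).
  size -= 4000;
  // WasteBytes: a leftover block too small to use is written off.
  size -= 32; waste += 32;

  intptr_t available = capacity - size - waste;
  assert(size == 64 && waste == 32 && available == 4000);
  return 0;
}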
1490 
1491 
1492 // -----------------------------------------------------------------------------
1493 // Free lists for old object spaces
1494 //
1495 // Free-list nodes are free blocks in the heap. They look like heap objects
1496 // (free-list node pointers have the heap object tag, and they have a map like
1497 // a heap object). They have a size and a next pointer. The next pointer is
1498 // the raw address of the next free list node (or NULL).
1499 class FreeListNode: public HeapObject {
1500  public:
1501  // Obtain a free-list node from a raw address. This is not a cast because
1502  // it does not check nor require that the first word at the address is a map
1503  // pointer.
1504  static FreeListNode* FromAddress(Address address) {
1505  return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1506  }
1507 
1508  static inline bool IsFreeListNode(HeapObject* object);
1509 
1510  // Set the size in bytes, which can be read with HeapObject::Size(). This
1511  // function also writes a map to the first word of the block so that it
1512  // looks like a heap object to the garbage collector and heap iteration
1513  // functions.
1514  void set_size(Heap* heap, int size_in_bytes);
1515 
1516  // Accessors for the next field.
1517  inline FreeListNode* next();
1518  inline FreeListNode** next_address();
1519  inline void set_next(FreeListNode* next);
1520 
1521  inline void Zap();
1522 
1523  static inline FreeListNode* cast(MaybeObject* maybe) {
1524  ASSERT(!maybe->IsFailure());
1525  return reinterpret_cast<FreeListNode*>(maybe);
1526  }
1527 
1528  private:
1529  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1530 
1531  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1532 };
1533 
1534 
1535 // The free list category holds a pointer to the top element and a pointer to
1536 // the end element of the linked list of free memory blocks.
1537 class FreeListCategory {
1538  public:
1539  FreeListCategory() :
1540  top_(0),
1541  end_(NULL),
1542  available_(0) {}
1543 
1544  intptr_t Concatenate(FreeListCategory* category);
1545 
1546  void Reset();
1547 
1548  void Free(FreeListNode* node, int size_in_bytes);
1549 
1550  FreeListNode* PickNodeFromList(int *node_size);
1551  FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
1552 
1553  intptr_t EvictFreeListItemsInList(Page* p);
1555 
1556  void RepairFreeList(Heap* heap);
1557 
1558  FreeListNode* top() const {
1559  return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
1560  }
1561 
1562  void set_top(FreeListNode* top) {
1563  NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
1564  }
1565 
1566  FreeListNode** GetEndAddress() { return &end_; }
1567  FreeListNode* end() const { return end_; }
1568  void set_end(FreeListNode* end) { end_ = end; }
1569 
1570  int* GetAvailableAddress() { return &available_; }
1571  int available() const { return available_; }
1572  void set_available(int available) { available_ = available; }
1573 
1574  Mutex* mutex() { return &mutex_; }
1575 
1576  bool IsEmpty() {
1577  return top() == 0;
1578  }
1579 
1580 #ifdef DEBUG
1581  intptr_t SumFreeList();
1582  int FreeListLength();
1583 #endif
1584 
1585  private:
1586  // top_ points to the top FreeListNode* in the free list category.
1587  AtomicWord top_;
1588  FreeListNode* end_;
1589  Mutex mutex_;
1590 
1591  // Total available bytes in all blocks of this free list category.
1592  int available_;
1593 };
1594 
1595 
1596 // The free list for the old space. The free list is organized in such a way
1597 // as to encourage objects allocated around the same time to be near each
1598 // other. The normal way to allocate is intended to be by bumping a 'top'
1599 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1600 // find a new space to allocate from. This is done with the free list, which
1601 // is divided up into rough categories to cut down on waste. Having finer
1602 // categories would scatter allocation more.
1603 
1604 // The old space free list is organized in categories.
1605 // 1-31 words: Such small free areas are discarded for efficiency reasons.
1606 // They can be reclaimed by the compactor. However the distance between top
1607 // and limit may be this small.
1608 // 32-255 words: There is a list of spaces this large. It is used for top and
1609 // limit when the object we need to allocate is 1-31 words in size. These
1610 // spaces are called small.
1611 // 256-2047 words: There is a list of spaces this large. It is used for top and
1612 // limit when the object we need to allocate is 32-255 words in size. These
1613 // spaces are called medium.
1614 // 2048-16383 words: There is a list of spaces this large. It is used for top
1615 // and limit when the object we need to allocate is 256-2047 words in size.
1616 // These spaces are called large.
1617 // At least 16384 words. This list is for objects of 2048 words or larger.
1618 // Empty pages are added to this list. These spaces are called huge.
1619 class FreeList {
1620  public:
1621  explicit FreeList(PagedSpace* owner);
1622 
1623  intptr_t Concatenate(FreeList* free_list);
1624 
1625  // Clear the free list.
1626  void Reset();
1627 
1628  // Return the number of bytes available on the free list.
1629  intptr_t available() {
1630  return small_list_.available() + medium_list_.available() +
1631  large_list_.available() + huge_list_.available();
1632  }
1633 
1634  // Place a node on the free list. The block of size 'size_in_bytes'
1635  // starting at 'start' is placed on the free list. The return value is the
1636  // number of bytes that have been lost due to internal fragmentation by
1637  // freeing the block. Bookkeeping information will be written to the block,
1638  // i.e., its contents will be destroyed. The start address should be word
1639  // aligned, and the size should be a non-zero multiple of the word size.
1640  int Free(Address start, int size_in_bytes);
1641 
1642  // Allocate a block of size 'size_in_bytes' from the free list. The block
1643  // is uninitialized. A failure is returned if no block is available. The
1644  // number of bytes lost to fragmentation is returned in the output parameter
1645  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1646  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1647 
1648  bool IsEmpty() {
1649  return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1650  large_list_.IsEmpty() && huge_list_.IsEmpty();
1651  }
1652 
1653 #ifdef DEBUG
1654  void Zap();
1655  intptr_t SumFreeLists();
1656  bool IsVeryLong();
1657 #endif
1658 
1659  // Used after booting the VM.
1660  void RepairLists(Heap* heap);
1661 
1662  intptr_t EvictFreeListItems(Page* p);
1664 
1665  FreeListCategory* small_list() { return &small_list_; }
1666  FreeListCategory* medium_list() { return &medium_list_; }
1667  FreeListCategory* large_list() { return &large_list_; }
1668  FreeListCategory* huge_list() { return &huge_list_; }
1669 
1670  private:
1671  // The size range of blocks, in bytes.
1672  static const int kMinBlockSize = 3 * kPointerSize;
1673  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1674 
1675  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1676 
1677  PagedSpace* owner_;
1678  Heap* heap_;
1679 
1680  static const int kSmallListMin = 0x20 * kPointerSize;
1681  static const int kSmallListMax = 0xff * kPointerSize;
1682  static const int kMediumListMax = 0x7ff * kPointerSize;
1683  static const int kLargeListMax = 0x3fff * kPointerSize;
1684  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1685  static const int kMediumAllocationMax = kSmallListMax;
1686  static const int kLargeAllocationMax = kMediumListMax;
1687  FreeListCategory small_list_;
1688  FreeListCategory medium_list_;
1689  FreeListCategory large_list_;
1690  FreeListCategory huge_list_;
1691 
1692  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1693 };
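As an illustrative aside (not part of spaces.h): the category thresholds described in the comment above map a free block's size in words onto the small, medium, large, or huge list (free areas under 32 words are discarded). A minimal sketch of that selection; the function and enum names are local to the example.

#include <cassert>
#include <cstddef>

enum Category { kDiscarded, kSmall, kMedium, kLarge, kHuge };

Category CategoryForSizeInWords(size_t words) {
  if (words < 32) return kDiscarded;    // too small to track on a list
  if (words < 256) return kSmall;
  if (words < 2048) return kMedium;
  if (words < 16384) return kLarge;
  return kHuge;
}

int main() {
  assert(CategoryForSizeInWords(16) == kDiscarded);
  assert(CategoryForSizeInWords(100) == kSmall);
  assert(CategoryForSizeInWords(300) == kMedium);
  assert(CategoryForSizeInWords(5000) == kLarge);
  assert(CategoryForSizeInWords(20000) == kHuge);
  return 0;
}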
1694 
1695 
1696 class PagedSpace : public Space {
1697  public:
1698  // Creates a space with a maximum capacity, and an id.
1699  PagedSpace(Heap* heap,
1700  intptr_t max_capacity,
1701  AllocationSpace id,
1702  Executability executable);
1703 
1704  virtual ~PagedSpace() {}
1705 
1706  // Set up the space using the given address range of virtual memory (from
1707  // the memory allocator's initial chunk) if possible. If the block of
1708  // addresses is not big enough to contain a single page-aligned page, a
1709  // fresh chunk will be allocated.
1710  bool SetUp();
1711 
1712  // Returns true if the space has been successfully set up and not
1713  // subsequently torn down.
1714  bool HasBeenSetUp();
1715 
1716  // Cleans up the space, frees all pages in this space except those belonging
1717  // to the initial chunk, uncommits addresses in the initial chunk.
1718  void TearDown();
1719 
1720  // Checks whether an object/address is in this space.
1721  inline bool Contains(Address a);
1722  bool Contains(HeapObject* o) { return Contains(o->address()); }
1723 
1724  // Given an address occupied by a live object, return that object if it is
1725  // in this space, or Failure::Exception() if it is not. The implementation
1726  // iterates over objects in the page containing the address, the cost is
1727  // linear in the number of objects in the page. It may be slow.
1728  MUST_USE_RESULT MaybeObject* FindObject(Address addr);
1729 
1730  // During boot the free_space_map is created, and afterwards we may need
1731  // to write it into the free list nodes that were already created.
1732  void RepairFreeListsAfterBoot();
1733 
1734  // Prepares for a mark-compact GC.
1735  void PrepareForMarkCompact();
1736 
1737  // Current capacity without growing (Size() + Available()).
1738  intptr_t Capacity() { return accounting_stats_.Capacity(); }
1739 
1740  // Total amount of memory committed for this space. For paged
1741  // spaces this equals the capacity.
1742  intptr_t CommittedMemory() { return Capacity(); }
1743 
1744  // The maximum amount of memory ever committed for this space.
1745  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
1746 
1747  // Approximate amount of physical memory committed for this space.
1748  size_t CommittedPhysicalMemory();
1749 
1750  struct SizeStats {
1751  intptr_t Total() {
1752  return small_size_ + medium_size_ + large_size_ + huge_size_;
1753  }
1754 
1755  intptr_t small_size_;
1756  intptr_t medium_size_;
1757  intptr_t large_size_;
1758  intptr_t huge_size_;
1759  };
1760 
1761  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1762  void ResetFreeListStatistics();
1763 
1764  // Sets the capacity, the available space and the wasted space to zero.
1765  // The stats are rebuilt during sweeping by adding each page to the
1766  // capacity and the size when it is encountered. As free spaces are
1767  // discovered during the sweeping they are subtracted from the size and added
1768  // to the available and wasted totals.
1769  void ClearStats() {
1770  accounting_stats_.ClearSizeWaste();
1771  ResetFreeListStatistics();
1772  }
1773 
1774  // Increases the number of available bytes of that space.
1775  void AddToAccountingStats(intptr_t bytes) {
1776  accounting_stats_.DeallocateBytes(bytes);
1777  }
1778 
1779  // Available bytes without growing. These are the bytes on the free list.
1780  // The bytes in the linear allocation area are not included in this total
1781  // because updating the stats would slow down allocation. New pages are
1782  // immediately added to the free list so they show up here.
1783  intptr_t Available() { return free_list_.available(); }
1784 
1785  // Allocated bytes in this space. Garbage bytes that were not found due to
1786  // lazy sweeping are counted as being allocated! The bytes in the current
1787  // linear allocation area (between top and limit) are also counted here.
1788  virtual intptr_t Size() { return accounting_stats_.Size(); }
1789 
1790  // As size, but the bytes in lazily swept pages are estimated and the bytes
1791  // in the current linear allocation area are not included.
1792  virtual intptr_t SizeOfObjects();
1793 
1794  // Wasted bytes in this space. These are just the bytes that were thrown away
1795  // due to being too small to use for allocation. They do not include the
1796  // free bytes that were not found at all due to lazy sweeping.
1797  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1798 
1799  // Returns the allocation pointer in this space.
1800  Address top() { return allocation_info_.top(); }
1801  Address limit() { return allocation_info_.limit(); }
1802 
1803  // The allocation top address.
1804  Address* allocation_top_address() {
1805  return allocation_info_.top_address();
1806  }
1807 
1808  // The allocation limit address.
1809  Address* allocation_limit_address() {
1810  return allocation_info_.limit_address();
1811  }
1812 
1813  // Allocate the requested number of bytes in the space if possible, return a
1814  // failure object if not.
1815  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1816 
1817  // Give a block of memory to the space's free list. It might be added to
1818  // the free list or accounted as waste.
1819  // If add_to_freelist is false then just accounting stats are updated and
1820  // no attempt to add area to free list is made.
1821  int Free(Address start, int size_in_bytes) {
1822  int wasted = free_list_.Free(start, size_in_bytes);
1823  accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1824  return size_in_bytes - wasted;
1825  }
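
For illustration, here is a minimal sketch of the Free() contract above, assuming VM-internal code with a v8::internal::PagedSpace* called space and a dead region [start, start + size_in_bytes) inside one of its pages; neither variable comes from this header.

  // Sketch only: return a dead region to the space and observe the split
  // between reusable free-list bytes and waste.
  int net = space->Free(start, size_in_bytes);   // bytes that reached the free list
  int wasted = size_in_bytes - net;              // remainder, accounted as waste
  // The reusable part becomes visible through space->Available(), which
  // reports exactly the bytes sitting on the free list.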
1826 
1827  void ResetFreeList() {
1828  free_list_.Reset();
1829  }
1830 
1831  // Set space allocation info.
1832  void SetTopAndLimit(Address top, Address limit) {
1833  ASSERT(top == limit ||
1834  Page::FromAddress(top) == Page::FromAddress(limit - 1));
1835  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1836  allocation_info_.set_top(top);
1837  allocation_info_.set_limit(limit);
1838  }
1839 
1840  // Empty space allocation info, returning unused area to free list.
1841  void EmptyAllocationInfo() {
1842  // Mark the old linear allocation area with a free space map so it can be
1843  // skipped when scanning the heap.
1844  int old_linear_size = static_cast<int>(limit() - top());
1845  Free(top(), old_linear_size);
1846  SetTopAndLimit(NULL, NULL);
1847  }
1848 
1849  void Allocate(int bytes) {
1850  accounting_stats_.AllocateBytes(bytes);
1851  }
1852 
1853  void IncreaseCapacity(int size);
1854 
1855  // Releases an unused page and shrinks the space.
1856  void ReleasePage(Page* page, bool unlink);
1857 
1858  // The dummy page that anchors the linked list of pages.
1859  Page* anchor() { return &anchor_; }
1860 
1861 #ifdef VERIFY_HEAP
1862  // Verify integrity of this space.
1863  virtual void Verify(ObjectVisitor* visitor);
1864 
1865  // Overridden by subclasses to verify space-specific object
1866  // properties (e.g., only maps or free-list nodes are in map space).
1867  virtual void VerifyObject(HeapObject* obj) {}
1868 #endif
1869 
1870 #ifdef DEBUG
1871  // Print meta info and objects in this space.
1872  virtual void Print();
1873 
1874  // Reports statistics for the space
1875  void ReportStatistics();
1876 
1877  // Report code object related statistics
1878  void CollectCodeStatistics();
1879  static void ReportCodeStatistics(Isolate* isolate);
1880  static void ResetCodeStatistics(Isolate* isolate);
1881 #endif
1882 
1883  bool was_swept_conservatively() { return was_swept_conservatively_; }
1884  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
1885 
1886  // Evacuation candidates are swept by evacuator. Needs to return a valid
1887  // result before _and_ after evacuation has finished.
1888  static bool ShouldBeSweptLazily(Page* p) {
1889  return !p->IsEvacuationCandidate() &&
1890  !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1891  !p->WasSweptPrecisely();
1892  }
1893 
1894  void SetPagesToSweep(Page* first) {
1895  ASSERT(unswept_free_bytes_ == 0);
1896  if (first == &anchor_) first = NULL;
1897  first_unswept_page_ = first;
1898  }
1899 
1900  void IncrementUnsweptFreeBytes(intptr_t by) {
1901  unswept_free_bytes_ += by;
1902  }
1903 
1904  void IncreaseUnsweptFreeBytes(Page* p) {
1905  ASSERT(ShouldBeSweptLazily(p));
1906  unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1907  }
1908 
1909  void DecrementUnsweptFreeBytes(intptr_t by) {
1910  unswept_free_bytes_ -= by;
1911  }
1912 
1913  void DecreaseUnsweptFreeBytes(Page* p) {
1914  ASSERT(ShouldBeSweptLazily(p));
1915  unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1916  }
1917 
1918  void ResetUnsweptFreeBytes() {
1919  unswept_free_bytes_ = 0;
1920  }
1921 
1922  bool AdvanceSweeper(intptr_t bytes_to_sweep);
1923 
1924  // When parallel sweeper threads are active and the main thread finished
1925  // its sweeping phase, this function waits for them to complete, otherwise
1926  // AdvanceSweeper with size_in_bytes is called.
1927  bool EnsureSweeperProgress(intptr_t size_in_bytes);
1928 
1929  bool IsLazySweepingComplete() {
1930  return !first_unswept_page_->is_valid();
1931  }
1932 
1933  Page* FirstPage() { return anchor_.next_page(); }
1934  Page* LastPage() { return anchor_.prev_page(); }
1935 
1936  void EvictEvacuationCandidatesFromFreeLists();
1937 
1938  bool CanExpand();
1939 
1940  // Returns the number of total pages in this space.
1941  int CountTotalPages();
1942 
1943  // Return size of allocatable area on a page in this space.
1944  inline int AreaSize() {
1945  return area_size_;
1946  }
1947 
1948  protected:
1949  FreeList* free_list() { return &free_list_; }
1950 
1951  int area_size_;
1952 
1953  // Maximum capacity of this space.
1954  intptr_t max_capacity_;
1955 
1956  intptr_t SizeOfFirstPage();
1957 
1958  // Accounting information for this space.
1959  AllocationStats accounting_stats_;
1960 
1961  // The dummy page that anchors the double linked list of pages.
1962  Page anchor_;
1963 
1964  // The space's free list.
1965  FreeList free_list_;
1966 
1967  // Normal allocation information.
1968  AllocationInfo allocation_info_;
1969 
1970  bool was_swept_conservatively_;
1971 
1972  // The first page to be swept when the lazy sweeper advances. Is set
1973  // to NULL when all pages have been swept.
1974  Page* first_unswept_page_;
1975 
1976  // The number of free bytes which could be reclaimed by advancing the
1977  // lazy sweeper. This is only an estimation because lazy sweeping is
1978  // done conservatively.
1979  intptr_t unswept_free_bytes_;
1980 
1981  // Expands the space by allocating a fixed number of pages. Returns false if
1982  // it cannot allocate requested number of pages from OS, or if the hard heap
1983  // size limit has been hit.
1984  bool Expand();
1985 
1986  // Generic fast case allocation function that tries linear allocation at the
1987  // address denoted by top in allocation_info_.
1988  inline HeapObject* AllocateLinearly(int size_in_bytes);
1989 
1990  // Slow path of AllocateRaw. This function is space-dependent.
1991  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1992 
1993  friend class PageIterator;
1994  friend class MarkCompactCollector;
1995 };
1996 
1997 
1998 class NumberAndSizeInfo BASE_EMBEDDED {
1999  public:
2000  NumberAndSizeInfo() : number_(0), bytes_(0) {}
2001 
2002  int number() const { return number_; }
2003  void increment_number(int num) { number_ += num; }
2004 
2005  int bytes() const { return bytes_; }
2006  void increment_bytes(int size) { bytes_ += size; }
2007 
2008  void clear() {
2009  number_ = 0;
2010  bytes_ = 0;
2011  }
2012 
2013  private:
2014  int number_;
2015  int bytes_;
2016 };
2017 
2018 
2019 // HistogramInfo class for recording a single "bar" of a histogram. This
2020 // class is used for collecting statistics to print to the log file.
2021 class HistogramInfo: public NumberAndSizeInfo {
2022  public:
2023  HistogramInfo() : NumberAndSizeInfo() {}
2024 
2025  const char* name() { return name_; }
2026  void set_name(const char* name) { name_ = name; }
2027 
2028  private:
2029  const char* name_;
2030 };
2031 
2032 
2033 enum SemiSpaceId {
2034  kFromSpace = 0,
2035  kToSpace = 1
2036 };
2037 
2038 
2039 class SemiSpace;
2040 
2041 
2042 class NewSpacePage : public MemoryChunk {
2043  public:
2044  // GC related flags copied from from-space to to-space when
2045  // flipping semispaces.
2046  static const intptr_t kCopyOnFlipFlagsMask =
2047  (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2048  (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2049  (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2050 
2051  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2052 
2053  inline NewSpacePage* next_page() const {
2054  return static_cast<NewSpacePage*>(next_chunk());
2055  }
2056 
2057  inline void set_next_page(NewSpacePage* page) {
2058  set_next_chunk(page);
2059  }
2060 
2061  inline NewSpacePage* prev_page() const {
2062  return static_cast<NewSpacePage*>(prev_chunk());
2063  }
2064 
2065  inline void set_prev_page(NewSpacePage* page) {
2066  set_prev_chunk(page);
2067  }
2068 
2069  SemiSpace* semi_space() {
2070  return reinterpret_cast<SemiSpace*>(owner());
2071  }
2072 
2073  bool is_anchor() { return !this->InNewSpace(); }
2074 
2075  static bool IsAtStart(Address addr) {
2076  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
2077  == kObjectStartOffset;
2078  }
2079 
2080  static bool IsAtEnd(Address addr) {
2081  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2082  }
2083 
2084  Address address() {
2085  return reinterpret_cast<Address>(this);
2086  }
2087 
2088  // Finds the NewSpacePage containing the given address.
2089  static inline NewSpacePage* FromAddress(Address address_in_page) {
2090  Address page_start =
2091  reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2092  ~Page::kPageAlignmentMask);
2093  NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2094  return page;
2095  }
2096 
2097  // Find the page for a limit address. A limit address is either an address
2098  // inside a page, or the address right after the last byte of a page.
2099  static inline NewSpacePage* FromLimit(Address address_limit) {
2100  return NewSpacePage::FromAddress(address_limit - 1);
2101  }
2102 
2103  // Checks if address1 and address2 are on the same new space page.
2104  static inline bool OnSamePage(Address address1, Address address2) {
2105  return NewSpacePage::FromAddress(address1) ==
2106  NewSpacePage::FromAddress(address2);
2107  }
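
The helpers above (IsAtStart, IsAtEnd, FromAddress, FromLimit, OnSamePage) are all variations on one piece of alignment arithmetic: masking an address down to its page boundary. A self-contained sketch of that arithmetic, using an illustrative 1 MB page size rather than the header's real constants:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uintptr_t kPageSize = 1u << 20;              // illustrative page size
    const uintptr_t kPageAlignmentMask = kPageSize - 1;

    uintptr_t addr = 5 * kPageSize + 0x1234;           // somewhere inside page 5
    uintptr_t page_start = addr & ~kPageAlignmentMask; // FromAddress-style masking
    uintptr_t offset = addr & kPageAlignmentMask;      // what IsAtStart/IsAtEnd test

    // FromLimit masks addr - 1, so a limit sitting exactly on a page boundary
    // still resolves to the page it terminates.
    uintptr_t limit = 6 * kPageSize;
    uintptr_t limit_page = (limit - 1) & ~kPageAlignmentMask;

    std::printf("%#lx %#lx %#lx\n", (unsigned long)page_start,
                (unsigned long)offset, (unsigned long)limit_page);
    return 0;
  }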
2108 
2109  private:
2110  // Create a NewSpacePage object that is only used as anchor
2111  // for the doubly-linked list of real pages.
2112  explicit NewSpacePage(SemiSpace* owner) {
2113  InitializeAsAnchor(owner);
2114  }
2115 
2116  static NewSpacePage* Initialize(Heap* heap,
2117  Address start,
2118  SemiSpace* semi_space);
2119 
2120  // Initialize a fake NewSpacePage used as sentinel at the ends
2121  // of a doubly-linked list of real NewSpacePages.
2122  // Only uses the prev/next links, and sets flags to not be in new-space.
2123  void InitializeAsAnchor(SemiSpace* owner);
2124 
2125  friend class SemiSpace;
2126  friend class SemiSpaceIterator;
2127 };
2128 
2129 
2130 // -----------------------------------------------------------------------------
2131 // SemiSpace in young generation
2132 //
2133 // A semispace is a contiguous chunk of memory holding page-like memory
2134 // chunks. The mark-compact collector uses the memory of the first page in
2135 // the from space as a marking stack when tracing live objects.
2136 
2137 class SemiSpace : public Space {
2138  public:
2139  // Constructor.
2140  SemiSpace(Heap* heap, SemiSpaceId semispace)
2141  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2142  start_(NULL),
2143  age_mark_(NULL),
2144  id_(semispace),
2145  anchor_(this),
2146  current_page_(NULL) { }
2147 
2148  // Sets up the semispace using the given chunk.
2149  void SetUp(Address start, int initial_capacity, int maximum_capacity);
2150 
2151  // Tear down the space. Heap memory was not allocated by the space, so it
2152  // is not deallocated here.
2153  void TearDown();
2154 
2155  // True if the space has been set up but not torn down.
2156  bool HasBeenSetUp() { return start_ != NULL; }
2157 
2158  // Grow the semispace to the new capacity. The new capacity
2159  // requested must be larger than the current capacity and less than
2160  // the maximum capacity.
2161  bool GrowTo(int new_capacity);
2162 
2163  // Shrinks the semispace to the new capacity. The new capacity
2164  // requested must be more than the amount of used memory in the
2165  // semispace and less than the current capacity.
2166  bool ShrinkTo(int new_capacity);
2167 
2168  // Returns the start address of the first page of the space.
2169  Address space_start() {
2170  ASSERT(anchor_.next_page() != &anchor_);
2171  return anchor_.next_page()->area_start();
2172  }
2173 
2174  // Returns the start address of the current page of the space.
2175  Address page_low() {
2176  return current_page_->area_start();
2177  }
2178 
2179  // Returns one past the end address of the space.
2180  Address space_end() {
2181  return anchor_.prev_page()->area_end();
2182  }
2183 
2184  // Returns one past the end address of the current page of the space.
2185  Address page_high() {
2186  return current_page_->area_end();
2187  }
2188 
2189  bool AdvancePage() {
2190  NewSpacePage* next_page = current_page_->next_page();
2191  if (next_page == anchor()) return false;
2192  current_page_ = next_page;
2193  return true;
2194  }
2195 
2196  // Resets the space to using the first page.
2197  void Reset();
2198 
2199  // Age mark accessors.
2200  Address age_mark() { return age_mark_; }
2201  void set_age_mark(Address mark);
2202 
2203  // True if the address is in the address range of this semispace (not
2204  // necessarily below the allocation pointer).
2205  bool Contains(Address a) {
2206  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2207  == reinterpret_cast<uintptr_t>(start_);
2208  }
2209 
2210  // True if the object is a heap object in the address range of this
2211  // semispace (not necessarily below the allocation pointer).
2212  bool Contains(Object* o) {
2213  return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
2214  }
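
Both Contains() overloads above boil down to one mask-and-compare: the semispace occupies an aligned, power-of-two-sized region, so clearing the low bits of any address inside it must yield start_. A self-contained sketch with made-up numbers (an 8 MB region at an aligned fake base address; both values are assumptions, not taken from this header):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uintptr_t kRegionSize = 8u << 20;            // made-up semispace extent
    const uintptr_t start = 0x4000000;                 // made-up, kRegionSize-aligned
    const uintptr_t address_mask = ~(kRegionSize - 1); // plays the role of address_mask_

    uintptr_t inside = start + 0x1234;
    uintptr_t outside = start + kRegionSize + 0x10;

    std::printf("inside: %d  outside: %d\n",
                (inside & address_mask) == start,      // 1
                (outside & address_mask) == start);    // 0
    return 0;
  }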
2215 
2216  // If we don't have these here then SemiSpace will be abstract. However
2217  // they should never be called.
2218  virtual intptr_t Size() {
2219  UNREACHABLE();
2220  return 0;
2221  }
2222 
2223  bool is_committed() { return committed_; }
2224  bool Commit();
2225  bool Uncommit();
2226 
2227  NewSpacePage* first_page() { return anchor_.next_page(); }
2228  NewSpacePage* current_page() { return current_page_; }
2229 
2230 #ifdef VERIFY_HEAP
2231  virtual void Verify();
2232 #endif
2233 
2234 #ifdef DEBUG
2235  virtual void Print();
2236  // Validate a range of addresses in a SemiSpace.
2237  // The "from" address must be on a page prior to the "to" address,
2238  // in the linked page order, or it must be earlier on the same page.
2239  static void AssertValidRange(Address from, Address to);
2240 #else
2241  // Do nothing.
2242  inline static void AssertValidRange(Address from, Address to) {}
2243 #endif
2244 
2245  // Returns the current capacity of the semi space.
2246  int Capacity() { return capacity_; }
2247 
2248  // Returns the maximum capacity of the semi space.
2249  int MaximumCapacity() { return maximum_capacity_; }
2250 
2251  // Returns the initial capacity of the semi space.
2252  int InitialCapacity() { return initial_capacity_; }
2253 
2254  SemiSpaceId id() { return id_; }
2255 
2256  static void Swap(SemiSpace* from, SemiSpace* to);
2257 
2258  // Returns the maximum amount of memory ever committed by the semi space.
2259  size_t MaximumCommittedMemory() { return maximum_committed_; }
2260 
2261  // Approximate amount of physical memory committed for this space.
2262  size_t CommittedPhysicalMemory();
2263 
2264  private:
2265  // Flips the semispace between being from-space and to-space.
2266  // Copies the flags into the masked positions on all pages in the space.
2267  void FlipPages(intptr_t flags, intptr_t flag_mask);
2268 
2269  // Updates Capacity and MaximumCommitted based on new capacity.
2270  void SetCapacity(int new_capacity);
2271 
2272  NewSpacePage* anchor() { return &anchor_; }
2273 
2274  // The current and maximum capacity of the space.
2275  int capacity_;
2276  int maximum_capacity_;
2277  int initial_capacity_;
2278 
2279  intptr_t maximum_committed_;
2280 
2281  // The start address of the space.
2282  Address start_;
2283  // Used to govern object promotion during mark-compact collection.
2284  Address age_mark_;
2285 
2286  // Masks and comparison values to test for containment in this semispace.
2287  uintptr_t address_mask_;
2288  uintptr_t object_mask_;
2289  uintptr_t object_expected_;
2290 
2291  bool committed_;
2292  SemiSpaceId id_;
2293 
2294  NewSpacePage anchor_;
2295  NewSpacePage* current_page_;
2296 
2297  friend class SemiSpaceIterator;
2298  friend class NewSpacePageIterator;
2299  public:
2300  TRACK_MEMORY("SemiSpace")
2301 };
2302 
2303 
2304 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2305 // semispace of the heap's new space. It iterates over the objects in the
2306 // semispace from a given start address (defaulting to the bottom of the
2307 // semispace) to the top of the semispace. New objects allocated after the
2308 // iterator is created are not iterated.
2309 class SemiSpaceIterator : public ObjectIterator {
2310  public:
2311  // Create an iterator over the objects in the given space. If no start
2312  // address is given, the iterator starts from the bottom of the space. If
2313  // no size function is given, the iterator calls Object::Size().
2314 
2315  // Iterate over all of allocated to-space.
2316  explicit SemiSpaceIterator(NewSpace* space);
2317  // Iterate over all of allocated to-space, with a custom size function.
2318  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2319  // Iterate over part of allocated to-space, from start to the end
2320  // of allocation.
2321  SemiSpaceIterator(NewSpace* space, Address start);
2322  // Iterate from one address to another in the same semi-space.
2323  SemiSpaceIterator(Address from, Address to);
2324 
2325  HeapObject* Next() {
2326  if (current_ == limit_) return NULL;
2327  if (NewSpacePage::IsAtEnd(current_)) {
2328  NewSpacePage* page = NewSpacePage::FromLimit(current_);
2329  page = page->next_page();
2330  ASSERT(!page->is_anchor());
2331  current_ = page->area_start();
2332  if (current_ == limit_) return NULL;
2333  }
2334 
2335  HeapObject* object = HeapObject::FromAddress(current_);
2336  int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2337 
2338  current_ += size;
2339  return object;
2340  }
2341 
2342  // Implementation of the ObjectIterator functions.
2343  virtual HeapObject* next_object() { return Next(); }
2344 
2345  private:
2346  void Initialize(Address start,
2347  Address end,
2348  HeapObjectCallback size_func);
2349 
2350  // The current iteration point.
2351  Address current_;
2352  // The end of iteration.
2353  Address limit_;
2354  // The callback function.
2355  HeapObjectCallback size_func_;
2356 };
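
A usage sketch for the iterator declared above. It assumes VM-internal code with a v8::internal::Heap* named heap in scope (and that heap->new_space() hands back the NewSpace); the snippet is not part of this header.

  // Sketch only: walk every object currently allocated in to-space.
  SemiSpaceIterator it(heap->new_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    // object->Size() bytes live at object->address(); objects allocated
    // after the iterator was created are not visited.
  }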
2357 
2358 
2359 // -----------------------------------------------------------------------------
2360 // A PageIterator iterates the pages in a semi-space.
2361 class NewSpacePageIterator BASE_EMBEDDED {
2362  public:
2363  // Make an iterator that runs over all pages in to-space.
2364  explicit inline NewSpacePageIterator(NewSpace* space);
2365 
2366  // Make an iterator that runs over all pages in the given semispace,
2367  // even those not used in allocation.
2368  explicit inline NewSpacePageIterator(SemiSpace* space);
2369 
2370  // Make iterator that iterates from the page containing start
2371  // to the page that contains limit in the same semispace.
2372  inline NewSpacePageIterator(Address start, Address limit);
2373 
2374  inline bool has_next();
2375  inline NewSpacePage* next();
2376 
2377  private:
2378  NewSpacePage* prev_page_; // Previous page returned.
2379  // Next page that will be returned. Cached here so that we can use this
2380  // iterator for operations that deallocate pages.
2381  NewSpacePage* next_page_;
2382  // Last page returned.
2383  NewSpacePage* last_page_;
2384 };
2385 
2386 
2387 // -----------------------------------------------------------------------------
2388 // The young generation space.
2389 //
2390 // The new space consists of a contiguous pair of semispaces. It simply
2391 // forwards most functions to the appropriate semispace.
2392 
2393 class NewSpace : public Space {
2394  public:
2395  // Constructor.
2396  explicit NewSpace(Heap* heap)
2397  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2398  to_space_(heap, kToSpace),
2399  from_space_(heap, kFromSpace),
2400  reservation_(),
2401  inline_allocation_limit_step_(0) {}
2402 
2403  // Sets up the new space using the given chunk.
2404  bool SetUp(int reserved_semispace_size_, int max_semispace_size);
2405 
2406  // Tears down the space. Heap memory was not allocated by the space, so it
2407  // is not deallocated here.
2408  void TearDown();
2409 
2410  // True if the space has been set up but not torn down.
2411  bool HasBeenSetUp() {
2412  return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2413  }
2414 
2415  // Flip the pair of spaces.
2416  void Flip();
2417 
2418  // Grow the capacity of the semispaces. Assumes that they are not at
2419  // their maximum capacity.
2420  void Grow();
2421 
2422  // Shrink the capacity of the semispaces.
2423  void Shrink();
2424 
2425  // True if the address or object lies in the address range of either
2426  // semispace (not necessarily below the allocation pointer).
2427  bool Contains(Address a) {
2428  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2429  == reinterpret_cast<uintptr_t>(start_);
2430  }
2431 
2432  bool Contains(Object* o) {
2433  Address a = reinterpret_cast<Address>(o);
2434  return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2435  }
2436 
2437  // Return the allocated bytes in the active semispace.
2438  virtual intptr_t Size() {
2439  return pages_used_ * NewSpacePage::kAreaSize +
2440  static_cast<int>(top() - to_space_.page_low());
2441  }
2442 
2443  // The same, but returning an int. We have to have the one that returns
2444  // intptr_t because it is inherited, but if we know we are dealing with the
2445  // new space, which can't get as big as the other spaces, then this is useful:
2446  int SizeAsInt() { return static_cast<int>(Size()); }
2447 
2448  // Return the current capacity of a semispace.
2449  intptr_t EffectiveCapacity() {
2450  SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2451  return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2452  }
2453 
2454  // Return the current capacity of a semispace.
2455  intptr_t Capacity() {
2456  ASSERT(to_space_.Capacity() == from_space_.Capacity());
2457  return to_space_.Capacity();
2458  }
2459 
2460  // Return the total amount of memory committed for new space.
2461  intptr_t CommittedMemory() {
2462  if (from_space_.is_committed()) return 2 * Capacity();
2463  return Capacity();
2464  }
2465 
2466  // Return the maximum amount of memory ever committed for new space.
2467  intptr_t MaximumCommittedMemory() {
2468  return to_space_.MaximumCommittedMemory() +
2469  from_space_.MaximumCommittedMemory();
2470  }
2471 
2472  // Approximate amount of physical memory committed for this space.
2473  size_t CommittedPhysicalMemory();
2474 
2475  // Return the available bytes without growing.
2476  intptr_t Available() {
2477  return Capacity() - Size();
2478  }
2479 
2480  // Return the maximum capacity of a semispace.
2481  intptr_t MaximumCapacity() {
2482  ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2483  return to_space_.MaximumCapacity();
2484  }
2485 
2486  // Returns the initial capacity of a semispace.
2487  intptr_t InitialCapacity() {
2488  ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2489  return to_space_.InitialCapacity();
2490  }
2491 
2492  // Return the address of the allocation pointer in the active semispace.
2493  Address top() {
2494  ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2495  return allocation_info_.top();
2496  }
2497 
2498  void set_top(Address top) {
2499  ASSERT(to_space_.current_page()->ContainsLimit(top));
2500  allocation_info_.set_top(top);
2501  }
2502 
2503  // Return the address of the allocation pointer limit in the active semispace.
2504  Address limit() {
2505  ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2506  return allocation_info_.limit();
2507  }
2508 
2509  // Return the address of the first object in the active semispace.
2510  Address bottom() { return to_space_.space_start(); }
2511 
2512  // Get the age mark of the inactive semispace.
2513  Address age_mark() { return from_space_.age_mark(); }
2514  // Set the age mark in the active semispace.
2515  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2516 
2517  // The start address of the space and a bit mask. Anding an address in the
2518  // new space with the mask will result in the start address.
2519  Address start() { return start_; }
2520  uintptr_t mask() { return address_mask_; }
2521 
2522  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2523  ASSERT(Contains(addr));
2524  ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2525  IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2526  return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2527  }
2528 
2529  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2530  return reinterpret_cast<Address>(index << kPointerSizeLog2);
2531  }
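
The two converters above are plain shift arithmetic: every pointer-aligned slot in new space owns one mark bit. A self-contained sketch of the round trip, assuming a 64-bit layout (kPointerSizeLog2 == 3) and a made-up space start; neither value is taken from this header:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int kLog2 = 3;                       // assumed 64-bit pointer size log2
    uintptr_t start = 0x20000000;              // made-up new-space start address
    uintptr_t addr = start + 0x40;             // an object 64 bytes into the space

    uint32_t index = (uint32_t)((addr - start) >> kLog2);   // markbit index 8
    uintptr_t offset = (uintptr_t)index << kLog2;           // back to the byte offset

    std::printf("index=%u offset=%#lx\n", index, (unsigned long)offset);
    return 0;
  }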
2532 
2533  // The allocation top and limit address.
2534  Address* allocation_top_address() {
2535  return allocation_info_.top_address();
2536  }
2537 
2538  // The allocation limit address.
2539  Address* allocation_limit_address() {
2540  return allocation_info_.limit_address();
2541  }
2542 
2543  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
2544 
2545  // Reset the allocation pointer to the beginning of the active semispace.
2546  void ResetAllocationInfo();
2547 
2548  void UpdateInlineAllocationLimit(int size_in_bytes);
2549  void LowerInlineAllocationLimit(intptr_t step) {
2550  inline_allocation_limit_step_ = step;
2551  UpdateInlineAllocationLimit(0);
2552  top_on_previous_step_ = allocation_info_.top();
2553  }
2554 
2555  // Get the extent of the inactive semispace (for use as a marking stack,
2556  // or to zap it). Notice: space-addresses are not necessarily on the
2557  // same page, so FromSpaceStart() might be above FromSpaceEnd().
2558  Address FromSpacePageLow() { return from_space_.page_low(); }
2559  Address FromSpacePageHigh() { return from_space_.page_high(); }
2560  Address FromSpaceStart() { return from_space_.space_start(); }
2561  Address FromSpaceEnd() { return from_space_.space_end(); }
2562 
2563  // Get the extent of the active semispace's pages' memory.
2564  Address ToSpaceStart() { return to_space_.space_start(); }
2565  Address ToSpaceEnd() { return to_space_.space_end(); }
2566 
2567  inline bool ToSpaceContains(Address address) {
2568  return to_space_.Contains(address);
2569  }
2570  inline bool FromSpaceContains(Address address) {
2571  return from_space_.Contains(address);
2572  }
2573 
2574  // True if the object is a heap object in the address range of the
2575  // respective semispace (not necessarily below the allocation pointer of the
2576  // semispace).
2577  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2578  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2579 
2580  // Try to switch the active semispace to a new, empty, page.
2581  // Returns false if this isn't possible or reasonable (i.e., there
2582  // are no pages, or the current page is already empty), or true
2583  // if successful.
2584  bool AddFreshPage();
2585 
2586 #ifdef VERIFY_HEAP
2587  // Verify the active semispace.
2588  virtual void Verify();
2589 #endif
2590 
2591 #ifdef DEBUG
2592  // Print the active semispace.
2593  virtual void Print() { to_space_.Print(); }
2594 #endif
2595 
2596  // Iterates the active semispace to collect statistics.
2597  void CollectStatistics();
2598  // Reports previously collected statistics of the active semispace.
2599  void ReportStatistics();
2600  // Clears previously collected statistics.
2601  void ClearHistograms();
2602 
2603  // Record the allocation or promotion of a heap object. Note that we don't
2604  // record every single allocation, but only those that happen in the
2605  // to space during a scavenge GC.
2606  void RecordAllocation(HeapObject* obj);
2607  void RecordPromotion(HeapObject* obj);
2608 
2609  // Return whether the operation succeeded.
2610  bool CommitFromSpaceIfNeeded() {
2611  if (from_space_.is_committed()) return true;
2612  return from_space_.Commit();
2613  }
2614 
2615  bool UncommitFromSpace() {
2616  if (!from_space_.is_committed()) return true;
2617  return from_space_.Uncommit();
2618  }
2619 
2620  inline intptr_t inline_allocation_limit_step() {
2621  return inline_allocation_limit_step_;
2622  }
2623 
2624  SemiSpace* active_space() { return &to_space_; }
2625 
2626  private:
2627  // Update allocation info to match the current to-space page.
2628  void UpdateAllocationInfo();
2629 
2630  Address chunk_base_;
2631  uintptr_t chunk_size_;
2632 
2633  // The semispaces.
2634  SemiSpace to_space_;
2635  SemiSpace from_space_;
2636  VirtualMemory reservation_;
2637  int pages_used_;
2638 
2639  // Start address and bit mask for containment testing.
2640  Address start_;
2641  uintptr_t address_mask_;
2642  uintptr_t object_mask_;
2643  uintptr_t object_expected_;
2644 
2645  // Allocation pointer and limit for normal allocation and allocation during
2646  // mark-compact collection.
2647  AllocationInfo allocation_info_;
2648 
2649  // When incremental marking is active we will set allocation_info_.limit
2650  // to be lower than actual limit and then will gradually increase it
2651  // in steps to guarantee that we do incremental marking steps even
2652  // when all allocation is performed from inlined generated code.
2653  intptr_t inline_allocation_limit_step_;
2654 
2655  Address top_on_previous_step_;
2656 
2657  HistogramInfo* allocated_histogram_;
2658  HistogramInfo* promoted_histogram_;
2659 
2660  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
2661 
2662  friend class SemiSpaceIterator;
2663 
2664  public:
2665  TRACK_MEMORY("NewSpace")
2666 };
2667 
2668 
2669 // -----------------------------------------------------------------------------
2670 // Old object space (excluding map objects)
2671 
2672 class OldSpace : public PagedSpace {
2673  public:
2674  // Creates an old space object with a given maximum capacity.
2675  // The constructor does not allocate pages from OS.
2676  OldSpace(Heap* heap,
2677  intptr_t max_capacity,
2678  AllocationSpace id,
2679  Executability executable)
2680  : PagedSpace(heap, max_capacity, id, executable) {
2681  }
2682 
2683  public:
2684  TRACK_MEMORY("OldSpace")
2685 };
2686 
2687 
2688 // For contiguous spaces, top should be in the space (or at the end) and limit
2689 // should be the end of the space.
2690 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2691  SLOW_ASSERT((space).page_low() <= (info).top() \
2692  && (info).top() <= (space).page_high() \
2693  && (info).limit() <= (space).page_high())
2694 
2695 
2696 // -----------------------------------------------------------------------------
2697 // Old space for all map objects
2698 
2699 class MapSpace : public PagedSpace {
2700  public:
2701  // Creates a map space object with a maximum capacity.
2702  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2703  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2704  max_map_space_pages_(kMaxMapPageIndex - 1) {
2705  }
2706 
2707  // Given an index, returns the page address.
2708  // TODO(1600): this limit is artificial just to keep code compilable
2709  static const int kMaxMapPageIndex = 1 << 16;
2710 
2711  virtual int RoundSizeDownToObjectAlignment(int size) {
2712  if (IsPowerOf2(Map::kSize)) {
2713  return RoundDown(size, Map::kSize);
2714  } else {
2715  return (size / Map::kSize) * Map::kSize;
2716  }
2717  }
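
RoundSizeDownToObjectAlignment() above rounds an allocation size down to a whole number of Map objects, taking the cheap bitwise path only when Map::kSize happens to be a power of two. A self-contained sketch of the same arithmetic with an illustrative object size (88 is an assumption, not the real Map::kSize):

  #include <cstdio>

  // Round size down to a multiple of alignment, mirroring the logic above.
  static int RoundDownTo(int size, int alignment) {
    if ((alignment & (alignment - 1)) == 0) {
      return size & ~(alignment - 1);          // power-of-two fast path
    }
    return (size / alignment) * alignment;     // general case
  }

  int main() {
    std::printf("%d\n", RoundDownTo(1000, 88));  // 968 = 11 * 88
    std::printf("%d\n", RoundDownTo(1000, 64));  // 960, via the bitwise path
    return 0;
  }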
2718 
2719  protected:
2720  virtual void VerifyObject(HeapObject* obj);
2721 
2722  private:
2723  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2724 
2725  // Do map space compaction if there is a page gap.
2726  int CompactionThreshold() {
2727  return kMapsPerPage * (max_map_space_pages_ - 1);
2728  }
2729 
2730  const int max_map_space_pages_;
2731 
2732  public:
2733  TRACK_MEMORY("MapSpace")
2734 };
2735 
2736 
2737 // -----------------------------------------------------------------------------
2738 // Old space for simple property cell objects
2739 
2740 class CellSpace : public PagedSpace {
2741  public:
2742  // Creates a property cell space object with a maximum capacity.
2743  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2744  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
2745  }
2746 
2747  virtual int RoundSizeDownToObjectAlignment(int size) {
2748  if (IsPowerOf2(Cell::kSize)) {
2749  return RoundDown(size, Cell::kSize);
2750  } else {
2751  return (size / Cell::kSize) * Cell::kSize;
2752  }
2753  }
2754 
2755  protected:
2756  virtual void VerifyObject(HeapObject* obj);
2757 
2758  public:
2759  TRACK_MEMORY("CellSpace")
2760 };
2761 
2762 
2763 // -----------------------------------------------------------------------------
2764 // Old space for all global object property cell objects
2765 
2766 class PropertyCellSpace : public PagedSpace {
2767  public:
2768  // Creates a property cell space object with a maximum capacity.
2769  PropertyCellSpace(Heap* heap, intptr_t max_capacity,
2770  AllocationSpace id)
2771  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
2772  }
2773 
2774  virtual int RoundSizeDownToObjectAlignment(int size) {
2775  if (IsPowerOf2(PropertyCell::kSize)) {
2776  return RoundDown(size, PropertyCell::kSize);
2777  } else {
2778  return (size / PropertyCell::kSize) * PropertyCell::kSize;
2779  }
2780  }
2781 
2782  protected:
2783  virtual void VerifyObject(HeapObject* obj);
2784 
2785  public:
2786  TRACK_MEMORY("PropertyCellSpace")
2787 };
2788 
2789 
2790 // -----------------------------------------------------------------------------
2791 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2792 // the large object space. A large object is allocated from OS heap with
2793 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2794 // A large object always starts at Page::kObjectStartOffset within its page.
2795 // Large objects do not move during garbage collections.
2796 
2797 class LargeObjectSpace : public Space {
2798  public:
2799  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2800  virtual ~LargeObjectSpace() {}
2801 
2802  // Initializes internal data structures.
2803  bool SetUp();
2804 
2805  // Releases internal resources, frees objects in this space.
2806  void TearDown();
2807 
2808  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2809  if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2810  return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2811  }
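
ObjectSizeFor() above simply strips the fixed per-chunk overhead mentioned in the class comment. A self-contained numeric sketch, using illustrative stand-ins for Page::kPageSize and Page::kObjectStartOffset (1 MB and 256 bytes are assumptions, not the header's constants):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const intptr_t kPageSize = 1 << 20;        // stands in for Page::kPageSize
    const intptr_t kObjectStartOffset = 256;   // stands in for Page::kObjectStartOffset

    intptr_t chunk_size = 3 * kPageSize;
    // Chunks no larger than the padding cannot hold an object at all.
    intptr_t object_size = (chunk_size <= kPageSize + kObjectStartOffset)
        ? 0
        : chunk_size - kPageSize - kObjectStartOffset;

    std::printf("%ld usable bytes in a %ld byte chunk\n",
                (long)object_size, (long)chunk_size);
    return 0;
  }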
2812 
2813  // Shared implementation of AllocateRaw, AllocateRawCode and
2814  // AllocateRawFixedArray.
2815  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2816  Executability executable);
2817 
2818  // Available bytes for objects in this space.
2819  inline intptr_t Available();
2820 
2821  virtual intptr_t Size() {
2822  return size_;
2823  }
2824 
2825  virtual intptr_t SizeOfObjects() {
2826  return objects_size_;
2827  }
2828 
2829  intptr_t MaximumCommittedMemory() {
2830  return maximum_committed_;
2831  }
2832 
2833  intptr_t CommittedMemory() {
2834  return Size();
2835  }
2836 
2837  // Approximate amount of physical memory committed for this space.
2838  size_t CommittedPhysicalMemory();
2839 
2840  int PageCount() {
2841  return page_count_;
2842  }
2843 
2844  // Finds an object for a given address, returns Failure::Exception()
2845  // if it is not found. The function iterates through all objects in this
2846  // space, may be slow.
2847  MaybeObject* FindObject(Address a);
2848 
2849  // Finds a large object page containing the given address, returns NULL
2850  // if such a page doesn't exist.
2851  LargePage* FindPage(Address a);
2852 
2853  // Frees unmarked objects.
2854  void FreeUnmarkedObjects();
2855 
2856  // Checks whether a heap object is in this space; O(1).
2857  bool Contains(HeapObject* obj);
2858 
2859  // Checks whether the space is empty.
2860  bool IsEmpty() { return first_page_ == NULL; }
2861 
2862  LargePage* first_page() { return first_page_; }
2863 
2864 #ifdef VERIFY_HEAP
2865  virtual void Verify();
2866 #endif
2867 
2868 #ifdef DEBUG
2869  virtual void Print();
2870  void ReportStatistics();
2871  void CollectCodeStatistics();
2872 #endif
2873  // Checks whether an address is in the object area in this space. It
2874  // iterates all objects in the space. May be slow.
2875  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2876 
2877  private:
2878  intptr_t max_capacity_;
2879  intptr_t maximum_committed_;
2880  // The head of the linked list of large object chunks.
2881  LargePage* first_page_;
2882  intptr_t size_; // allocated bytes
2883  int page_count_; // number of chunks
2884  intptr_t objects_size_; // size of objects
2885  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2886  HashMap chunk_map_;
2887 
2888  friend class LargeObjectIterator;
2889 
2890  public:
2891  TRACK_MEMORY("LargeObjectSpace")
2892 };
2893 
2894 
2895 class LargeObjectIterator : public ObjectIterator {
2896  public:
2897  explicit LargeObjectIterator(LargeObjectSpace* space);
2898  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2899 
2900  HeapObject* Next();
2901 
2902  // implementation of ObjectIterator.
2903  virtual HeapObject* next_object() { return Next(); }
2904 
2905  private:
2906  LargePage* current_;
2907  HeapObjectCallback size_func_;
2908 };
2909 
2910 
2911 // Iterates over the chunks (pages and large object pages) that can contain
2912 // pointers to new space.
2913 class PointerChunkIterator BASE_EMBEDDED {
2914  public:
2915  inline explicit PointerChunkIterator(Heap* heap);
2916 
2917  // Return NULL when the iterator is done.
2918  MemoryChunk* next() {
2919  switch (state_) {
2920  case kOldPointerState: {
2921  if (old_pointer_iterator_.has_next()) {
2922  return old_pointer_iterator_.next();
2923  }
2924  state_ = kMapState;
2925  // Fall through.
2926  }
2927  case kMapState: {
2928  if (map_iterator_.has_next()) {
2929  return map_iterator_.next();
2930  }
2931  state_ = kLargeObjectState;
2932  // Fall through.
2933  }
2934  case kLargeObjectState: {
2935  HeapObject* heap_object;
2936  do {
2937  heap_object = lo_iterator_.Next();
2938  if (heap_object == NULL) {
2939  state_ = kFinishedState;
2940  return NULL;
2941  }
2942  // Fixed arrays are the only pointer-containing objects in large
2943  // object space.
2944  } while (!heap_object->IsFixedArray());
2945  MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2946  return answer;
2947  }
2948  case kFinishedState:
2949  return NULL;
2950  default:
2951  break;
2952  }
2953  UNREACHABLE();
2954  return NULL;
2955  }
2956 
2957 
2958  private:
2959  enum State {
2960  kOldPointerState,
2961  kMapState,
2962  kLargeObjectState,
2963  kFinishedState
2964  };
2965  State state_;
2966  PageIterator old_pointer_iterator_;
2967  PageIterator map_iterator_;
2968  LargeObjectIterator lo_iterator_;
2969 };
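
A usage sketch for PointerChunkIterator, again assuming VM-internal code with a v8::internal::Heap* named heap in scope; only the constructor and next() declared above are used.

  // Sketch only: visit every chunk that may contain pointers into new space.
  PointerChunkIterator it(heap);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    // Old pointer pages and map pages come first, followed by any large
    // FixedArrays found in large object space.
  }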
2970 
2971 
2972 #ifdef DEBUG
2973 struct CommentStatistic {
2974  const char* comment;
2975  int size;
2976  int count;
2977  void Clear() {
2978  comment = NULL;
2979  size = 0;
2980  count = 0;
2981  }
2982  // Must be small, since an iteration is used for lookup.
2983  static const int kMaxComments = 64;
2984 };
2985 #endif
2986 
2987 
2988 } } // namespace v8::internal
2989 
2990 #endif // V8_SPACES_H_
static int CodePageAreaEndOffset()
Definition: spaces.cc:880
bool FromSpaceContains(Address address)
Definition: spaces.h:2570
bool ToSpaceContains(Address address)
Definition: spaces.h:2567
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1821
HeapObject * GetObject()
Definition: spaces.h:872
static const uint32_t kBytesPerCell
Definition: spaces.h:171
void Release_Store(volatile Atomic32 *ptr, Atomic32 value)
void MarkSweptConservatively()
Definition: spaces.h:839
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
Definition: spaces-inl.h:163
void IncrementUnsweptFreeBytes(intptr_t by)
Definition: spaces.h:1900
bool ToSpaceContains(Object *o)
Definition: spaces.h:2577
intptr_t HeapObjectTagMask()
Definition: checks.cc:44
MaybeObject * FindObject(Address a)
Definition: spaces.cc:3003
void SetArea(Address area_start, Address area_end)
Definition: spaces.h:601
FreeListCategory * large_list()
Definition: spaces.h:1667
VirtualMemory reservation_
Definition: spaces.h:712
Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr)
FreeListCategory * huge_list()
Definition: spaces.h:1668
static const int kObjectStartOffset
Definition: spaces.h:592
void ExpandSpace(int size_in_bytes)
Definition: spaces.h:1447
bool SlowContains(Address addr)
Definition: spaces.h:2875
void set_prev_page(NewSpacePage *page)
Definition: spaces.h:2065
bool Contains(HeapObject *obj)
Definition: spaces.cc:3083
FreeList * free_list()
Definition: spaces.h:1949
Space * owner() const
Definition: spaces.h:332
static void Swap(SemiSpace *from, SemiSpace *to)
Definition: spaces.cc:1686
void set_progress_bar(int progress_bar)
Definition: spaces.h:538
void InitializeAsAnchor(PagedSpace *owner)
Definition: spaces.cc:419
void Print(uint32_t pos, uint32_t cell)
Definition: spaces.h:237
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:433
void MarkSweptPrecisely()
Definition: spaces.h:838
SemiSpaceIterator(NewSpace *space)
Definition: spaces.cc:1786
INLINE(static Page *FromAllocationTop(Address top))
Definition: spaces.h:783
static const intptr_t kLiveBytesOffset
Definition: spaces.h:570
intptr_t Capacity()
Definition: spaces.h:2455
bool Contains(HeapObject *o)
Definition: spaces.h:1722
void set_write_barrier_counter(int counter)
Definition: spaces.h:529
static intptr_t ObjectSizeFor(intptr_t chunk_size)
Definition: spaces.h:2808
void set_parallel_sweeping(ParallelSweepingState state)
Definition: spaces.h:490
PropertyCellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2769
INLINE(int Offset(Address a))
Definition: spaces.h:800
Address * allocation_limit_address()
Definition: spaces.h:1809
void set_next_page(NewSpacePage *page)
Definition: spaces.h:2057
void ShrinkSpace(int size_in_bytes)
Definition: spaces.h:1459
bool UncommitFromSpace()
Definition: spaces.h:2615
AtomicWord parallel_sweeping_
Definition: spaces.h:733
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:2099
virtual intptr_t Size()
Definition: spaces.h:1788
Heap * heap() const
Definition: spaces.h:900
Page * prev_page()
Definition: spaces-inl.h:244
bool Contains(Address a)
Definition: spaces.h:2205
void IncrementLiveBytes(int by)
Definition: spaces.h:510
void SetUp(Address start, int initial_capacity, int maximum_capacity)
Definition: spaces.cc:1509
void SetTopAndLimit(Address top, Address limit)
Definition: spaces.h:1832
CellType mask()
Definition: spaces.h:131
SemiSpace * semi_space()
Definition: spaces.h:2069
SlotsBuffer ** slots_buffer_address()
Definition: spaces.h:675
static FreeListNode * FromAddress(Address address)
Definition: spaces.h:1504
SemiSpace(Heap *heap, SemiSpaceId semispace)
Definition: spaces.h:2140
NewSpacePage * next_page() const
Definition: spaces.h:2053
bool IsLazySweepingComplete()
Definition: spaces.h:1929
virtual HeapObject * next_object()
Definition: spaces.h:2903
INLINE(static uint32_t CellToIndex(uint32_t index))
Definition: spaces.h:197
void Free(FreeListNode *node, int size_in_bytes)
Definition: spaces.cc:2178
static const intptr_t kAlignment
Definition: spaces.h:563
void set_owner(Space *space)
Definition: spaces.h:342
static bool IsSeq(uint32_t cell)
Definition: spaces.h:267
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, const size_t commit_size, size_t *allocated)
Definition: spaces.cc:211
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:925
CellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2743
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
Definition: spaces.cc:804
static const int kAreaSize
Definition: spaces.h:2051
static const intptr_t kCopyOnFlipFlagsMask
Definition: spaces.h:2046
LargePage * next_page() const
Definition: spaces.h:876
static const int kPointersFromHereAreInterestingMask
Definition: spaces.h:427
static const int kBodyOffset
Definition: spaces.h:585
void set_prev_chunk(MemoryChunk *prev)
Definition: spaces.h:328
bool IsEvacuationCandidate()
Definition: spaces.h:657
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1369
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.cc:937
T RoundDown(T x, intptr_t m)
Definition: utils.h:136
static const int kSize
Definition: objects.h:9599
void set_size(size_t size)
Definition: spaces.h:597
virtual intptr_t Size()
Definition: spaces.h:2218
Address MarkbitIndexToAddress(uint32_t index)
Definition: spaces.h:646
void InsertAfter(MemoryChunk *other)
Definition: spaces.cc:562
void set_next_page(LargePage *page)
Definition: spaces.h:880
void set_was_swept_conservatively(bool b)
Definition: spaces.h:1884
MarkBit(CellType *cell, CellType mask, bool data_only)
Definition: spaces.h:127
void Print(const v8::FunctionCallbackInfo< v8::Value > &args)
INLINE(MarkBit::CellType *cells())
Definition: spaces.h:205
INLINE(Address MarkbitIndexToAddress(uint32_t index))
Definition: spaces.h:2529
void UpdateInlineAllocationLimit(int size_in_bytes)
Definition: spaces.cc:1371
static const uint32_t kBitIndexMask
Definition: spaces.h:170
Page * next_page()
Definition: spaces-inl.h:238
SemiSpace * active_space()
Definition: spaces.h:2624
ObjectSpace
Definition: v8.h:4019
const int kFailureTag
Definition: v8globals.h:62
INLINE(Address address())
Definition: spaces.h:209
intptr_t write_barrier_counter_
Definition: spaces.h:725
void set_age_mark(Address mark)
Definition: spaces.cc:1715
static int CodePageGuardStartOffset()
Definition: spaces.cc:861
Atomic32 Acquire_Load(volatile const Atomic32 *ptr)
void SetPagesToSweep(Page *first)
Definition: spaces.h:1894
NewSpacePage * prev_page() const
Definition: spaces.h:2061
HeapObject * obj
const int kPageSizeBits
Definition: v8globals.h:95
SkipList * skip_list()
Definition: spaces.h:663
FreeListNode * end() const
Definition: spaces.h:1567
intptr_t unswept_free_bytes_
Definition: spaces.h:1979
INLINE(static Page *FromAddress(Address a))
Definition: spaces.h:775
Address * allocation_limit_address()
Definition: spaces.h:2539
bool CommitBlock(Address start, size_t size, Executability executable)
Definition: spaces.cc:776
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2711
AllocationInfo allocation_info_
Definition: spaces.h:1968
const char * name()
Definition: spaces.h:2025
INLINE(static uint32_t CellAlignIndex(uint32_t index))
Definition: spaces.h:201
static const uint32_t kBitsPerCellLog2
Definition: spaces.h:169
void FreeRawMemory(Address buf, size_t length)
Definition: spaces.cc:258
FreeListNode * top() const
Definition: spaces.h:1558
static const int kPointersToHereAreInterestingMask
Definition: spaces.h:424
static const int kObjectStartAlignment
Definition: spaces.h:591
Executability executable()
Definition: spaces.h:606
void set_store_buffer_counter(int counter)
Definition: spaces.h:373
NewSpace(Heap *heap)
Definition: spaces.h:2396
void ResetFreeListStatistics()
Definition: spaces.cc:1104
#define FRAGMENTATION_STATS_ACCESSORS(type, name)
Definition: spaces.h:846
MarkBit MarkBitFromIndex(uint32_t index, bool data_only=false)
Definition: spaces.h:217
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
Definition: spaces.cc:1220
static bool IsFreeListNode(HeapObject *object)
Definition: spaces-inl.h:355
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1576
MUST_USE_RESULT MaybeObject * FindObject(Address addr)
Definition: spaces.cc:996
#define CODE_POINTER_ALIGN(value)
Definition: v8globals.h:394
static bool IsAtStart(Address addr)
Definition: spaces.h:2075
bool EnsureSweeperProgress(intptr_t size_in_bytes)
Definition: spaces.cc:2632
virtual intptr_t SizeOfObjects()
Definition: spaces.h:2825
static void Update(Address addr, int size)
Definition: spaces.h:1049
intptr_t available_in_medium_free_list_
Definition: spaces.h:737
bool ShrinkTo(int new_capacity)
Definition: spaces.cc:1623
void set_end(FreeListNode *end)
Definition: spaces.h:1568
void DeallocateBytes(intptr_t size_in_bytes)
Definition: spaces.h:1472
HeapObjectIterator(PagedSpace *space)
Definition: spaces.cc:42
bool Contains(Object *o)
Definition: spaces.h:2212
void ResetFreeListStatistics()
Definition: spaces.cc:719
ParallelSweepingState parallel_sweeping()
Definition: spaces.h:485
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2935
void SetFlags(intptr_t flags, intptr_t mask)
Definition: spaces.h:463
#define V8_INLINE
Definition: v8config.h:316
void WasteBytes(int size_in_bytes)
Definition: spaces.h:1478
uint32_t AddressToMarkbitIndex(Address addr)
Definition: spaces.h:635
Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)
SkipList * skip_list_
Definition: spaces.h:724
uintptr_t mask()
Definition: spaces.h:2520
void set_skip_list(SkipList *skip_list)
Definition: spaces.h:667
MemoryChunk * prev_chunk() const
Definition: spaces.h:320
intptr_t available_in_huge_free_list_
Definition: spaces.h:739
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:324
VirtualMemory * reserved_memory()
Definition: spaces.h:349
Address StartFor(Address addr)
Definition: spaces.h:1033
static const size_t kSize
Definition: spaces.h:177
SlotsBuffer * slots_buffer()
Definition: spaces.h:671
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:2242
intptr_t available()
Definition: spaces.h:1629
MemoryChunk * next()
Definition: spaces.h:2918
static int RegionNumber(Address addr)
Definition: spaces.h:1045
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.cc:826
void ObtainFreeListStatistics(Page *p, SizeStats *sizes)
Definition: spaces.cc:1096
bool CommitFromSpaceIfNeeded()
Definition: spaces.h:2610
MUST_USE_RESULT HeapObject * Allocate(int size_in_bytes)
Definition: spaces.cc:2376
AllocationSpace identity()
Definition: spaces.h:906
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2774
virtual MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:2651
MemoryChunk * next_chunk() const
Definition: spaces.h:316
Address FromSpacePageLow()
Definition: spaces.h:2558
virtual ~Space()
Definition: spaces.h:898
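For orientation, several of the entries above point at small power-of-two helpers in utils.h (IsPowerOf2, IsAligned, RoundDown, OffsetFrom) that the space and page code uses for address arithmetic. The following is a minimal, self-contained sketch of that behaviour under simplified signatures; it mirrors the names in the index but is not the V8 sources themselves, and the 1 MB page size used in the example is an assumption about this V8 version (kPageSizeBits).

#include <cassert>
#include <cstdint>
#include <iostream>

// True iff x is a non-zero power of two.
inline bool IsPowerOf2(uintptr_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

// True iff value is a multiple of alignment (alignment must be a power of two).
inline bool IsAligned(uintptr_t value, uintptr_t alignment) {
  assert(IsPowerOf2(alignment));
  return (value & (alignment - 1)) == 0;
}

// Rounds x down to the nearest multiple of the power-of-two m.
inline uintptr_t RoundDown(uintptr_t x, uintptr_t m) {
  assert(IsPowerOf2(m));
  return x & ~(m - 1);
}

int main() {
  const uintptr_t kPageSize = uintptr_t{1} << 20;  // assumed 1 MB pages
  uintptr_t addr = 0x12345678;
  uintptr_t page_start = RoundDown(addr, kPageSize);
  std::cout << std::hex << page_start << "\n";     // prints 12300000
  assert(IsAligned(page_start, kPageSize));
  return 0;
}

This is the same masking idiom that helpers such as Page::FromAddress and MarkbitIndexToAddress rely on: because page sizes and alignments are powers of two, rounding an interior address down to its chunk start is a single bitwise AND.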