v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
spaces.h
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_SPACES_H_
29 #define V8_SPACES_H_
30 
31 #include "allocation.h"
32 #include "hashmap.h"
33 #include "list.h"
34 #include "log.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 class Isolate;
40 
41 // -----------------------------------------------------------------------------
42 // Heap structures:
43 //
44 // A JS heap consists of a young generation, an old generation, and a large
45 // object space. The young generation is divided into two semispaces. A
46 // scavenger implements Cheney's copying algorithm. The old generation is
47 // separated into a map space and an old object space. The map space contains
48 // all (and only) map objects; the rest of the old objects go into the old space.
49 // The old generation is collected by a mark-sweep-compact collector.
50 //
51 // The semispaces of the young generation are contiguous. The old and map
52 // spaces consist of a list of pages. A page has a page header and an object
53 // area.
54 //
55 // There is a separate large object space for objects larger than
56 // Page::kMaxHeapObjectSize, so that they do not have to move during
57 // collection. The large object space is paged. Pages in large object space
58 // may be larger than the page size.
59 //
60 // A store-buffer based write barrier is used to keep track of intergenerational
61 // references. See store-buffer.h.
62 //
63 // During scavenges and mark-sweep collections we sometimes (after a store
64 // buffer overflow) iterate intergenerational pointers without decoding heap
65 // object maps so if the page belongs to old pointer space or large object
66 // space it is essential to guarantee that the page does not contain any
67 // garbage pointers to new space: every pointer aligned word which satisfies
68 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
69 // new space. Thus objects in old pointer and large object spaces should have a
70 // special layout (e.g. no bare integer fields). This requirement does not
71 // apply to map space which is iterated in a special fashion. However we still
72 // require pointer fields of dead maps to be cleaned.
73 //
74 // To enable lazy cleaning of old space pages we can mark chunks of the page
75 // as being garbage. Garbage sections are marked with a special map. These
76 // sections are skipped when scanning the page, even if we are otherwise
77 // scanning without regard for object boundaries. Garbage sections are chained
78 // together to form a free list after a GC. Garbage sections created outside
79 // of GCs by object truncation etc. may not be in the free list chain. Very
80 // small free spaces are ignored, they need only be cleaned of bogus pointers
81 // into new space.
82 //
83 // Each page may have up to one special garbage section. The start of this
84 // section is denoted by the top field in the space. The end of the section
85 // is denoted by the limit field in the space. This special garbage section
86 // is not marked with a free space map in the data. The point of this section
87 // is to enable linear allocation without having to constantly update the byte
88 // array every time the top field is updated and a new object is created. The
89 // special garbage section is not in the chain of garbage sections.
90 //
91 // Since the top and limit fields are in the space, not the page, only one page
92 // has a special garbage section, and if the top and limit are equal then there
93 // is no special garbage section.
94 
95 // Some assertion macros used in the debugging mode.
96 
97 #define ASSERT_PAGE_ALIGNED(address) \
98  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
99 
100 #define ASSERT_OBJECT_ALIGNED(address) \
101  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
102 
103 #define ASSERT_MAP_ALIGNED(address) \
104  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
105 
106 #define ASSERT_OBJECT_SIZE(size) \
107  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
108 
109 #define ASSERT_PAGE_OFFSET(offset) \
110  ASSERT((Page::kObjectStartOffset <= offset) \
111  && (offset <= Page::kPageSize))
112 
113 #define ASSERT_MAP_PAGE_INDEX(index) \
114  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
115 
116 
117 class PagedSpace;
118 class MemoryAllocator;
119 class AllocationInfo;
120 class Space;
121 class FreeList;
122 class MemoryChunk;
123 
124 class MarkBit {
125  public:
126  typedef uint32_t CellType;
127 
128  inline MarkBit(CellType* cell, CellType mask, bool data_only)
129  : cell_(cell), mask_(mask), data_only_(data_only) { }
130 
131  inline CellType* cell() { return cell_; }
132  inline CellType mask() { return mask_; }
133 
134 #ifdef DEBUG
135  bool operator==(const MarkBit& other) {
136  return cell_ == other.cell_ && mask_ == other.mask_;
137  }
138 #endif
139 
140  inline void Set() { *cell_ |= mask_; }
141  inline bool Get() { return (*cell_ & mask_) != 0; }
142  inline void Clear() { *cell_ &= ~mask_; }
143 
144  inline bool data_only() { return data_only_; }
145 
146  inline MarkBit Next() {
147  CellType new_mask = mask_ << 1;
148  if (new_mask == 0) {
149  return MarkBit(cell_ + 1, 1, data_only_);
150  } else {
151  return MarkBit(cell_, new_mask, data_only_);
152  }
153  }
154 
155  private:
156  CellType* cell_;
157  CellType mask_;
158  // This boolean indicates that the object is in a data-only space with no
159  // pointers. This enables some optimizations when marking.
160  // It is expected that this field is inlined and turned into control flow
161  // at the place where the MarkBit object is created.
162  bool data_only_;
163 };
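To illustrate the cell/mask representation used by MarkBit, a minimal standalone sketch (not part of spaces.h) of how Set(), Get() and Next() behave, including the carry into the next 32-bit cell, could look like this:

#include <cstdint>
#include <cassert>

int main() {
  uint32_t cells[2] = {0, 0};
  uint32_t* cell = &cells[0];
  uint32_t mask = 1u << 31;       // last bit of the first cell

  *cell |= mask;                  // MarkBit::Set()
  assert((*cell & mask) != 0);    // MarkBit::Get()

  // MarkBit::Next(): shifting the mask past bit 31 wraps to zero,
  // so the iteration advances to bit 0 of the following cell.
  uint32_t new_mask = mask << 1;
  if (new_mask == 0) { cell = cell + 1; new_mask = 1; }
  assert(cell == &cells[1] && new_mask == 1);
  return 0;
}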
164 
165 
166 // Bitmap is a sequence of cells each containing fixed number of bits.
167 class Bitmap {
168  public:
169  static const uint32_t kBitsPerCell = 32;
170  static const uint32_t kBitsPerCellLog2 = 5;
171  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
172  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
173  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
174 
175  static const size_t kLength =
176  (1 << kPageSizeBits) >> (kPointerSizeLog2);
177 
178  static const size_t kSize =
179  (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
180 
181 
182  static int CellsForLength(int length) {
183  return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
184  }
185 
186  int CellsCount() {
187  return CellsForLength(kLength);
188  }
189 
190  static int SizeFor(int cells_count) {
191  return sizeof(MarkBit::CellType) * cells_count;
192  }
193 
194  INLINE(static uint32_t IndexToCell(uint32_t index)) {
195  return index >> kBitsPerCellLog2;
196  }
197 
198  INLINE(static uint32_t CellToIndex(uint32_t index)) {
199  return index << kBitsPerCellLog2;
200  }
201 
202  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
203  return (index + kBitIndexMask) & ~kBitIndexMask;
204  }
205 
206  INLINE(MarkBit::CellType* cells()) {
207  return reinterpret_cast<MarkBit::CellType*>(this);
208  }
209 
210  INLINE(Address address()) {
211  return reinterpret_cast<Address>(this);
212  }
213 
214  INLINE(static Bitmap* FromAddress(Address addr)) {
215  return reinterpret_cast<Bitmap*>(addr);
216  }
217 
218  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
219  MarkBit::CellType mask = 1 << (index & kBitIndexMask);
220  MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
221  return MarkBit(cell, mask, data_only);
222  }
223 
224  static inline void Clear(MemoryChunk* chunk);
225 
226  static void PrintWord(uint32_t word, uint32_t himask = 0) {
227  for (uint32_t mask = 1; mask != 0; mask <<= 1) {
228  if ((mask & himask) != 0) PrintF("[");
229  PrintF((mask & word) ? "1" : "0");
230  if ((mask & himask) != 0) PrintF("]");
231  }
232  }
233 
234  class CellPrinter {
235  public:
236  CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
237 
238  void Print(uint32_t pos, uint32_t cell) {
239  if (cell == seq_type) {
240  seq_length++;
241  return;
242  }
243 
244  Flush();
245 
246  if (IsSeq(cell)) {
247  seq_start = pos;
248  seq_length = 0;
249  seq_type = cell;
250  return;
251  }
252 
253  PrintF("%d: ", pos);
254  PrintWord(cell);
255  PrintF("\n");
256  }
257 
258  void Flush() {
259  if (seq_length > 0) {
260  PrintF("%d: %dx%d\n",
261  seq_start,
262  seq_type == 0 ? 0 : 1,
263  seq_length * kBitsPerCell);
264  seq_length = 0;
265  }
266  }
267 
268  static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
269 
270  private:
271  uint32_t seq_start;
272  uint32_t seq_type;
273  uint32_t seq_length;
274  };
275 
276  void Print() {
277  CellPrinter printer;
278  for (int i = 0; i < CellsCount(); i++) {
279  printer.Print(i, cells()[i]);
280  }
281  printer.Flush();
282  PrintF("\n");
283  }
284 
285  bool IsClean() {
286  for (int i = 0; i < CellsCount(); i++) {
287  if (cells()[i] != 0) return false;
288  }
289  return true;
290  }
291 };
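The index arithmetic in Bitmap maps a mark-bit index to a 32-bit cell plus a mask within that cell. A small standalone sketch of that mapping, using the same constants but not part of the header, might look like this:

#include <cstdint>
#include <cassert>

int main() {
  const uint32_t kBitsPerCellLog2 = 5;              // 32 bits per cell
  const uint32_t kBitIndexMask = (1u << kBitsPerCellLog2) - 1;

  uint32_t index = 70;                               // arbitrary mark-bit index
  uint32_t cell_index = index >> kBitsPerCellLog2;   // as in Bitmap::IndexToCell
  uint32_t mask = 1u << (index & kBitIndexMask);     // as in Bitmap::MarkBitFromIndex

  assert(cell_index == 2 && mask == (1u << 6));      // bit 6 of cell 2
  return 0;
}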
292 
293 
294 class SkipList;
295 class SlotsBuffer;
296 
297 // MemoryChunk represents a memory region owned by a specific space.
298 // It is divided into the header and the body. Chunk start is always
299 // 1MB aligned. Start of the body is aligned so it can accommodate
300 // any heap object.
301 class MemoryChunk {
302  public:
303  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
304  static MemoryChunk* FromAddress(Address a) {
305  return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
306  }
307 
308  // Only works for addresses in pointer spaces, not data or code spaces.
309  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
310 
311  Address address() { return reinterpret_cast<Address>(this); }
312 
313  bool is_valid() { return address() != NULL; }
314 
315  MemoryChunk* next_chunk() const { return next_chunk_; }
316  MemoryChunk* prev_chunk() const { return prev_chunk_; }
317 
318  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
319  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
320 
321  Space* owner() const {
322  if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
323  kFailureTag) {
324  return reinterpret_cast<Space*>(owner_ - kFailureTag);
325  } else {
326  return NULL;
327  }
328  }
329 
330  void set_owner(Space* space) {
331  ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
332  owner_ = reinterpret_cast<Address>(space) + kFailureTag;
333  ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
334  kFailureTag);
335  }
336 
337  VirtualMemory* reserved_memory() {
338  return &reservation_;
339  }
340 
341  void InitializeReservedMemory() {
342  reservation_.Reset();
343  }
344 
345  void set_reserved_memory(VirtualMemory* reservation) {
346  ASSERT_NOT_NULL(reservation);
347  reservation_.TakeControl(reservation);
348  }
349 
350  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
351  void initialize_scan_on_scavenge(bool scan) {
352  if (scan) {
353  SetFlag(SCAN_ON_SCAVENGE);
354  } else {
355  ClearFlag(SCAN_ON_SCAVENGE);
356  }
357  }
358  inline void set_scan_on_scavenge(bool scan);
359 
360  int store_buffer_counter() { return store_buffer_counter_; }
361  void set_store_buffer_counter(int counter) {
362  store_buffer_counter_ = counter;
363  }
364 
365  bool Contains(Address addr) {
366  return addr >= area_start() && addr < area_end();
367  }
368 
369  // Checks whether addr can be a limit of addresses in this page.
370  // It's a limit if it's in the page, or if it's just after the
371  // last byte of the page.
372  bool ContainsLimit(Address addr) {
373  return addr >= area_start() && addr <= area_end();
374  }
375 
376  enum MemoryChunkFlags {
377  IS_EXECUTABLE,
378  ABOUT_TO_BE_FREED,
379  POINTERS_TO_HERE_ARE_INTERESTING,
380  POINTERS_FROM_HERE_ARE_INTERESTING,
381  SCAN_ON_SCAVENGE,
382  IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
383  IN_TO_SPACE, // All pages in new space have one of these two set.
384  NEW_SPACE_BELOW_AGE_MARK,
385  CONTAINS_ONLY_DATA,
386  EVACUATION_CANDIDATE,
387  RESCAN_ON_EVACUATION,
388 
389  // Pages swept precisely can be iterated, hitting only the live objects.
390  // Whereas those swept conservatively cannot be iterated over. Both flags
391  // indicate that marking bits have been cleared by the sweeper, otherwise
392  // marking bits are still intact.
393  WAS_SWEPT_PRECISELY,
394  WAS_SWEPT_CONSERVATIVELY,
395 
396  // Last flag, keep at bottom.
397  NUM_MEMORY_CHUNK_FLAGS
398  };
399 
400 
401  static const int kPointersToHereAreInterestingMask =
402  1 << POINTERS_TO_HERE_ARE_INTERESTING;
403 
404  static const int kPointersFromHereAreInterestingMask =
405  1 << POINTERS_FROM_HERE_ARE_INTERESTING;
406 
407  static const int kEvacuationCandidateMask =
408  1 << EVACUATION_CANDIDATE;
409 
410  static const int kSkipEvacuationSlotsRecordingMask =
411  (1 << EVACUATION_CANDIDATE) |
412  (1 << RESCAN_ON_EVACUATION) |
413  (1 << IN_FROM_SPACE) |
414  (1 << IN_TO_SPACE);
415 
416 
417  void SetFlag(int flag) {
418  flags_ |= static_cast<uintptr_t>(1) << flag;
419  }
420 
421  void ClearFlag(int flag) {
422  flags_ &= ~(static_cast<uintptr_t>(1) << flag);
423  }
424 
425  void SetFlagTo(int flag, bool value) {
426  if (value) {
427  SetFlag(flag);
428  } else {
429  ClearFlag(flag);
430  }
431  }
432 
433  bool IsFlagSet(int flag) {
434  return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
435  }
436 
437  // Set or clear multiple flags at a time. The flags in the mask
438  // are set to the value in "flags", the rest retain the current value
439  // in flags_.
440  void SetFlags(intptr_t flags, intptr_t mask) {
441  flags_ = (flags_ & ~mask) | (flags & mask);
442  }
443 
444  // Return all current flags.
445  intptr_t GetFlags() { return flags_; }
446 
447  // Manage live byte count (count of bytes known to be live,
448  // because they are marked black).
449  void ResetLiveBytes() {
450  if (FLAG_gc_verbose) {
451  PrintF("ResetLiveBytes:%p:%x->0\n",
452  static_cast<void*>(this), live_byte_count_);
453  }
454  live_byte_count_ = 0;
455  }
456  void IncrementLiveBytes(int by) {
457  if (FLAG_gc_verbose) {
458  printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
459  static_cast<void*>(this), live_byte_count_,
460  ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
461  live_byte_count_ + by);
462  }
463  live_byte_count_ += by;
464  ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
465  }
466  int LiveBytes() {
467  ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
468  return live_byte_count_;
469  }
470 
471  static void IncrementLiveBytesFromGC(Address address, int by) {
472  MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
473  }
474 
475  static void IncrementLiveBytesFromMutator(Address address, int by);
476 
477  static const intptr_t kAlignment =
478  (static_cast<uintptr_t>(1) << kPageSizeBits);
479 
480  static const intptr_t kAlignmentMask = kAlignment - 1;
481 
482  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
483 
484  static const intptr_t kLiveBytesOffset =
485  kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
486  kPointerSize + kPointerSize +
487  kPointerSize + kPointerSize + kPointerSize + kIntSize;
488 
489  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
490 
491  static const size_t kHeaderSize =
492  kSlotsBufferOffset + kPointerSize + kPointerSize;
493 
494  static const int kBodyOffset =
495  CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
496 
497  // The start offset of the object area in a page. Aligned to both maps and
498  // code alignment to be suitable for both. Also aligned to 32 words because
499  // the marking bitmap is arranged in 32 bit chunks.
500  static const int kObjectStartAlignment = 32 * kPointerSize;
501  static const int kObjectStartOffset = kBodyOffset - 1 +
502  (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
503 
504  size_t size() const { return size_; }
505 
506  void set_size(size_t size) {
507  size_ = size;
508  }
509 
510  void SetArea(Address area_start, Address area_end) {
511  area_start_ = area_start;
512  area_end_ = area_end;
513  }
514 
515  Executability executable() {
516  return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
517  }
518 
519  bool ContainsOnlyData() {
520  return IsFlagSet(CONTAINS_ONLY_DATA);
521  }
522 
523  bool InNewSpace() {
524  return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
525  }
526 
527  bool InToSpace() {
528  return IsFlagSet(IN_TO_SPACE);
529  }
530 
531  bool InFromSpace() {
532  return IsFlagSet(IN_FROM_SPACE);
533  }
534 
535  // ---------------------------------------------------------------------
536  // Markbits support
537 
538  inline Bitmap* markbits() {
539  return Bitmap::FromAddress(address() + kHeaderSize);
540  }
541 
542  void PrintMarkbits() { markbits()->Print(); }
543 
544  inline uint32_t AddressToMarkbitIndex(Address addr) {
545  return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
546  }
547 
548  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
549  const intptr_t offset =
550  reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
551 
552  return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
553  }
554 
555  inline Address MarkbitIndexToAddress(uint32_t index) {
556  return this->address() + (index << kPointerSizeLog2);
557  }
558 
559  void InsertAfter(MemoryChunk* other);
560  void Unlink();
561 
562  inline Heap* heap() { return heap_; }
563 
564  static const int kFlagsOffset = kPointerSize * 3;
565 
566  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
567 
568  bool ShouldSkipEvacuationSlotRecording() {
569  return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
570  }
571 
572  inline SkipList* skip_list() {
573  return skip_list_;
574  }
575 
576  inline void set_skip_list(SkipList* skip_list) {
577  skip_list_ = skip_list;
578  }
579 
580  inline SlotsBuffer* slots_buffer() {
581  return slots_buffer_;
582  }
583 
584  inline SlotsBuffer** slots_buffer_address() {
585  return &slots_buffer_;
586  }
587 
588  void MarkEvacuationCandidate() {
589  ASSERT(slots_buffer_ == NULL);
590  SetFlag(EVACUATION_CANDIDATE);
591  }
592 
593  void ClearEvacuationCandidate() {
594  ASSERT(slots_buffer_ == NULL);
595  ClearFlag(EVACUATION_CANDIDATE);
596  }
597 
598  Address area_start() { return area_start_; }
599  Address area_end() { return area_end_; }
600  int area_size() {
601  return static_cast<int>(area_end() - area_start());
602  }
603 
604  protected:
605  MemoryChunk* next_chunk_;
606  MemoryChunk* prev_chunk_;
607  size_t size_;
608  intptr_t flags_;
609 
610  // Start and end of allocatable memory on this chunk.
611  Address area_start_;
612  Address area_end_;
613 
614  // If the chunk needs to remember its memory reservation, it is stored here.
615  VirtualMemory reservation_;
616  // The identity of the owning space. This is tagged as a failure pointer, but
617  // no failure can be in an object, so this can be distinguished from any entry
618  // in a fixed array.
619  Address owner_;
620  Heap* heap_;
621  // Used by the store buffer to keep track of which pages to mark scan-on-
622  // scavenge.
623  int store_buffer_counter_;
624  // Count of bytes marked black on page.
625  int live_byte_count_;
626  SlotsBuffer* slots_buffer_;
627  SkipList* skip_list_;
628 
629  static MemoryChunk* Initialize(Heap* heap,
630  Address base,
631  size_t size,
632  Address area_start,
633  Address area_end,
634  Executability executable,
635  Space* owner);
636 
637  friend class MemoryAllocator;
638 };
639 
640 
641 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
642 
643 
644 // -----------------------------------------------------------------------------
645 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
646 //
647 // The only way to get a page pointer is by calling factory methods:
648 // Page* p = Page::FromAddress(addr); or
649 // Page* p = Page::FromAllocationTop(top);
650 class Page : public MemoryChunk {
651  public:
652  // Returns the page containing a given address. The address ranges
653  // from [page_addr .. page_addr + kPageSize[
654  // This only works if the object is in fact in a page. See also MemoryChunk::
655  // FromAddress() and FromAnyAddress().
656  INLINE(static Page* FromAddress(Address a)) {
657  return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
658  }
659 
660  // Returns the page containing an allocation top. Because an allocation
661  // top address can be the upper bound of the page, we need to subtract
662  // it with kPointerSize first. The address ranges from
663  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
664  INLINE(static Page* FromAllocationTop(Address top)) {
665  Page* p = FromAddress(top - kPointerSize);
666  return p;
667  }
668 
669  // Returns the next page in the chain of pages owned by a space.
670  inline Page* next_page();
671  inline Page* prev_page();
672  inline void set_next_page(Page* page);
673  inline void set_prev_page(Page* page);
674 
675  // Checks whether an address is page aligned.
676  static bool IsAlignedToPageSize(Address a) {
677  return 0 == (OffsetFrom(a) & kPageAlignmentMask);
678  }
679 
680  // Returns the offset of a given address to this page.
681  INLINE(int Offset(Address a)) {
682  int offset = static_cast<int>(a - address());
683  return offset;
684  }
685 
686  // Returns the address for a given offset into this page.
687  Address OffsetToAddress(int offset) {
688  ASSERT_PAGE_OFFSET(offset);
689  return address() + offset;
690  }
691 
692  // ---------------------------------------------------------------------
693 
694  // Page size in bytes. This must be a multiple of the OS page size.
695  static const int kPageSize = 1 << kPageSizeBits;
696 
697  // Object area size in bytes.
698  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
699 
700  // Maximum object size that fits in a page.
701  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
702 
703  // Page size mask.
704  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
705 
706  inline void ClearGCFields();
707 
708  static inline Page* Initialize(Heap* heap,
709  MemoryChunk* chunk,
710  Executability executable,
711  PagedSpace* owner);
712 
713  void InitializeAsAnchor(PagedSpace* owner);
714 
715  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
716  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
717  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
718 
719  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
720  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
721 
722  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
723  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
724 
725 #ifdef DEBUG
726  void Print();
727 #endif // DEBUG
728 
729  friend class MemoryAllocator;
730 };
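Page::FromAddress relies purely on alignment: masking off the low kPageSizeBits bits of any interior address yields the page start. A standalone sketch of that arithmetic, assuming the 1MB page size (kPageSizeBits == 20) used by this V8 version:

#include <cstdint>
#include <cassert>

int main() {
  // Assumes kPageSizeBits == 20, i.e. 1MB pages, as in this version of V8.
  const uintptr_t kPageSizeBits = 20;
  const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

  uintptr_t page_start = uintptr_t(0x40) << kPageSizeBits;  // some aligned page
  uintptr_t addr = page_start + 0x1234;                     // address inside it

  // Page::FromAddress simply masks off the low bits of the address.
  assert((addr & ~kPageAlignmentMask) == page_start);
  return 0;
}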
731 
732 
733 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
734 
735 
736 class LargePage : public MemoryChunk {
737  public:
738  HeapObject* GetObject() {
739  return HeapObject::FromAddress(area_start());
740  }
741 
742  inline LargePage* next_page() const {
743  return static_cast<LargePage*>(next_chunk());
744  }
745 
746  inline void set_next_page(LargePage* page) {
747  set_next_chunk(page);
748  }
749  private:
750  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
751 
752  friend class MemoryAllocator;
753 };
754 
755 STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
756 
757 // ----------------------------------------------------------------------------
758 // Space is the abstract superclass for all allocation spaces.
759 class Space : public Malloced {
760  public:
761  Space(Heap* heap, AllocationSpace id, Executability executable)
762  : heap_(heap), id_(id), executable_(executable) {}
763 
764  virtual ~Space() {}
765 
766  Heap* heap() const { return heap_; }
767 
768  // Does the space need executable memory?
769  Executability executable() { return executable_; }
770 
771  // Identity used in error reporting.
772  AllocationSpace identity() { return id_; }
773 
774  // Returns allocated size.
775  virtual intptr_t Size() = 0;
776 
777  // Returns size of objects. Can differ from the allocated size
778  // (e.g. see LargeObjectSpace).
779  virtual intptr_t SizeOfObjects() { return Size(); }
780 
781  virtual int RoundSizeDownToObjectAlignment(int size) {
782  if (id_ == CODE_SPACE) {
783  return RoundDown(size, kCodeAlignment);
784  } else {
785  return RoundDown(size, kPointerSize);
786  }
787  }
788 
789 #ifdef DEBUG
790  virtual void Print() = 0;
791 #endif
792 
793  // After calling this we can allocate a certain number of bytes using only
794  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
795  // without using freelists or causing a GC. This is used by partial
796  // snapshots. It returns true if space was reserved or false if a GC is
797  // needed. For paged spaces the space requested must include the space wasted
798  // at the end of each when allocating linearly.
799  virtual bool ReserveSpace(int bytes) = 0;
800 
801  private:
802  Heap* heap_;
803  AllocationSpace id_;
804  Executability executable_;
805 };
806 
807 
808 // ----------------------------------------------------------------------------
809 // All heap objects containing executable code (code objects) must be allocated
810 // from a 2 GB range of memory, so that they can call each other using 32-bit
811 // displacements. This happens automatically on 32-bit platforms, where 32-bit
812 // displacements cover the entire 4GB virtual address space. On 64-bit
813 // platforms, we support this using the CodeRange object, which reserves and
814 // manages a range of virtual memory.
815 class CodeRange {
816  public:
817  explicit CodeRange(Isolate* isolate);
818  ~CodeRange() { TearDown(); }
819 
820  // Reserves a range of virtual memory, but does not commit any of it.
821  // Can only be called once, at heap initialization time.
822  // Returns false on failure.
823  bool SetUp(const size_t requested_size);
824 
825  // Frees the range of virtual memory, and frees the data structures used to
826  // manage it.
827  void TearDown();
828 
829  bool exists() { return this != NULL && code_range_ != NULL; }
830  bool contains(Address address) {
831  if (this == NULL || code_range_ == NULL) return false;
832  Address start = static_cast<Address>(code_range_->address());
833  return start <= address && address < start + code_range_->size();
834  }
835 
836  // Allocates a chunk of memory from the large-object portion of
837  // the code range. On platforms with no separate code range, should
838  // not be called.
839  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
840  size_t* allocated);
841  void FreeRawMemory(Address buf, size_t length);
842 
843  private:
844  Isolate* isolate_;
845 
846  // The reserved range of virtual memory that all code objects are put in.
847  VirtualMemory* code_range_;
848  // Plain old data class, just a struct plus a constructor.
849  class FreeBlock {
850  public:
851  FreeBlock(Address start_arg, size_t size_arg)
852  : start(start_arg), size(size_arg) {
853  ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
854  ASSERT(size >= static_cast<size_t>(Page::kPageSize));
855  }
856  FreeBlock(void* start_arg, size_t size_arg)
857  : start(static_cast<Address>(start_arg)), size(size_arg) {
858  ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
859  ASSERT(size >= static_cast<size_t>(Page::kPageSize));
860  }
861 
862  Address start;
863  size_t size;
864  };
865 
866  // Freed blocks of memory are added to the free list. When the allocation
867  // list is exhausted, the free list is sorted and merged to make the new
868  // allocation list.
869  List<FreeBlock> free_list_;
870  // Memory is allocated from the free blocks on the allocation list.
871  // The block at current_allocation_block_index_ is the current block.
872  List<FreeBlock> allocation_list_;
873  int current_allocation_block_index_;
874 
875  // Finds a block on the allocation list that contains at least the
876  // requested amount of memory. If none is found, sorts and merges
877  // the existing free memory blocks, and searches again.
878  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
879  void GetNextAllocationBlock(size_t requested);
880  // Compares the start addresses of two free blocks.
881  static int CompareFreeBlockAddress(const FreeBlock* left,
882  const FreeBlock* right);
883 
884  DISALLOW_COPY_AND_ASSIGN(CodeRange);
885 };
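The 2 GB constraint exists so that a signed 32-bit relative displacement can always reach any other code object in the reservation. A small standalone check of that arithmetic (illustrative only, not part of the header):

#include <cstdint>
#include <cassert>

int main() {
  // If every code object lives inside one 2GB reservation, the distance
  // between any call site and its target fits in a signed 32-bit offset.
  const int64_t kCodeRangeSize = int64_t(2) * 1024 * 1024 * 1024;   // 2GB
  int64_t worst_case = kCodeRangeSize - 1;                          // max distance
  assert(worst_case <= INT32_MAX && -worst_case >= INT32_MIN);
  return 0;
}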
886 
887 
888 class SkipList {
889  public:
890  SkipList() {
891  Clear();
892  }
893 
894  void Clear() {
895  for (int idx = 0; idx < kSize; idx++) {
896  starts_[idx] = reinterpret_cast<Address>(-1);
897  }
898  }
899 
900  Address StartFor(Address addr) {
901  return starts_[RegionNumber(addr)];
902  }
903 
904  void AddObject(Address addr, int size) {
905  int start_region = RegionNumber(addr);
906  int end_region = RegionNumber(addr + size - kPointerSize);
907  for (int idx = start_region; idx <= end_region; idx++) {
908  if (starts_[idx] > addr) starts_[idx] = addr;
909  }
910  }
911 
912  static inline int RegionNumber(Address addr) {
913  return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
914  }
915 
916  static void Update(Address addr, int size) {
917  Page* page = Page::FromAddress(addr);
918  SkipList* list = page->skip_list();
919  if (list == NULL) {
920  list = new SkipList();
921  page->set_skip_list(list);
922  }
923 
924  list->AddObject(addr, size);
925  }
926 
927  private:
928  static const int kRegionSizeLog2 = 13;
929  static const int kRegionSize = 1 << kRegionSizeLog2;
930  static const int kSize = Page::kPageSize / kRegionSize;
931 
932  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
933 
934  Address starts_[kSize];
935 };
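SkipList maps an address to one of kSize regions by masking off the page base and shifting the page offset by kRegionSizeLog2. A standalone sketch of that arithmetic, assuming 8KB regions inside a 1MB page:

#include <cstdint>
#include <cassert>

int main() {
  // Mirrors SkipList's constants: 8KB regions within a 1MB page (assumed).
  const int kRegionSizeLog2 = 13;
  const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;

  uintptr_t addr = (uintptr_t(7) << 20) + 3 * 8192 + 40;  // inside region 3
  int region = static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
  assert(region == 3);
  return 0;
}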
936 
937 
938 // ----------------------------------------------------------------------------
939 // A space acquires chunks of memory from the operating system. The memory
940 // allocator allocates and deallocates pages for the paged heap spaces and large
941 // pages for large object space.
942 //
943 // Each space has to manage its own pages.
944 //
945 class MemoryAllocator {
946  public:
947  explicit MemoryAllocator(Isolate* isolate);
948 
949  // Initializes its internal bookkeeping structures.
950  // Max capacity of the total space and executable memory limit.
951  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
952 
953  void TearDown();
954 
955  Page* AllocatePage(
956  intptr_t size, PagedSpace* owner, Executability executable);
957 
958  LargePage* AllocateLargePage(
959  intptr_t object_size, Space* owner, Executability executable);
960 
961  void Free(MemoryChunk* chunk);
962 
963  // Returns the maximum available bytes of heaps.
964  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
965 
966  // Returns allocated spaces in bytes.
967  intptr_t Size() { return size_; }
968 
969  // Returns the maximum available executable bytes of heaps.
970  intptr_t AvailableExecutable() {
971  if (capacity_executable_ < size_executable_) return 0;
972  return capacity_executable_ - size_executable_;
973  }
974 
975  // Returns allocated executable spaces in bytes.
976  intptr_t SizeExecutable() { return size_executable_; }
977 
978  // Returns maximum available bytes that the old space can have.
979  intptr_t MaxAvailable() {
980  return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
981  }
982 
983 #ifdef DEBUG
984  // Reports statistic info of the space.
985  void ReportStatistics();
986 #endif
987 
988  MemoryChunk* AllocateChunk(intptr_t body_size,
989  Executability executable,
990  Space* space);
991 
992  Address ReserveAlignedMemory(size_t requested,
993  size_t alignment,
994  VirtualMemory* controller);
995  Address AllocateAlignedMemory(size_t requested,
996  size_t alignment,
997  Executability executable,
998  VirtualMemory* controller);
999 
1000  void FreeMemory(VirtualMemory* reservation, Executability executable);
1001  void FreeMemory(Address addr, size_t size, Executability executable);
1002 
1003  // Commit a contiguous block of memory from the initial chunk. Assumes that
1004  // the address is not NULL, the size is greater than zero, and that the
1005  // block is contained in the initial chunk. Returns true if it succeeded
1006  // and false otherwise.
1007  bool CommitBlock(Address start, size_t size, Executability executable);
1008 
1009  // Uncommit a contiguous block of memory [start..(start+size)[.
1010  // start is not NULL, the size is greater than zero, and the
1011  // block is contained in the initial chunk. Returns true if it succeeded
1012  // and false otherwise.
1013  bool UncommitBlock(Address start, size_t size);
1014 
1015  // Zaps a contiguous block of memory [start..(start+size)[ thus
1016  // filling it up with a recognizable non-NULL bit pattern.
1017  void ZapBlock(Address start, size_t size);
1018 
1019  void PerformAllocationCallback(ObjectSpace space,
1020  AllocationAction action,
1021  size_t size);
1022 
1023  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1024  ObjectSpace space,
1025  AllocationAction action);
1026 
1027  void RemoveMemoryAllocationCallback(
1028  MemoryAllocationCallback callback);
1029 
1030  bool MemoryAllocationCallbackRegistered(
1031  MemoryAllocationCallback callback);
1032 
1033  static int CodePageGuardStartOffset();
1034 
1035  static int CodePageGuardSize();
1036 
1037  static int CodePageAreaStartOffset();
1038 
1039  static int CodePageAreaEndOffset();
1040 
1041  static int CodePageAreaSize() {
1042  return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1043  }
1044 
1045  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
1046  Address start,
1047  size_t size);
1048 
1049  private:
1050  Isolate* isolate_;
1051 
1052  // Maximum space size in bytes.
1053  size_t capacity_;
1054  // Maximum subset of capacity_ that can be executable
1055  size_t capacity_executable_;
1056 
1057  // Allocated space size in bytes.
1058  size_t size_;
1059  // Allocated executable space size in bytes.
1060  size_t size_executable_;
1061 
1062  struct MemoryAllocationCallbackRegistration {
1063  MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1064  ObjectSpace space,
1065  AllocationAction action)
1066  : callback(callback), space(space), action(action) {
1067  }
1068  MemoryAllocationCallback callback;
1069  ObjectSpace space;
1070  AllocationAction action;
1071  };
1072 
1073  // A list of callbacks that are triggered when memory is allocated or freed.
1074  List<MemoryAllocationCallbackRegistration>
1075  memory_allocation_callbacks_;
1076 
1077  // Initializes pages in a chunk. Returns the first page address.
1078  // This function and GetChunkId() are provided for the mark-compact
1079  // collector to rebuild page headers in the from space, which is
1080  // used as a marking stack and its page headers are destroyed.
1081  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1082  PagedSpace* owner);
1083 
1084  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1085 };
1086 
1087 
1088 // -----------------------------------------------------------------------------
1089 // Interface for heap object iterator to be implemented by all object space
1090 // object iterators.
1091 //
1092 // NOTE: The space-specific object iterators also implement their own next()
1093 // method, which is used to avoid using virtual functions when
1094 // iterating a specific space.
1095 
1096 class ObjectIterator : public Malloced {
1097  public:
1098  virtual ~ObjectIterator() { }
1099 
1100  virtual HeapObject* next_object() = 0;
1101 };
1102 
1103 
1104 // -----------------------------------------------------------------------------
1105 // Heap object iterator in new/old/map spaces.
1106 //
1107 // A HeapObjectIterator iterates objects from the bottom of the given space
1108 // to its top or from the bottom of the given page to its top.
1109 //
1110 // If objects are allocated in the page during iteration the iterator may
1111 // or may not iterate over those objects. The caller must create a new
1112 // iterator in order to be sure to visit these new objects.
1113 class HeapObjectIterator: public ObjectIterator {
1114  public:
1115  // Creates a new object iterator in a given space.
1116  // If the size function is not given, the iterator calls the default
1117  // Object::Size().
1118  explicit HeapObjectIterator(PagedSpace* space);
1119  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
1120  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1121 
1122  // Advance to the next object, skipping free spaces and other fillers and
1123  // skipping the special garbage section of which there is one per space.
1124  // Returns NULL when the iteration has ended.
1125  inline HeapObject* Next() {
1126  do {
1127  HeapObject* next_obj = FromCurrentPage();
1128  if (next_obj != NULL) return next_obj;
1129  } while (AdvanceToNextPage());
1130  return NULL;
1131  }
1132 
1133  virtual HeapObject* next_object() {
1134  return Next();
1135  }
1136 
1137  private:
1138  enum PageMode { kOnePageOnly, kAllPagesInSpace };
1139 
1140  Address cur_addr_; // Current iteration point.
1141  Address cur_end_; // End iteration point.
1142  HeapObjectCallback size_func_; // Size function or NULL.
1143  PagedSpace* space_;
1144  PageMode page_mode_;
1145 
1146  // Fast (inlined) path of next().
1147  inline HeapObject* FromCurrentPage();
1148 
1149  // Slow path of next(), goes into the next page. Returns false if the
1150  // iteration has ended.
1151  bool AdvanceToNextPage();
1152 
1153  // Initializes fields.
1154  inline void Initialize(PagedSpace* owner,
1155  Address start,
1156  Address end,
1157  PageMode mode,
1158  HeapObjectCallback size_func);
1159 };
1160 
1161 
1162 // -----------------------------------------------------------------------------
1163 // A PageIterator iterates the pages in a paged space.
1164 
1165 class PageIterator BASE_EMBEDDED {
1166  public:
1167  explicit inline PageIterator(PagedSpace* space);
1168 
1169  inline bool has_next();
1170  inline Page* next();
1171 
1172  private:
1173  PagedSpace* space_;
1174  Page* prev_page_; // Previous page returned.
1175  // Next page that will be returned. Cached here so that we can use this
1176  // iterator for operations that deallocate pages.
1177  Page* next_page_;
1178 };
1179 
1180 
1181 // -----------------------------------------------------------------------------
1182 // A space has a circular list of pages. The next page can be accessed via
1183 // Page::next_page() call.
1184 
1185 // An abstraction of allocation and relocation pointers in a page-structured
1186 // space.
1187 class AllocationInfo {
1188  public:
1189  AllocationInfo() : top(NULL), limit(NULL) {
1190  }
1191 
1192  Address top; // Current allocation top.
1193  Address limit; // Current allocation limit.
1194 
1195 #ifdef DEBUG
1196  bool VerifyPagedAllocation() {
1197  return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
1198  && (top <= limit);
1199  }
1200 #endif
1201 };
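AllocationInfo is the heart of linear (bump-pointer) allocation: objects are carved off by advancing top until it would cross limit. A simplified standalone sketch of that idea (not V8's actual allocation path):

#include <cstddef>
#include <cassert>

// A minimal bump-pointer allocator in the spirit of AllocationInfo:
// allocation just advances 'top' until it would cross 'limit'.
struct BumpRegion {
  char* top;
  char* limit;
  void* Allocate(size_t size_in_bytes) {
    if (static_cast<size_t>(limit - top) < size_in_bytes) return NULL;
    void* result = top;
    top += size_in_bytes;
    return result;
  }
};

int main() {
  static char backing[256];
  BumpRegion region = { backing, backing + sizeof(backing) };
  assert(region.Allocate(100) == backing);
  assert(region.Allocate(100) == backing + 100);
  assert(region.Allocate(100) == NULL);   // would cross the limit
  return 0;
}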
1202 
1203 
1204 // An abstraction of the accounting statistics of a page-structured space.
1205 // The 'capacity' of a space is the number of object-area bytes (i.e., not
1206 // including page bookkeeping structures) currently in the space. The 'size'
1207 // of a space is the number of allocated bytes, the 'waste' in the space is
1208 // the number of bytes that are not allocated and not available to
1209 // allocation without reorganizing the space via a GC (e.g. small blocks due
1210 // to internal fragmentation, top of page areas in map space), and the bytes
1211 // 'available' is the number of unallocated bytes that are not waste. The
1212 // capacity is the sum of size, waste, and available.
1213 //
1214 // The stats are only set by functions that ensure they stay balanced. These
1215 // functions increase or decrease one of the non-capacity stats in
1216 // conjunction with capacity, or else they always balance increases and
1217 // decreases to the non-capacity stats.
1218 class AllocationStats BASE_EMBEDDED {
1219  public:
1220  AllocationStats() { Clear(); }
1221 
1222  // Zero out all the allocation statistics (i.e., no capacity).
1223  void Clear() {
1224  capacity_ = 0;
1225  size_ = 0;
1226  waste_ = 0;
1227  }
1228 
1229  void ClearSizeWaste() {
1230  size_ = capacity_;
1231  waste_ = 0;
1232  }
1233 
1234  // Reset the allocation statistics (i.e., available = capacity with no
1235  // wasted or allocated bytes).
1236  void Reset() {
1237  size_ = 0;
1238  waste_ = 0;
1239  }
1240 
1241  // Accessors for the allocation statistics.
1242  intptr_t Capacity() { return capacity_; }
1243  intptr_t Size() { return size_; }
1244  intptr_t Waste() { return waste_; }
1245 
1246  // Grow the space by adding available bytes. They are initially marked as
1247  // being in use (part of the size), but will normally be immediately freed,
1248  // putting them on the free list and removing them from size_.
1249  void ExpandSpace(int size_in_bytes) {
1250  capacity_ += size_in_bytes;
1251  size_ += size_in_bytes;
1252  ASSERT(size_ >= 0);
1253  }
1254 
1255  // Shrink the space by removing available bytes. Since shrinking is done
1256  // during sweeping, bytes have been marked as being in use (part of the size)
1257  // and are hereby freed.
1258  void ShrinkSpace(int size_in_bytes) {
1259  capacity_ -= size_in_bytes;
1260  size_ -= size_in_bytes;
1261  ASSERT(size_ >= 0);
1262  }
1263 
1264  // Allocate from available bytes (available -> size).
1265  void AllocateBytes(intptr_t size_in_bytes) {
1266  size_ += size_in_bytes;
1267  ASSERT(size_ >= 0);
1268  }
1269 
1270  // Free allocated bytes, making them available (size -> available).
1271  void DeallocateBytes(intptr_t size_in_bytes) {
1272  size_ -= size_in_bytes;
1273  ASSERT(size_ >= 0);
1274  }
1275 
1276  // Waste free bytes (available -> waste).
1277  void WasteBytes(int size_in_bytes) {
1278  size_ -= size_in_bytes;
1279  waste_ += size_in_bytes;
1280  ASSERT(size_ >= 0);
1281  }
1282 
1283  private:
1284  intptr_t capacity_;
1285  intptr_t size_;
1286  intptr_t waste_;
1287 };
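The invariant described above, capacity == size + waste + available, is easy to see with a small standalone walk-through of the balanced updates (illustrative only):

#include <cstdint>
#include <cassert>

int main() {
  // Tracks the invariant: capacity == size + waste + available.
  intptr_t capacity = 0, size = 0, waste = 0;

  capacity += 4096; size += 4096;     // ExpandSpace: new page, counted as used
  size -= 4096;                       // DeallocateBytes: freed onto the free list
  size += 128;                        // AllocateBytes: object allocation
  size -= 32; waste += 32;            // WasteBytes: fragment too small to use

  intptr_t available = capacity - size - waste;
  assert(available == 4096 - 128);
  return 0;
}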
1288 
1289 
1290 // -----------------------------------------------------------------------------
1291 // Free lists for old object spaces
1292 //
1293 // Free-list nodes are free blocks in the heap. They look like heap objects
1294 // (free-list node pointers have the heap object tag, and they have a map like
1295 // a heap object). They have a size and a next pointer. The next pointer is
1296 // the raw address of the next free list node (or NULL).
1297 class FreeListNode: public HeapObject {
1298  public:
1299  // Obtain a free-list node from a raw address. This is not a cast because
1300  // it does not check nor require that the first word at the address is a map
1301  // pointer.
1302  static FreeListNode* FromAddress(Address address) {
1303  return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1304  }
1305 
1306  static inline bool IsFreeListNode(HeapObject* object);
1307 
1308  // Set the size in bytes, which can be read with HeapObject::Size(). This
1309  // function also writes a map to the first word of the block so that it
1310  // looks like a heap object to the garbage collector and heap iteration
1311  // functions.
1312  void set_size(Heap* heap, int size_in_bytes);
1313 
1314  // Accessors for the next field.
1315  inline FreeListNode* next();
1316  inline FreeListNode** next_address();
1317  inline void set_next(FreeListNode* next);
1318 
1319  inline void Zap();
1320 
1321  private:
1322  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1323 
1324  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1325 };
1326 
1327 
1328 // The free list for the old space. The free list is organized in such a way
1329 // as to encourage objects allocated around the same time to be near each
1330 // other. The normal way to allocate is intended to be by bumping a 'top'
1331 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1332 // find a new space to allocate from. This is done with the free list, which
1333 // is divided up into rough categories to cut down on waste. Having finer
1334 // categories would scatter allocation more.
1335 
1336 // The old space free list is organized in categories.
1337 // 1-31 words: Such small free areas are discarded for efficiency reasons.
1338 // They can be reclaimed by the compactor. However the distance between top
1339 // and limit may be this small.
1340 // 32-255 words: There is a list of spaces this large. It is used for top and
1341 // limit when the object we need to allocate is 1-31 words in size. These
1342 // spaces are called small.
1343 // 256-2047 words: There is a list of spaces this large. It is used for top and
1344 // limit when the object we need to allocate is 32-255 words in size. These
1345 // spaces are called medium.
1346 // 2048-16383 words: There is a list of spaces this large. It is used for top
1347 // and limit when the object we need to allocate is 256-2047 words in size.
1348 // These spaces are called large.
1349 // At least 16384 words. This list is for objects of 2048 words or larger.
1350 // Empty pages are added to this list. These spaces are called huge.
1351 class FreeList BASE_EMBEDDED {
1352  public:
1353  explicit FreeList(PagedSpace* owner);
1354 
1355  // Clear the free list.
1356  void Reset();
1357 
1358  // Return the number of bytes available on the free list.
1359  intptr_t available() { return available_; }
1360 
1361  // Place a node on the free list. The block of size 'size_in_bytes'
1362  // starting at 'start' is placed on the free list. The return value is the
1363  // number of bytes that have been lost due to internal fragmentation by
1364  // freeing the block. Bookkeeping information will be written to the block,
1365  // i.e., its contents will be destroyed. The start address should be word
1366  // aligned, and the size should be a non-zero multiple of the word size.
1367  int Free(Address start, int size_in_bytes);
1368 
1369  // Allocate a block of size 'size_in_bytes' from the free list. The block
1370  // is uninitialized. A failure is returned if no block is available. The
1371  // number of bytes lost to fragmentation is returned in the output parameter
1372  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1373  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1374 
1375 #ifdef DEBUG
1376  void Zap();
1377  static intptr_t SumFreeList(FreeListNode* node);
1378  static int FreeListLength(FreeListNode* cur);
1379  intptr_t SumFreeLists();
1380  bool IsVeryLong();
1381 #endif
1382 
1383  struct SizeStats {
1384  intptr_t Total() {
1385  return small_size_ + medium_size_ + large_size_ + huge_size_;
1386  }
1387 
1388  intptr_t small_size_;
1389  intptr_t medium_size_;
1390  intptr_t large_size_;
1391  intptr_t huge_size_;
1392  };
1393 
1394  void CountFreeListItems(Page* p, SizeStats* sizes);
1395 
1396  intptr_t EvictFreeListItems(Page* p);
1397 
1398  private:
1399  // The size range of blocks, in bytes.
1400  static const int kMinBlockSize = 3 * kPointerSize;
1401  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1402 
1403  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
1404 
1405  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1406 
1407  PagedSpace* owner_;
1408  Heap* heap_;
1409 
1410  // Total available bytes in all blocks on this free list.
1411  int available_;
1412 
1413  static const int kSmallListMin = 0x20 * kPointerSize;
1414  static const int kSmallListMax = 0xff * kPointerSize;
1415  static const int kMediumListMax = 0x7ff * kPointerSize;
1416  static const int kLargeListMax = 0x3fff * kPointerSize;
1417  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1418  static const int kMediumAllocationMax = kSmallListMax;
1419  static const int kLargeAllocationMax = kMediumListMax;
1420  FreeListNode* small_list_;
1421  FreeListNode* medium_list_;
1422  FreeListNode* large_list_;
1423  FreeListNode* huge_list_;
1424 
1425  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1426 };
1427 
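The category boundaries described in the comment above FreeList can be expressed as a small standalone helper. This sketch (not part of V8) just mirrors those word-count ranges:

#include <cassert>

// Categorizes a free block by its size in words, following the ranges
// described in the comment above FreeList (a sketch, not V8's actual code).
enum Category { kDiscarded, kSmall, kMedium, kLarge, kHuge };

static Category CategoryFor(int size_in_words) {
  if (size_in_words < 0x20) return kDiscarded;   // 1-31 words: too small to track
  if (size_in_words <= 0xff) return kSmall;      // 32-255 words
  if (size_in_words <= 0x7ff) return kMedium;    // 256-2047 words
  if (size_in_words <= 0x3fff) return kLarge;    // 2048-16383 words
  return kHuge;                                  // 16384 words and up
}

int main() {
  assert(CategoryFor(16) == kDiscarded);
  assert(CategoryFor(100) == kSmall);
  assert(CategoryFor(1000) == kMedium);
  assert(CategoryFor(5000) == kLarge);
  assert(CategoryFor(20000) == kHuge);
  return 0;
}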
1428 
1429 class PagedSpace : public Space {
1430  public:
1431  // Creates a space with a maximum capacity, and an id.
1432  PagedSpace(Heap* heap,
1433  intptr_t max_capacity,
1434  AllocationSpace id,
1435  Executability executable);
1436 
1437  virtual ~PagedSpace() {}
1438 
1439  // Set up the space using the given address range of virtual memory (from
1440  // the memory allocator's initial chunk) if possible. If the block of
1441  // addresses is not big enough to contain a single page-aligned page, a
1442  // fresh chunk will be allocated.
1443  bool SetUp();
1444 
1445  // Returns true if the space has been successfully set up and not
1446  // subsequently torn down.
1447  bool HasBeenSetUp();
1448 
1449  // Cleans up the space, frees all pages in this space except those belonging
1450  // to the initial chunk, uncommits addresses in the initial chunk.
1451  void TearDown();
1452 
1453  // Checks whether an object/address is in this space.
1454  inline bool Contains(Address a);
1455  bool Contains(HeapObject* o) { return Contains(o->address()); }
1456 
1457  // Given an address occupied by a live object, return that object if it is
1458  // in this space, or Failure::Exception() if it is not. The implementation
1459  // iterates over objects in the page containing the address, the cost is
1460  // linear in the number of objects in the page. It may be slow.
1461  MUST_USE_RESULT MaybeObject* FindObject(Address addr);
1462 
1463  // Prepares for a mark-compact GC.
1464  virtual void PrepareForMarkCompact();
1465 
1466  // Current capacity without growing (Size() + Available()).
1467  intptr_t Capacity() { return accounting_stats_.Capacity(); }
1468 
1469  // Total amount of memory committed for this space. For paged
1470  // spaces this equals the capacity.
1471  intptr_t CommittedMemory() { return Capacity(); }
1472 
1473  // Sets the capacity, the available space and the wasted space to zero.
1474  // The stats are rebuilt during sweeping by adding each page to the
1475  // capacity and the size when it is encountered. As free spaces are
1476  // discovered during the sweeping they are subtracted from the size and added
1477  // to the available and wasted totals.
1478  void ClearStats() {
1479  accounting_stats_.ClearSizeWaste();
1480  }
1481 
1482  // Available bytes without growing. These are the bytes on the free list.
1483  // The bytes in the linear allocation area are not included in this total
1484  // because updating the stats would slow down allocation. New pages are
1485  // immediately added to the free list so they show up here.
1486  intptr_t Available() { return free_list_.available(); }
1487 
1488  // Allocated bytes in this space. Garbage bytes that were not found due to
1489  // lazy sweeping are counted as being allocated! The bytes in the current
1490  // linear allocation area (between top and limit) are also counted here.
1491  virtual intptr_t Size() { return accounting_stats_.Size(); }
1492 
1493  // As size, but the bytes in lazily swept pages are estimated and the bytes
1494  // in the current linear allocation area are not included.
1495  virtual intptr_t SizeOfObjects() {
1497  return Size() - unswept_free_bytes_ - (limit() - top());
1498  }
1499 
1500  // Wasted bytes in this space. These are just the bytes that were thrown away
1501  // due to being too small to use for allocation. They do not include the
1502  // free bytes that were not found at all due to lazy sweeping.
1503  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1504 
1505  // Returns the allocation pointer in this space.
1506  Address top() { return allocation_info_.top; }
1507  Address limit() { return allocation_info_.limit; }
1508 
1509  // Allocate the requested number of bytes in the space if possible, return a
1510  // failure object if not.
1511  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1512 
1513  virtual bool ReserveSpace(int bytes);
1514 
1515  // Give a block of memory to the space's free list. It might be added to
1516  // the free list or accounted as waste.
1517  // If add_to_freelist is false then just accounting stats are updated and
1518  // no attempt to add area to free list is made.
1519  int Free(Address start, int size_in_bytes) {
1520  int wasted = free_list_.Free(start, size_in_bytes);
1521  accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1522  return size_in_bytes - wasted;
1523  }
1524 
1525  void ResetFreeList() {
1526  free_list_.Reset();
1527  }
1528 
1529  // Set space allocation info.
1530  void SetTop(Address top, Address limit) {
1531  ASSERT(top == limit ||
1532  Page::FromAddress(top) == Page::FromAddress(limit - 1));
1533  allocation_info_.top = top;
1534  allocation_info_.limit = limit;
1535  }
1536 
1537  void Allocate(int bytes) {
1538  accounting_stats_.AllocateBytes(bytes);
1539  }
1540 
1541  void IncreaseCapacity(int size) {
1542  accounting_stats_.ExpandSpace(size);
1543  }
1544 
1545  // Releases an unused page and shrinks the space.
1546  void ReleasePage(Page* page);
1547 
1548  // Releases all of the unused pages.
1549  void ReleaseAllUnusedPages();
1550 
1551  // The dummy page that anchors the linked list of pages.
1552  Page* anchor() { return &anchor_; }
1553 
1554 #ifdef DEBUG
1555  // Print meta info and objects in this space.
1556  virtual void Print();
1557 
1558  // Verify integrity of this space.
1559  virtual void Verify(ObjectVisitor* visitor);
1560 
1561  // Reports statistics for the space
1562  void ReportStatistics();
1563 
1564  // Overridden by subclasses to verify space-specific object
1565  // properties (e.g., only maps or free-list nodes are in map space).
1566  virtual void VerifyObject(HeapObject* obj) {}
1567 
1568  // Report code object related statistics
1569  void CollectCodeStatistics();
1570  static void ReportCodeStatistics();
1571  static void ResetCodeStatistics();
1572 #endif
1573 
1574  bool was_swept_conservatively() { return was_swept_conservatively_; }
1575  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
1576 
1577  // Evacuation candidates are swept by evacuator. Needs to return a valid
1578  // result before _and_ after evacuation has finished.
1579  static bool ShouldBeSweptLazily(Page* p) {
1580  return !p->IsEvacuationCandidate() &&
1581  !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1582  !p->WasSweptPrecisely();
1583  }
1584 
1585  void SetPagesToSweep(Page* first) {
1587  if (first == &anchor_) first = NULL;
1588  first_unswept_page_ = first;
1589  }
1590 
1591  void IncrementUnsweptFreeBytes(int by) {
1592  unswept_free_bytes_ += by;
1593  }
1594 
1595  void IncreaseUnsweptFreeBytes(Page* p) {
1596  ASSERT(ShouldBeSweptLazily(p));
1597  unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1598  }
1599 
1600  void DecreaseUnsweptFreeBytes(Page* p) {
1601  ASSERT(ShouldBeSweptLazily(p));
1602  unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1603  }
1604 
1605  bool AdvanceSweeper(intptr_t bytes_to_sweep);
1606 
1607  bool IsSweepingComplete() {
1608  return !first_unswept_page_->is_valid();
1609  }
1610 
1611  Page* FirstPage() { return anchor_.next_page(); }
1612  Page* LastPage() { return anchor_.prev_page(); }
1613 
1614  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
1615  free_list_.CountFreeListItems(p, sizes);
1616  }
1617 
1619 
1620  bool CanExpand();
1621 
1622  // Returns the number of total pages in this space.
1623  int CountTotalPages();
1624 
1625  // Return size of allocatable area on a page in this space.
1626  inline int AreaSize() {
1627  return area_size_;
1628  }
1629 
1630  protected:
1631  int area_size_;
1632 
1633  // Maximum capacity of this space.
1634  intptr_t max_capacity_;
1635 
1636  intptr_t SizeOfFirstPage();
1637 
1638  // Accounting information for this space.
1639  AllocationStats accounting_stats_;
1640 
1641  // The dummy page that anchors the double linked list of pages.
1642  Page anchor_;
1643 
1644  // The space's free list.
1645  FreeList free_list_;
1646 
1647  // Normal allocation information.
1648  AllocationInfo allocation_info_;
1649 
1650  // Bytes of each page that cannot be allocated. Possibly non-zero
1651  // for pages in spaces with only fixed-size objects. Always zero
1652  // for pages in spaces with variable sized objects (those pages are
1653  // padded with free-list nodes).
1654  int page_extra_;
1655 
1656  bool was_swept_conservatively_;
1657 
1658  // The first page to be swept when the lazy sweeper advances. Is set
1659  // to NULL when all pages have been swept.
1660  Page* first_unswept_page_;
1661 
1662  // The number of free bytes which could be reclaimed by advancing the
1663  // lazy sweeper. This is only an estimation because lazy sweeping is
1664  // done conservatively.
1665  intptr_t unswept_free_bytes_;
1666 
1667  // Expands the space by allocating a fixed number of pages. Returns false if
1668  // it cannot allocate requested number of pages from OS, or if the hard heap
1669  // size limit has been hit.
1670  bool Expand();
1671 
1672  // Generic fast case allocation function that tries linear allocation at the
1673  // address denoted by top in allocation_info_.
1674  inline HeapObject* AllocateLinearly(int size_in_bytes);
1675 
1676  // Slow path of AllocateRaw. This function is space-dependent.
1677  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1678 
1679  friend class PageIterator;
1680 };
1681 
1682 
1683 class NumberAndSizeInfo BASE_EMBEDDED {
1684  public:
1685  NumberAndSizeInfo() : number_(0), bytes_(0) {}
1686 
1687  int number() const { return number_; }
1688  void increment_number(int num) { number_ += num; }
1689 
1690  int bytes() const { return bytes_; }
1691  void increment_bytes(int size) { bytes_ += size; }
1692 
1693  void clear() {
1694  number_ = 0;
1695  bytes_ = 0;
1696  }
1697 
1698  private:
1699  int number_;
1700  int bytes_;
1701 };
1702 
1703 
1704 // HistogramInfo class for recording a single "bar" of a histogram. This
1705 // class is used for collecting statistics to print to the log file.
1706 class HistogramInfo: public NumberAndSizeInfo {
1707  public:
1708  HistogramInfo() : NumberAndSizeInfo() {}
1709 
1710  const char* name() { return name_; }
1711  void set_name(const char* name) { name_ = name; }
1712 
1713  private:
1714  const char* name_;
1715 };
1716 
1717 
1718 enum SemiSpaceId {
1719  kFromSpace = 0,
1720  kToSpace = 1
1721 };
1722 
1723 
1724 class SemiSpace;
1725 
1726 
1727 class NewSpacePage : public MemoryChunk {
1728  public:
1729  // GC related flags copied from from-space to to-space when
1730  // flipping semispaces.
1731  static const intptr_t kCopyOnFlipFlagsMask =
1732  (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1733  (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1734  (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1735 
1736  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1737 
1738  inline NewSpacePage* next_page() const {
1739  return static_cast<NewSpacePage*>(next_chunk());
1740  }
1741 
1742  inline void set_next_page(NewSpacePage* page) {
1743  set_next_chunk(page);
1744  }
1745 
1746  inline NewSpacePage* prev_page() const {
1747  return static_cast<NewSpacePage*>(prev_chunk());
1748  }
1749 
1750  inline void set_prev_page(NewSpacePage* page) {
1751  set_prev_chunk(page);
1752  }
1753 
1754  SemiSpace* semi_space() {
1755  return reinterpret_cast<SemiSpace*>(owner());
1756  }
1757 
1758  bool is_anchor() { return !this->InNewSpace(); }
1759 
1760  static bool IsAtStart(Address addr) {
1761  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1762  == kObjectStartOffset;
1763  }
1764 
1765  static bool IsAtEnd(Address addr) {
1766  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1767  }
1768 
1769  Address address() {
1770  return reinterpret_cast<Address>(this);
1771  }
1772 
1773  // Finds the NewSpacePage containing the given address.
1774  static inline NewSpacePage* FromAddress(Address address_in_page) {
1775  Address page_start =
1776  reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
1777  ~Page::kPageAlignmentMask);
1778  NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
1779  return page;
1780  }
1781 
1782  // Find the page for a limit address. A limit address is either an address
1783  // inside a page, or the address right after the last byte of a page.
1784  static inline NewSpacePage* FromLimit(Address address_limit) {
1785  return NewSpacePage::FromAddress(address_limit - 1);
1786  }
1787 
1788  private:
1789  // Create a NewSpacePage object that is only used as anchor
1790  // for the doubly-linked list of real pages.
1791  explicit NewSpacePage(SemiSpace* owner) {
1792  InitializeAsAnchor(owner);
1793  }
1794 
1795  static NewSpacePage* Initialize(Heap* heap,
1796  Address start,
1797  SemiSpace* semi_space);
1798 
1799  // Initialize a fake NewSpacePage used as sentinel at the ends
1800  // of a doubly-linked list of real NewSpacePages.
1801  // Only uses the prev/next links, and sets flags to not be in new-space.
1802  void InitializeAsAnchor(SemiSpace* owner);
1803 
1804  friend class SemiSpace;
1805  friend class SemiSpaceIterator;
1806 };
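FromAddress() and FromLimit() above rely on pages being aligned to a power-of-two size, so the page start can be recovered by masking off the low address bits. A stand-alone sketch of that arithmetic (not V8 code; the 1 MB page size is an assumption for the example):

#include <cstdint>

const uintptr_t kExamplePageSize = 1 << 20;  // assumed page size
const uintptr_t kExamplePageAlignmentMask = kExamplePageSize - 1;

// Page containing an interior address.
uintptr_t PageFromAddress(uintptr_t addr) {
  return addr & ~kExamplePageAlignmentMask;
}

// A limit may point one past the end of a page, so step back one byte first.
uintptr_t PageFromLimit(uintptr_t limit) {
  return PageFromAddress(limit - 1);
}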
1807 
1808 
1809 // -----------------------------------------------------------------------------
1810 // SemiSpace in young generation
1811 //
1812 // A semispace is a contiguous chunk of memory holding page-like memory
1813 // chunks. The mark-compact collector uses the memory of the first page in
1814 // the from space as a marking stack when tracing live objects.
1815 
1816 class SemiSpace : public Space {
1817  public:
1818  // Constructor.
1819  SemiSpace(Heap* heap, SemiSpaceId semispace)
1820  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1821  start_(NULL),
1822  age_mark_(NULL),
1823  id_(semispace),
1824  anchor_(this),
1825  current_page_(NULL) { }
1826 
1827  // Sets up the semispace using the given chunk.
1828  void SetUp(Address start, int initial_capacity, int maximum_capacity);
1829 
1830  // Tear down the space. Heap memory was not allocated by the space, so it
1831  // is not deallocated here.
1832  void TearDown();
1833 
1834  // True if the space has been set up but not torn down.
1835  bool HasBeenSetUp() { return start_ != NULL; }
1836 
1837  // Grow the semispace to the new capacity. The new capacity
1838  // requested must be larger than the current capacity and less than
1839  // the maximum capacity.
1840  bool GrowTo(int new_capacity);
1841 
1842  // Shrinks the semispace to the new capacity. The new capacity
1843  // requested must be more than the amount of used memory in the
1844  // semispace and less than the current capacity.
1845  bool ShrinkTo(int new_capacity);
1846 
1847  // Returns the start address of the first page of the space.
1848  Address space_start() {
1849  ASSERT(anchor_.next_page() != &anchor_);
1850  return anchor_.next_page()->area_start();
1851  }
1852 
1853  // Returns the start address of the current page of the space.
1854  Address page_low() {
1855  return current_page_->area_start();
1856  }
1857 
1858  // Returns one past the end address of the space.
1859  Address space_end() {
1860  return anchor_.prev_page()->area_end();
1861  }
1862 
1863  // Returns one past the end address of the current page of the space.
1864  Address page_high() {
1865  return current_page_->area_end();
1866  }
1867 
1868  bool AdvancePage() {
1869  NewSpacePage* next_page = current_page_->next_page();
1870  if (next_page == anchor()) return false;
1871  current_page_ = next_page;
1872  return true;
1873  }
1874 
1875  // Resets the space to using the first page.
1876  void Reset();
1877 
1878  // Age mark accessors.
1879  Address age_mark() { return age_mark_; }
1880  void set_age_mark(Address mark);
1881 
1882  // True if the address is in the address range of this semispace (not
1883  // necessarily below the allocation pointer).
1884  bool Contains(Address a) {
1885  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1886  == reinterpret_cast<uintptr_t>(start_);
1887  }
1888 
1889  // True if the object is a heap object in the address range of this
1890  // semispace (not necessarily below the allocation pointer).
1891  bool Contains(Object* o) {
1892  return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1893  }
1894 
1895  // If we don't have these here then SemiSpace will be abstract. However
1896  // they should never be called.
1897  virtual intptr_t Size() {
1898  UNREACHABLE();
1899  return 0;
1900  }
1901 
1902  virtual bool ReserveSpace(int bytes) {
1903  UNREACHABLE();
1904  return false;
1905  }
1906 
1907  bool is_committed() { return committed_; }
1908  bool Commit();
1909  bool Uncommit();
1910 
1911  NewSpacePage* first_page() { return anchor_.next_page(); }
1912  NewSpacePage* current_page() { return current_page_; }
1913 
1914 #ifdef DEBUG
1915  virtual void Print();
1916  virtual void Verify();
1917  // Validate a range of addresses in a SemiSpace.
1918  // The "from" address must be on a page prior to the "to" address,
1919  // in the linked page order, or it must be earlier on the same page.
1920  static void AssertValidRange(Address from, Address to);
1921 #else
1922  // Do nothing.
1923  inline static void AssertValidRange(Address from, Address to) {}
1924 #endif
1925 
1926  // Returns the current capacity of the semi space.
1927  int Capacity() { return capacity_; }
1928 
1929  // Returns the maximum capacity of the semi space.
1930  int MaximumCapacity() { return maximum_capacity_; }
1931 
1932  // Returns the initial capacity of the semi space.
1933  int InitialCapacity() { return initial_capacity_; }
1934 
1935  SemiSpaceId id() { return id_; }
1936 
1937  static void Swap(SemiSpace* from, SemiSpace* to);
1938 
1939  private:
1940  // Flips the semispace between being from-space and to-space.
1941  // Copies the flags into the masked positions on all pages in the space.
1942  void FlipPages(intptr_t flags, intptr_t flag_mask);
1943 
1944  NewSpacePage* anchor() { return &anchor_; }
1945 
1946  // The current and maximum capacity of the space.
1947  int capacity_;
1948  int maximum_capacity_;
1949  int initial_capacity_;
1950 
1951  // The start address of the space.
1952  Address start_;
1953  // Used to govern object promotion during mark-compact collection.
1954  Address age_mark_;
1955 
1956  // Masks and comparison values to test for containment in this semispace.
1957  uintptr_t address_mask_;
1958  uintptr_t object_mask_;
1959  uintptr_t object_expected_;
1960 
1961  bool committed_;
1962  SemiSpaceId id_;
1963 
1964  NewSpacePage anchor_;
1965  NewSpacePage* current_page_;
1966 
1967  friend class SemiSpaceIterator;
1968  friend class NewSpacePageIterator;
1969  public:
1970  TRACK_MEMORY("SemiSpace")
1971 };
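Contains() above works because the semispace occupies an aligned, power-of-two-sized address range: masking an address with address_mask_ yields start_ exactly when the address lies inside the range. A stand-alone sketch of the test (not V8 code; names are illustrative):

#include <cstdint>

struct SemiSpaceRangeSketch {
  uintptr_t start;         // aligned base of the semispace
  uintptr_t address_mask;  // ~(capacity - 1) for a power-of-two capacity

  bool Contains(uintptr_t addr) const {
    return (addr & address_mask) == start;
  }
};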
1972 
1973 
1974 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
1975 // semispace of the heap's new space. It iterates over the objects in the
1976 // semispace from a given start address (defaulting to the bottom of the
1977 // semispace) to the top of the semispace. New objects allocated after the
1978 // iterator is created are not iterated.
1979 class SemiSpaceIterator : public ObjectIterator {
1980  public:
1981  // Create an iterator over the objects in the given space. If no start
1982  // address is given, the iterator starts from the bottom of the space. If
1983  // no size function is given, the iterator calls Object::Size().
1984 
1985  // Iterate over all of allocated to-space.
1986  explicit SemiSpaceIterator(NewSpace* space);
1987  // Iterate over all of allocated to-space, with a custom size function.
1988  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1989  // Iterate over part of allocated to-space, from start to the end
1990  // of allocation.
1991  SemiSpaceIterator(NewSpace* space, Address start);
1992  // Iterate from one address to another in the same semi-space.
1993  SemiSpaceIterator(Address from, Address to);
1994 
1995  HeapObject* Next() {
1996  if (current_ == limit_) return NULL;
1997  if (NewSpacePage::IsAtEnd(current_)) {
1998  NewSpacePage* page = NewSpacePage::FromLimit(current_);
1999  page = page->next_page();
2000  ASSERT(!page->is_anchor());
2001  current_ = page->area_start();
2002  if (current_ == limit_) return NULL;
2003  }
2004 
2005  HeapObject* object = HeapObject::FromAddress(current_);
2006  int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2007 
2008  current_ += size;
2009  return object;
2010  }
2011 
2012  // Implementation of the ObjectIterator functions.
2013  virtual HeapObject* next_object() { return Next(); }
2014 
2015  private:
2016  void Initialize(Address start,
2017  Address end,
2018  HeapObjectCallback size_func);
2019 
2020  // The current iteration point.
2021  Address current_;
2022  // The end of iteration.
2023  Address limit_;
2024  // The callback function.
2025  HeapObjectCallback size_func_;
2026 };
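Typical use of the iterator above, assuming this header; heap->new_space() is assumed to come from heap.h. Next() walks page by page and returns NULL once the allocation top is reached.

SemiSpaceIterator it(heap->new_space());
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
  // Inspect object. Objects allocated after the iterator was created
  // are not visited.
}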
2027 
2028 
2029 // -----------------------------------------------------------------------------
2030 // A PageIterator iterates the pages in a semi-space.
2031 class NewSpacePageIterator BASE_EMBEDDED {
2032  public:
2033  // Make an iterator that runs over all pages in to-space.
2034  explicit inline NewSpacePageIterator(NewSpace* space);
2035 
2036  // Make an iterator that runs over all pages in the given semispace,
2037  // even those not used in allocation.
2038  explicit inline NewSpacePageIterator(SemiSpace* space);
2039 
2040  // Make iterator that iterates from the page containing start
2041  // to the page that contains limit in the same semispace.
2042  inline NewSpacePageIterator(Address start, Address limit);
2043 
2044  inline bool has_next();
2045  inline NewSpacePage* next();
2046 
2047  private:
2048  NewSpacePage* prev_page_; // Previous page returned.
2049  // Next page that will be returned. Cached here so that we can use this
2050  // iterator for operations that deallocate pages.
2051  NewSpacePage* next_page_;
2052  // Last page returned.
2053  NewSpacePage* last_page_;
2054 };
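A usage sketch, assuming this header: visiting every page of one semispace, for example to clear per-page flags, follows the usual has_next()/next() protocol declared above.

NewSpacePageIterator pages(semi_space);  // semi_space: a SemiSpace*
while (pages.has_next()) {
  NewSpacePage* page = pages.next();
  // Operate on page; deallocating it here is safe because the iterator
  // has already cached the next page.
}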
2055 
2056 
2057 // -----------------------------------------------------------------------------
2058 // The young generation space.
2059 //
2060 // The new space consists of a contiguous pair of semispaces. It simply
2061 // forwards most functions to the appropriate semispace.
2062 
2063 class NewSpace : public Space {
2064  public:
2065  // Constructor.
2066  explicit NewSpace(Heap* heap)
2067  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2068  to_space_(heap, kToSpace),
2069  from_space_(heap, kFromSpace),
2070  reservation_(),
2071  inline_allocation_limit_step_(0) {}
2072 
2073  // Sets up the new space using the given chunk.
2074  bool SetUp(int reserved_semispace_size_, int max_semispace_size);
2075 
2076  // Tears down the space. Heap memory was not allocated by the space, so it
2077  // is not deallocated here.
2078  void TearDown();
2079 
2080  // True if the space has been set up but not torn down.
2081  bool HasBeenSetUp() {
2082  return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2083  }
2084 
2085  // Flip the pair of spaces.
2086  void Flip();
2087 
2088  // Grow the capacity of the semispaces. Assumes that they are not at
2089  // their maximum capacity.
2090  void Grow();
2091 
2092  // Shrink the capacity of the semispaces.
2093  void Shrink();
2094 
2095  // True if the address or object lies in the address range of either
2096  // semispace (not necessarily below the allocation pointer).
2097  bool Contains(Address a) {
2098  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2099  == reinterpret_cast<uintptr_t>(start_);
2100  }
2101 
2102  bool Contains(Object* o) {
2103  Address a = reinterpret_cast<Address>(o);
2104  return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2105  }
2106 
2107  // Return the allocated bytes in the active semispace.
2108  virtual intptr_t Size() {
2109  return pages_used_ * NewSpacePage::kAreaSize +
2110  static_cast<int>(top() - to_space_.page_low());
2111  }
2112 
2113  // The same, but returning an int. We have to have the one that returns
2114  // intptr_t because it is inherited, but if we know we are dealing with the
2115  // new space, which can't get as big as the other spaces, then this is useful:
2116  int SizeAsInt() { return static_cast<int>(Size()); }
2117 
2118  // Return the current capacity of a semispace.
2119  intptr_t EffectiveCapacity() {
2120  SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2121  return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2122  }
2123 
2124  // Return the current capacity of a semispace.
2125  intptr_t Capacity() {
2126  ASSERT(to_space_.Capacity() == from_space_.Capacity());
2127  return to_space_.Capacity();
2128  }
2129 
2130  // Return the total amount of memory committed for new space.
2131  intptr_t CommittedMemory() {
2132  if (from_space_.is_committed()) return 2 * Capacity();
2133  return Capacity();
2134  }
2135 
2136  // Return the available bytes without growing.
2137  intptr_t Available() {
2138  return Capacity() - Size();
2139  }
2140 
2141  // Return the maximum capacity of a semispace.
2142  intptr_t MaximumCapacity() {
2143  ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2144  return to_space_.MaximumCapacity();
2145  }
2146 
2147  // Returns the initial capacity of a semispace.
2148  intptr_t InitialCapacity() {
2149  ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2150  return to_space_.InitialCapacity();
2151  }
2152 
2153  // Return the address of the allocation pointer in the active semispace.
2154  Address top() {
2155  ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
2156  return allocation_info_.top;
2157  }
2158  // Return the address of the first object in the active semispace.
2159  Address bottom() { return to_space_.space_start(); }
2160 
2161  // Get the age mark of the inactive semispace.
2162  Address age_mark() { return from_space_.age_mark(); }
2163  // Set the age mark in the active semispace.
2164  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2165 
2166  // The start address of the space and a bit mask. Anding an address in the
2167  // new space with the mask will result in the start address.
2168  Address start() { return start_; }
2169  uintptr_t mask() { return address_mask_; }
2170 
2171  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2172  ASSERT(Contains(addr));
2173  ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2174  IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2175  return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2176  }
2177 
2178  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2179  return reinterpret_cast<Address>(index << kPointerSizeLog2);
2180  }
2181 
2182  // The allocation top and limit addresses.
2183  Address* allocation_top_address() { return &allocation_info_.top; }
2184  Address* allocation_limit_address() { return &allocation_info_.limit; }
2185 
2186  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
2187 
2188  // Reset the allocation pointer to the beginning of the active semispace.
2189  void ResetAllocationInfo();
2190 
2191  void LowerInlineAllocationLimit(intptr_t step) {
2192  inline_allocation_limit_step_ = step;
2193  if (step == 0) {
2194  allocation_info_.limit = to_space_.page_high();
2195  } else {
2196  allocation_info_.limit = Min(
2197  allocation_info_.top + inline_allocation_limit_step_,
2198  allocation_info_.limit);
2199  }
2200  top_on_previous_step_ = allocation_info_.top;
2201  }
2202 
2203  // Get the extent of the inactive semispace (for use as a marking stack,
2204  // or to zap it). Notice: space-addresses are not necessarily on the
2205  // same page, so FromSpaceStart() might be above FromSpaceEnd().
2206  Address FromSpacePageLow() { return from_space_.page_low(); }
2207  Address FromSpacePageHigh() { return from_space_.page_high(); }
2208  Address FromSpaceStart() { return from_space_.space_start(); }
2209  Address FromSpaceEnd() { return from_space_.space_end(); }
2210 
2211  // Get the extent of the active semispace's pages' memory.
2212  Address ToSpaceStart() { return to_space_.space_start(); }
2213  Address ToSpaceEnd() { return to_space_.space_end(); }
2214 
2215  inline bool ToSpaceContains(Address address) {
2216  return to_space_.Contains(address);
2217  }
2218  inline bool FromSpaceContains(Address address) {
2219  return from_space_.Contains(address);
2220  }
2221 
2222  // True if the object is a heap object in the address range of the
2223  // respective semispace (not necessarily below the allocation pointer of the
2224  // semispace).
2225  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2226  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2227 
2228  // Try to switch the active semispace to a new, empty, page.
2229  // Returns false if this isn't possible or reasonable (i.e., there
2230  // are no pages, or the current page is already empty), or true
2231  // if successful.
2232  bool AddFreshPage();
2233 
2234  virtual bool ReserveSpace(int bytes);
2235 
2236  // Resizes a sequential string which must be the most recent thing that was
2237  // allocated in new space.
2238  template <typename StringType>
2239  inline void ShrinkStringAtAllocationBoundary(String* string, int len);
2240 
2241 #ifdef DEBUG
2242  // Verify the active semispace.
2243  virtual void Verify();
2244  // Print the active semispace.
2245  virtual void Print() { to_space_.Print(); }
2246 #endif
2247 
2248  // Iterates the active semispace to collect statistics.
2249  void CollectStatistics();
2250  // Reports previously collected statistics of the active semispace.
2251  void ReportStatistics();
2252  // Clears previously collected statistics.
2253  void ClearHistograms();
2254 
2255  // Record the allocation or promotion of a heap object. Note that we don't
2256  // record every single allocation, but only those that happen in the
2257  // to space during a scavenge GC.
2258  void RecordAllocation(HeapObject* obj);
2259  void RecordPromotion(HeapObject* obj);
2260 
2261  // Return whether the operation succeeded.
2262  bool CommitFromSpaceIfNeeded() {
2263  if (from_space_.is_committed()) return true;
2264  return from_space_.Commit();
2265  }
2266 
2267  bool UncommitFromSpace() {
2268  if (!from_space_.is_committed()) return true;
2269  return from_space_.Uncommit();
2270  }
2271 
2272  inline intptr_t inline_allocation_limit_step() {
2273  return inline_allocation_limit_step_;
2274  }
2275 
2276  SemiSpace* active_space() { return &to_space_; }
2277 
2278  private:
2279  // Update allocation info to match the current to-space page.
2280  void UpdateAllocationInfo();
2281 
2282  Address chunk_base_;
2283  uintptr_t chunk_size_;
2284 
2285  // The semispaces.
2286  SemiSpace to_space_;
2287  SemiSpace from_space_;
2288  VirtualMemory reservation_;
2289  int pages_used_;
2290 
2291  // Start address and bit mask for containment testing.
2292  Address start_;
2293  uintptr_t address_mask_;
2294  uintptr_t object_mask_;
2295  uintptr_t object_expected_;
2296 
2297  // Allocation pointer and limit for normal allocation and allocation during
2298  // mark-compact collection.
2299  AllocationInfo allocation_info_;
2300 
2301  // When incremental marking is active we will set allocation_info_.limit
2302  // to be lower than actual limit and then will gradually increase it
2303  // in steps to guarantee that we do incremental marking steps even
2304  // when all allocation is performed from inlined generated code.
2305  intptr_t inline_allocation_limit_step_;
2306 
2307  Address top_on_previous_step_;
2308 
2309  HistogramInfo* allocated_histogram_;
2310  HistogramInfo* promoted_histogram_;
2311 
2312  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
2313 
2314  friend class SemiSpaceIterator;
2315 
2316  public:
2317  TRACK_MEMORY("NewSpace")
2318 };
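LowerInlineAllocationLimit() above is the hook that keeps incremental marking progressing even when all allocation happens in generated code: the visible limit is pulled down to top + step, so the inlined fast path fails after roughly step bytes and the runtime gets a chance to do a marking step. A stand-alone sketch of the idea (not V8 code; names are illustrative):

#include <algorithm>
#include <cstdint>

struct AllocationWindowSketch { uintptr_t top, limit; };

// step == 0 restores the real page limit; otherwise the limit is clamped so
// at most `step` bytes can be allocated before the slow path runs again.
void LowerInlineLimitSketch(AllocationWindowSketch* a,
                            uintptr_t page_high,
                            intptr_t step) {
  if (step == 0) {
    a->limit = page_high;
  } else {
    a->limit = std::min<uintptr_t>(a->top + step, a->limit);
  }
}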
2319 
2320 
2321 // -----------------------------------------------------------------------------
2322 // Old object space (excluding map objects)
2323 
2324 class OldSpace : public PagedSpace {
2325  public:
2326  // Creates an old space object with a given maximum capacity.
2327  // The constructor does not allocate pages from OS.
2328  OldSpace(Heap* heap,
2329  intptr_t max_capacity,
2330  AllocationSpace id,
2331  Executability executable)
2332  : PagedSpace(heap, max_capacity, id, executable) {
2333  page_extra_ = 0;
2334  }
2335 
2336  // The limit of allocation for a page in this space.
2337  virtual Address PageAllocationLimit(Page* page) {
2338  return page->area_end();
2339  }
2340 
2341  public:
2342  TRACK_MEMORY("OldSpace")
2343 };
2344 
2345 
2346 // For contiguous spaces, top should be in the space (or at the end) and limit
2347 // should be the end of the space.
2348 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2349  SLOW_ASSERT((space).page_low() <= (info).top \
2350  && (info).top <= (space).page_high() \
2351  && (info).limit <= (space).page_high())
2352 
2353 
2354 // -----------------------------------------------------------------------------
2355 // Old space for objects of a fixed size
2356 
2357 class FixedSpace : public PagedSpace {
2358  public:
2359  FixedSpace(Heap* heap,
2360  intptr_t max_capacity,
2361  AllocationSpace id,
2362  int object_size_in_bytes,
2363  const char* name)
2364  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2365  object_size_in_bytes_(object_size_in_bytes),
2366  name_(name) {
2367  page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
2368  }
2369 
2370  // The limit of allocation for a page in this space.
2371  virtual Address PageAllocationLimit(Page* page) {
2372  return page->area_end() - page_extra_;
2373  }
2374 
2375  int object_size_in_bytes() { return object_size_in_bytes_; }
2376 
2377  // Prepares for a mark-compact GC.
2378  virtual void PrepareForMarkCompact();
2379 
2380  private:
2381  // The size of objects in this space.
2382  int object_size_in_bytes_;
2383 
2384  // The name of this space.
2385  const char* name_;
2386 };
2387 
2388 
2389 // -----------------------------------------------------------------------------
2390 // Old space for all map objects
2391 
2392 class MapSpace : public FixedSpace {
2393  public:
2394  // Creates a map space object with a maximum capacity.
2395  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2396  : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
2397  max_map_space_pages_(kMaxMapPageIndex - 1) {
2398  }
2399 
2400  // Given an index, returns the page address.
2401  // TODO(1600): this limit is artificial just to keep code compilable
2402  static const int kMaxMapPageIndex = 1 << 16;
2403 
2404  virtual int RoundSizeDownToObjectAlignment(int size) {
2405  if (IsPowerOf2(Map::kSize)) {
2406  return RoundDown(size, Map::kSize);
2407  } else {
2408  return (size / Map::kSize) * Map::kSize;
2409  }
2410  }
2411 
2412  protected:
2413 #ifdef DEBUG
2414  virtual void VerifyObject(HeapObject* obj);
2415 #endif
2416 
2417  private:
2418  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
2419 
2420  // Do map space compaction if there is a page gap.
2421  int CompactionThreshold() {
2422  return kMapsPerPage * (max_map_space_pages_ - 1);
2423  }
2424 
2425  const int max_map_space_pages_;
2426 
2427  public:
2428  TRACK_MEMORY("MapSpace")
2429 };
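RoundSizeDownToObjectAlignment() above rounds a byte count down to a whole number of fixed-size objects, using a cheap mask when the object size is a power of two. A stand-alone sketch of the same computation (not V8 code):

// Round `size` down to a multiple of `object_size` (object_size > 0).
int RoundDownToObjectSizeSketch(int size, int object_size) {
  bool is_power_of_two = (object_size & (object_size - 1)) == 0;
  if (is_power_of_two) {
    return size & ~(object_size - 1);         // mask form, no division
  }
  return (size / object_size) * object_size;  // general form
}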
2430 
2431 
2432 // -----------------------------------------------------------------------------
2433 // Old space for all global object property cell objects
2434 
2435 class CellSpace : public FixedSpace {
2436  public:
2437  // Creates a property cell space object with a maximum capacity.
2438  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2439  : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2440  {}
2441 
2442  virtual int RoundSizeDownToObjectAlignment(int size) {
2443  if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
2444  return RoundDown(size, JSGlobalPropertyCell::kSize);
2445  } else {
2446  return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
2447  }
2448  }
2449 
2450  protected:
2451 #ifdef DEBUG
2452  virtual void VerifyObject(HeapObject* obj);
2453 #endif
2454 
2455  public:
2456  TRACK_MEMORY("CellSpace")
2457 };
2458 
2459 
2460 // -----------------------------------------------------------------------------
2461 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2462 // the large object space. A large object is allocated from OS heap with
2463 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2464 // A large object always starts at Page::kObjectStartOffset into a page.
2465 // Large objects do not move during garbage collections.
2466 
2467 class LargeObjectSpace : public Space {
2468  public:
2469  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2470  virtual ~LargeObjectSpace() {}
2471 
2472  // Initializes internal data structures.
2473  bool SetUp();
2474 
2475  // Releases internal resources, frees objects in this space.
2476  void TearDown();
2477 
2478  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2479  if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2480  return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2481  }
2482 
2483  // Shared implementation of AllocateRaw, AllocateRawCode and
2484  // AllocateRawFixedArray.
2485  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2486  Executability executable);
2487 
2488  // Available bytes for objects in this space.
2489  inline intptr_t Available();
2490 
2491  virtual intptr_t Size() {
2492  return size_;
2493  }
2494 
2495  virtual intptr_t SizeOfObjects() {
2496  return objects_size_;
2497  }
2498 
2499  int PageCount() {
2500  return page_count_;
2501  }
2502 
2503  // Finds an object for a given address, returns Failure::Exception()
2504  // if it is not found. The function iterates through all objects in this
2505  // space, so it may be slow.
2506  MaybeObject* FindObject(Address a);
2507 
2508  // Finds a large object page containing the given address, returns NULL
2509  // if such a page doesn't exist.
2510  LargePage* FindPage(Address a);
2511 
2512  // Frees unmarked objects.
2513  void FreeUnmarkedObjects();
2514 
2515  // Checks whether a heap object is in this space; O(1).
2516  bool Contains(HeapObject* obj);
2517 
2518  // Checks whether the space is empty.
2519  bool IsEmpty() { return first_page_ == NULL; }
2520 
2521  // See the comments for ReserveSpace in the Space class. This has to be
2522  // called after ReserveSpace has been called on the paged spaces, since they
2523  // may use some memory, leaving less for large objects.
2524  virtual bool ReserveSpace(int bytes);
2525 
2526  LargePage* first_page() { return first_page_; }
2527 
2528 #ifdef DEBUG
2529  virtual void Verify();
2530  virtual void Print();
2531  void ReportStatistics();
2532  void CollectCodeStatistics();
2533 #endif
2534  // Checks whether an address is in the object area in this space. It
2535  // iterates all objects in the space. May be slow.
2536  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2537 
2538  private:
2539  intptr_t max_capacity_;
2540  // The head of the linked list of large object chunks.
2541  LargePage* first_page_;
2542  intptr_t size_; // allocated bytes
2543  int page_count_; // number of chunks
2544  intptr_t objects_size_; // size of objects
2545  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2546  HashMap chunk_map_;
2547 
2548  friend class LargeObjectIterator;
2549 
2550  public:
2551  TRACK_MEMORY("LargeObjectSpace")
2552 };
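ObjectSizeFor() above answers: given a chunk of a certain size from the OS, how many bytes remain for the object once one page plus the object-start offset of header/padding space is set aside? A stand-alone sketch with assumed constants (not V8 code):

#include <cstdint>

const intptr_t kExamplePageSizeLO = 1 << 20;       // assumed page size
const intptr_t kExampleObjectStartOffset = 256;    // assumed header reserve

intptr_t ObjectSizeForSketch(intptr_t chunk_size) {
  if (chunk_size <= kExamplePageSizeLO + kExampleObjectStartOffset) return 0;
  return chunk_size - kExamplePageSizeLO - kExampleObjectStartOffset;
}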
2553 
2554 
2555 class LargeObjectIterator: public ObjectIterator {
2556  public:
2557  explicit LargeObjectIterator(LargeObjectSpace* space);
2558  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2559 
2560  HeapObject* Next();
2561 
2562  // implementation of ObjectIterator.
2563  virtual HeapObject* next_object() { return Next(); }
2564 
2565  private:
2566  LargePage* current_;
2567  HeapObjectCallback size_func_;
2568 };
2569 
2570 
2571 // Iterates over the chunks (pages and large object pages) that can contain
2572 // pointers to new space.
2573 class PointerChunkIterator BASE_EMBEDDED {
2574  public:
2575  inline explicit PointerChunkIterator(Heap* heap);
2576 
2577  // Return NULL when the iterator is done.
2578  MemoryChunk* next() {
2579  switch (state_) {
2580  case kOldPointerState: {
2581  if (old_pointer_iterator_.has_next()) {
2582  return old_pointer_iterator_.next();
2583  }
2584  state_ = kMapState;
2585  // Fall through.
2586  }
2587  case kMapState: {
2588  if (map_iterator_.has_next()) {
2589  return map_iterator_.next();
2590  }
2591  state_ = kLargeObjectState;
2592  // Fall through.
2593  }
2594  case kLargeObjectState: {
2595  HeapObject* heap_object;
2596  do {
2597  heap_object = lo_iterator_.Next();
2598  if (heap_object == NULL) {
2599  state_ = kFinishedState;
2600  return NULL;
2601  }
2602  // Fixed arrays are the only pointer-containing objects in large
2603  // object space.
2604  } while (!heap_object->IsFixedArray());
2605  MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2606  return answer;
2607  }
2608  case kFinishedState:
2609  return NULL;
2610  default:
2611  break;
2612  }
2613  UNREACHABLE();
2614  return NULL;
2615  }
2616 
2617 
2618  private:
2619  enum State {
2620  kOldPointerState,
2621  kMapState,
2622  kLargeObjectState,
2623  kFinishedState
2624  };
2625  State state_;
2626  PageIterator old_pointer_iterator_;
2627  PageIterator map_iterator_;
2628  LargeObjectIterator lo_iterator_;
2629 };
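A usage sketch of the iterator above, assuming this header: every chunk that may hold pointers into new space (old pointer pages, map pages, and large fixed arrays) is visited once, which is how a store-buffer overflow can be handled by rescanning intergenerational pointers.

PointerChunkIterator chunks(heap);  // heap: a Heap*
MemoryChunk* chunk;
while ((chunk = chunks.next()) != NULL) {
  // Scan chunk for pointers into new space.
}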
2630 
2631 
2632 #ifdef DEBUG
2633 struct CommentStatistic {
2634  const char* comment;
2635  int size;
2636  int count;
2637  void Clear() {
2638  comment = NULL;
2639  size = 0;
2640  count = 0;
2641  }
2642  // Must be small, since an iteration is used for lookup.
2643  static const int kMaxComments = 64;
2644 };
2645 #endif
2646 
2647 
2648 } } // namespace v8::internal
2649 
2650 #endif // V8_SPACES_H_
byte * Address
Definition: globals.h:172
Address FromSpaceEnd()
Definition: spaces.h:2209
void increment_number(int num)
Definition: spaces.h:1688
virtual bool ReserveSpace(int bytes)
Definition: spaces.cc:2185
static const int kHeaderSize
Definition: objects.h:3685
virtual Address PageAllocationLimit(Page *page)
Definition: spaces.h:2371
void ClearEvacuationCandidate()
Definition: spaces.h:593
STATIC_CHECK((kStringRepresentationMask|kStringEncodingMask)==Internals::kFullStringRepresentationMask)
static uint32_t FastAddressToMarkbitIndex(Address addr)
Definition: spaces.h:548
virtual intptr_t Size()
Definition: spaces.h:2491
static const size_t kSlotsBufferOffset
Definition: spaces.h:489
Space(Heap *heap, AllocationSpace id, Executability executable)
Definition: spaces.h:761
void ShrinkStringAtAllocationBoundary(String *string, int len)
Definition: spaces-inl.h:338
#define SLOW_ASSERT(condition)
Definition: checks.h:276
void(* MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
Definition: v8.h:2689
void Allocate(int bytes)
Definition: spaces.h:1537
void Reset()
Definition: flags.cc:1446
static const int kEvacuationCandidateMask
Definition: spaces.h:407
void set_next_page(Page *page)
Definition: spaces-inl.h:236
void ZapBlock(Address start, size_t size)
Definition: spaces.cc:639
static int CellsForLength(int length)
Definition: spaces.h:182
bool GrowTo(int new_capacity)
Definition: spaces.cc:1387
static int SizeFor(int cells_count)
Definition: spaces.h:190
static int CodePageAreaSize()
Definition: spaces.h:1041
static void PrintWord(uint32_t word, uint32_t himask=0)
Definition: spaces.h:226
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
Definition: utils.h:212
intptr_t Available()
Definition: spaces.h:1486
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
Definition: spaces.cc:430
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:1822
Address FromSpacePageHigh()
Definition: spaces.h:2207
CellType * cell()
Definition: spaces.h:131
bool Contains(Address addr)
Definition: spaces.h:365
friend class PageIterator
Definition: spaces.h:1679
FixedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, int object_size_in_bytes, const char *name)
Definition: spaces.h:2359
void PrintF(const char *format,...)
Definition: v8utils.cc:40
virtual intptr_t SizeOfObjects()
Definition: spaces.h:779
void increment_bytes(int size)
Definition: spaces.h:1691
void SetTop(Address top, Address limit)
Definition: spaces.h:1530
bool was_swept_conservatively()
Definition: spaces.h:1574
void set_next(FreeListNode *next)
Definition: spaces.cc:1874
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
Definition: spaces.cc:659
OldSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.h:2328
virtual void PrepareForMarkCompact()
Definition: spaces.cc:2208
MUST_USE_RESULT MaybeObject * AllocateRaw(int size_in_bytes)
Definition: spaces-inl.h:263
bool SetUp(const size_t requested_size)
Definition: spaces.cc:135
void ReleaseAllUnusedPages()
Definition: spaces.cc:953
void set_scan_on_scavenge(bool scan)
Definition: spaces-inl.h:185
virtual intptr_t Waste()
Definition: spaces.h:1503
MemoryChunk * next_chunk_
Definition: spaces.h:605
static const int kMaxMapPageIndex
Definition: spaces.h:2402
#define ASSERT_NOT_NULL(p)
Definition: checks.h:285
static bool ShouldBeSweptLazily(Page *p)
Definition: spaces.h:1579
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:304
LargeObjectIterator(LargeObjectSpace *space)
Definition: spaces.cc:2547
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, 
true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information 
(implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") 
DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) 
DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 
0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file 
in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") 
DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
Address FromSpaceStart()
Definition: spaces.h:2208
INLINE(uint32_t AddressToMarkbitIndex(Address addr))
Definition: spaces.h:2171
static const intptr_t kSizeOffset
Definition: spaces.h:482
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:273
Address age_mark()
Definition: spaces.h:2162
INLINE(static uint32_t IndexToCell(uint32_t index))
Definition: spaces.h:194
void TakeControl(VirtualMemory *from)
Definition: platform.h:391
void set_name(const char *name)
Definition: spaces.h:1711
void ResetAllocationInfo()
Definition: spaces.cc:1189
void AddObject(Address addr, int size)
Definition: spaces.h:904
Address * allocation_top_address()
Definition: spaces.h:2183
Flag flags[]
Definition: flags.cc:1467
Address space_start()
Definition: spaces.h:1848
intptr_t SizeOfFirstPage()
Definition: spaces.cc:874
static const intptr_t kPageAlignmentMask
Definition: spaces.h:704
static void Clear(MemoryChunk *chunk)
Definition: spaces-inl.h:42
AllocationAction
Definition: v8.h:2683
virtual bool ReserveSpace(int bytes)=0
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2272
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
Definition: spaces.cc:575
const int kBitsPerByteLog2
Definition: globals.h:252
#define ASSERT(condition)
Definition: checks.h:270
void set_reserved_memory(VirtualMemory *reservation)
Definition: spaces.h:345
v8::Handle< v8::Value > Print(const v8::Arguments &args)
static const uint32_t kBitsPerCell
Definition: spaces.h:169
static void IncrementLiveBytesFromGC(Address address, int by)
Definition: spaces.h:471
void ClearFlag(int flag)
Definition: spaces.h:421
#define ASSERT_PAGE_OFFSET(offset)
Definition: spaces.h:109
const int kPointerSizeLog2
Definition: globals.h:246
bool FromSpaceContains(Object *o)
Definition: spaces.h:2226
FreeListNode * next()
Definition: spaces.cc:1850
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
Definition: spaces.cc:586
static const int kFlagsOffset
Definition: spaces.h:564
const char * comment() const
Definition: flags.cc:1362
bool WasSweptConservatively()
Definition: spaces.h:716
const intptr_t kCodeAlignment
Definition: v8globals.h:67
#define POINTER_SIZE_ALIGN(value)
Definition: v8globals.h:401
void RecordAllocation(HeapObject *obj)
Definition: spaces.cc:1804
HeapObject * AllocateLinearly(int size_in_bytes)
Definition: spaces-inl.h:252
NewSpacePage * current_page()
Definition: spaces.h:1912
virtual bool ReserveSpace(int bytes)
Definition: spaces.h:1902
MemoryAllocator(Isolate *isolate)
Definition: spaces.cc:264
virtual HeapObject * next_object()
Definition: spaces.h:2013
intptr_t EffectiveCapacity()
Definition: spaces.h:2119
virtual HeapObject * next_object()
Definition: spaces.h:1133
Address OffsetToAddress(int offset)
Definition: spaces.h:687
static const int kPageSize
Definition: spaces.h:695
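kPageSize and kPageAlignmentMask are what make the FromAddress / FromAllocationTop style lookups in this index cheap: pages are power-of-two sized and power-of-two aligned, so the page owning any interior address can be recovered by masking off the low bits. A minimal sketch of that arithmetic follows; the names and the 1 MB value are illustrative assumptions, not the spaces.h code.

    // Sketch only: assumes power-of-two sized, power-of-two aligned pages,
    // as kPageSize / kPageAlignmentMask imply. Values are illustrative.
    #include <cassert>
    #include <cstdint>

    const uintptr_t kSketchPageSize = uintptr_t{1} << 20;        // assumed 1 MB pages
    const uintptr_t kSketchPageAlignmentMask = kSketchPageSize - 1;

    // Recover the start of the page containing an arbitrary interior address.
    inline uintptr_t PageStartFromAddress(uintptr_t addr) {
      return addr & ~kSketchPageAlignmentMask;
    }

    int main() {
      uintptr_t page = 5 * kSketchPageSize;         // a page-aligned base
      uintptr_t obj = page + 0x1234;                // an object inside that page
      assert(PageStartFromAddress(obj) == page);    // masking finds the page header
      return 0;
    }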
uint32_t CellType
Definition: spaces.h:126
INLINE(static Bitmap *FromAddress(Address addr))
Definition: spaces.h:214
static bool IsAlignedToPageSize(Address a)
Definition: spaces.h:676
CodeRange(Isolate *isolate)
Definition: spaces.cc:126
Address AllocateAlignedMemory(size_t requested, size_t alignment, Executability executable, VirtualMemory *controller)
Definition: spaces.cc:356
void FreeMemory(VirtualMemory *reservation, Executability executable)
Definition: spaces.cc:295
static bool IsAtEnd(Address addr)
Definition: spaces.h:1765
virtual intptr_t Size()=0
static const size_t kLength
Definition: spaces.h:175
void ClearSweptConservatively()
Definition: spaces.h:723
const int kIntSize
Definition: globals.h:231
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.cc:2576
friend class SemiSpace
Definition: spaces.h:1804
void IncrementUnsweptFreeBytes(int by)
Definition: spaces.h:1591
void IncreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1595
MarkBit Next()
Definition: spaces.h:146
AllocationStats accounting_stats_
Definition: spaces.h:1639
void Free(MemoryChunk *chunk)
Definition: spaces.cc:595
Address ReserveAlignedMemory(size_t requested, size_t alignment, VirtualMemory *controller)
Definition: spaces.cc:342
Executability executable()
Definition: spaces.h:769
NewSpacePage * first_page()
Definition: spaces.h:1911
SlotsBuffer * slots_buffer_
Definition: spaces.h:626
intptr_t AvailableExecutable()
Definition: spaces.h:970
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
Definition: spaces.cc:679
void ClearSweptPrecisely()
Definition: spaces.h:722
#define UNREACHABLE()
Definition: checks.h:50
#define MAP_POINTER_ALIGN(value)
Definition: v8globals.h:405
static const uint32_t kBytesPerCellLog2
Definition: spaces.h:173
void AllocateBytes(intptr_t size_in_bytes)
Definition: spaces.h:1265
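The ExpandSpace / ShrinkSpace / AllocateBytes / DeallocateBytes / WasteBytes entries in this index belong to the space accounting object (accounting_stats_ above). As a hedged sketch, that bookkeeping can be modelled with three counters; the struct and field names below are assumptions, not the spaces.h layout.

    #include <cstdint>

    // Illustrative capacity/size/waste bookkeeping; field names are assumed.
    struct SketchAllocationStats {
      intptr_t capacity = 0;   // bytes owned by the space
      intptr_t size = 0;       // bytes currently allocated to objects
      intptr_t waste = 0;      // bytes lost to fillers / fragmentation

      void ExpandSpace(int bytes)       { capacity += bytes; }
      void ShrinkSpace(int bytes)       { capacity -= bytes; }
      void AllocateBytes(intptr_t b)    { size += b; }
      void DeallocateBytes(intptr_t b)  { size -= b; }
      void WasteBytes(int bytes)        { waste += bytes; }

      intptr_t Available() const { return capacity - size; }
    };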
static const size_t kHeaderSize
Definition: spaces.h:491
int(* HeapObjectCallback)(HeapObject *obj)
Definition: v8globals.h:245
virtual HeapObject * next_object()=0
void SetFlagTo(int flag, bool value)
Definition: spaces.h:425
LargePage * FindPage(Address a)
Definition: spaces.cc:2673
FreeListNode ** next_address()
Definition: spaces.cc:1863
bool AdvanceSweeper(intptr_t bytes_to_sweep)
Definition: spaces.cc:2274
intptr_t CommittedMemory()
Definition: spaces.h:1471
const intptr_t kFailureTagMask
Definition: v8globals.h:73
bool Contains(Address a)
Definition: spaces.h:2097
#define MUST_USE_RESULT
Definition: globals.h:360
bool Contains(Address a)
Definition: spaces-inl.h:178
Address ToSpaceEnd()
Definition: spaces.h:2213
void SetFlag(int flag)
Definition: spaces.h:417
intptr_t CommittedMemory()
Definition: spaces.h:2131
static NewSpacePage * FromAddress(Address address_in_page)
Definition: spaces.h:1774
Address ToSpaceStart()
Definition: spaces.h:2212
friend class NewSpacePageIterator
Definition: spaces.h:1968
bool UncommitBlock(Address start, size_t size)
Definition: spaces.cc:632
virtual intptr_t SizeOfObjects()
Definition: spaces.h:1495
void LowerInlineAllocationLimit(intptr_t step)
Definition: spaces.h:2191
static int CodePageAreaStartOffset()
Definition: spaces.cc:715
MapSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2395
void EvictEvacuationCandidatesFromFreeLists()
Definition: spaces.cc:2304
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:781
bool ContainsLimit(Address addr)
Definition: spaces.h:372
const int kPointerSize
Definition: globals.h:234
virtual intptr_t Size()
Definition: spaces.h:2108
bool IsFlagSet(int flag)
Definition: spaces.h:433
intptr_t OffsetFrom(T x)
Definition: utils.h:126
void MarkEvacuationCandidate()
Definition: spaces.h:588
static int CodePageGuardSize()
Definition: spaces.cc:710
bool IsAligned(T value, U alignment)
Definition: utils.h:206
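IsPowerOf2, IsAligned, RoundDown and OffsetFrom from utils.h appear throughout the definitions above; they are the usual power-of-two bit tricks. A self-contained sketch, assuming integer arguments and power-of-two alignments (the real templates are more general):

    #include <cstdint>

    // Sketches of the utils.h helpers; valid only for power-of-two alignments.
    inline bool SketchIsPowerOf2(uintptr_t x) {
      return x != 0 && (x & (x - 1)) == 0;
    }

    inline bool SketchIsAligned(uintptr_t value, uintptr_t alignment) {
      return (value & (alignment - 1)) == 0;
    }

    inline uintptr_t SketchRoundDown(uintptr_t x, uintptr_t m) {
      return x & ~(m - 1);   // keep only whole multiples of m at or below x
    }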
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName)
Definition: globals.h:332
void InitializeReservedMemory()
Definition: spaces.h:341
void CountFreeListItems(Page *p, FreeList::SizeStats *sizes)
Definition: spaces.h:1614
intptr_t Available()
Definition: spaces.h:2137
bool Contains(Object *o)
Definition: spaces.h:2102
virtual bool ReserveSpace(int bytes)
Definition: spaces.cc:2242
void DecreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1600
bool WasSweptPrecisely()
Definition: spaces.h:715
bool ShouldSkipEvacuationSlotRecording()
Definition: spaces.h:568
void initialize_scan_on_scavenge(bool scan)
Definition: spaces.h:351
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
size_t size() const
Definition: spaces.h:504
void IncreaseCapacity(int size)
Definition: spaces.h:1541
SemiSpaceId id()
Definition: spaces.h:1935
static const int kSize
Definition: objects.h:4972
void set_age_mark(Address mark)
Definition: spaces.h:2164
void RecordPromotion(HeapObject *obj)
Definition: spaces.cc:1812
static const int kMaxNonCodeHeapObjectSize
Definition: spaces.h:701
bool contains(Address address)
Definition: spaces.h:830
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2442
const int kBitsPerByte
Definition: globals.h:251
void set_prev_page(Page *page)
Definition: spaces-inl.h:242
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, size_t *allocated)
Definition: spaces.cc:211
#define TRACK_MEMORY(name)
Definition: v8globals.h:429
bool IsPowerOf2(T x)
Definition: utils.h:50
static const intptr_t kAlignmentMask
Definition: spaces.h:480
bool WasSwept()
Definition: spaces.h:717
#define BASE_EMBEDDED
Definition: allocation.h:68
static int CodePageAreaEndOffset()
Definition: spaces.cc:722
bool FromSpaceContains(Address address)
Definition: spaces.h:2218
bool ToSpaceContains(Address address)
Definition: spaces.h:2215
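ToSpaceContains / FromSpaceContains together with the uintptr_t mask() accessor listed in this index suggest mask-based membership tests over a contiguous, size-aligned semispace reservation. A hedged sketch of that idea, with assumed field names:

    #include <cstdint>

    // Sketch: membership test for an aligned, power-of-two-sized reservation.
    struct SketchSemiSpaceRange {
      uintptr_t start;         // base of the reservation, aligned to its size
      uintptr_t address_mask;  // ~(reservation_size - 1)

      bool Contains(uintptr_t a) const {
        // Every address inside the reservation shares the high bits of 'start'.
        return (a & address_mask) == start;
      }
    };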
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1519
HeapObject * GetObject()
Definition: spaces.h:738
static const uint32_t kBytesPerCell
Definition: spaces.h:172
void ReleasePage(Page *page)
Definition: spaces.cc:917
MemoryChunk * prev_chunk_
Definition: spaces.h:606
void MarkSweptConservatively()
Definition: spaces.h:720
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
Definition: spaces-inl.h:162
bool ToSpaceContains(Object *o)
Definition: spaces.h:2225
MaybeObject * FindObject(Address a)
Definition: spaces.cc:2664
void SetArea(Address area_start, Address area_end)
Definition: spaces.h:510
VirtualMemory reservation_
Definition: spaces.h:615
static const int kObjectStartOffset
Definition: spaces.h:501
void ExpandSpace(int size_in_bytes)
Definition: spaces.h:1249
bool SlowContains(Address addr)
Definition: spaces.h:2536
void set_prev_page(NewSpacePage *page)
Definition: spaces.h:1750
bool Contains(HeapObject *obj)
Definition: spaces.cc:2743
Space * owner() const
Definition: spaces.h:321
static void Swap(SemiSpace *from, SemiSpace *to)
Definition: spaces.cc:1493
void InitializeAsAnchor(PagedSpace *owner)
Definition: spaces.cc:386
void Print(uint32_t pos, uint32_t cell)
Definition: spaces.h:238
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:410
void MarkSweptPrecisely()
Definition: spaces.h:719
SemiSpaceIterator(NewSpace *space)
Definition: spaces.cc:1583
INLINE(static Page *FromAllocationTop(Address top))
Definition: spaces.h:664
static const intptr_t kLiveBytesOffset
Definition: spaces.h:484
intptr_t Capacity()
Definition: spaces.h:2125
bool Contains(HeapObject *o)
Definition: spaces.h:1455
static intptr_t ObjectSizeFor(intptr_t chunk_size)
Definition: spaces.h:2478
INLINE(int Offset(Address a))
Definition: spaces.h:681
void set_next_page(NewSpacePage *page)
Definition: spaces.h:1742
void ShrinkSpace(int size_in_bytes)
Definition: spaces.h:1258
bool UncommitFromSpace()
Definition: spaces.h:2267
MemoryChunk * AllocateChunk(intptr_t body_size, Executability executable, Space *space)
Definition: spaces.cc:490
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:1784
virtual intptr_t Size()
Definition: spaces.h:1491
Heap * heap() const
Definition: spaces.h:766
Page * prev_page()
Definition: spaces-inl.h:230
bool Contains(Address a)
Definition: spaces.h:1884
void IncrementLiveBytes(int by)
Definition: spaces.h:456
void SetUp(Address start, int initial_capacity, int maximum_capacity)
Definition: spaces.cc:1320
CellType mask()
Definition: spaces.h:132
SemiSpace * semi_space()
Definition: spaces.h:1754
SlotsBuffer ** slots_buffer_address()
Definition: spaces.h:584
virtual bool ReserveSpace(int bytes)
Definition: spaces.cc:2267
static FreeListNode * FromAddress(Address address)
Definition: spaces.h:1302
SemiSpace(Heap *heap, SemiSpaceId semispace)
Definition: spaces.h:1819
NewSpacePage * next_page() const
Definition: spaces.h:1738
virtual HeapObject * next_object()
Definition: spaces.h:2563
INLINE(static uint32_t CellToIndex(uint32_t index))
Definition: spaces.h:198
static const intptr_t kAlignment
Definition: spaces.h:477
void set_owner(Space *space)
Definition: spaces.h:330
static bool IsSeq(uint32_t cell)
Definition: spaces.h:268
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:764
CellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2438
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
Definition: spaces.cc:646
static const int kAreaSize
Definition: spaces.h:1736
static const intptr_t kCopyOnFlipFlagsMask
Definition: spaces.h:1731
LargePage * next_page() const
Definition: spaces.h:742
static const int kPointersFromHereAreInterestingMask
Definition: spaces.h:404
static const int kBodyOffset
Definition: spaces.h:494
void set_prev_chunk(MemoryChunk *prev)
Definition: spaces.h:319
bool IsEvacuationCandidate()
Definition: spaces.h:566
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1163
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.cc:775
T RoundDown(T x, intptr_t m)
Definition: utils.h:142
void set_size(size_t size)
Definition: spaces.h:506
virtual intptr_t Size()
Definition: spaces.h:1897
Address MarkbitIndexToAddress(uint32_t index)
Definition: spaces.h:555
void InsertAfter(MemoryChunk *other)
Definition: spaces.cc:470
void set_next_page(LargePage *page)
Definition: spaces.h:746
void set_was_swept_conservatively(bool b)
Definition: spaces.h:1575
MarkBit(CellType *cell, CellType mask, bool data_only)
Definition: spaces.h:128
INLINE(MarkBit::CellType *cells())
Definition: spaces.h:206
INLINE(Address MarkbitIndexToAddress(uint32_t index))
Definition: spaces.h:2178
static const uint32_t kBitIndexMask
Definition: spaces.h:171
Page * next_page()
Definition: spaces-inl.h:224
SemiSpace * active_space()
Definition: spaces.h:2276
ObjectSpace
Definition: v8.h:2670
const int kFailureTag
Definition: v8globals.h:71
virtual Address PageAllocationLimit(Page *page)
Definition: spaces.h:2337
virtual void PrepareForMarkCompact()
Definition: spaces.cc:2506
INLINE(Address address())
Definition: spaces.h:210
void set_age_mark(Address mark)
Definition: spaces.cc:1514
static int CodePageGuardStartOffset()
Definition: spaces.cc:703
void SetPagesToSweep(Page *first)
Definition: spaces.h:1585
NewSpacePage * prev_page() const
Definition: spaces.h:1746
const int kPageSizeBits
Definition: v8globals.h:100
SkipList * skip_list()
Definition: spaces.h:572
intptr_t unswept_free_bytes_
Definition: spaces.h:1665
INLINE(static Page *FromAddress(Address a))
Definition: spaces.h:656
Address * allocation_limit_address()
Definition: spaces.h:2184
bool CommitBlock(Address start, size_t size, Executability executable)
Definition: spaces.cc:620
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2404
AllocationInfo allocation_info_
Definition: spaces.h:1648
const char * name()
Definition: spaces.h:1710
INLINE(static uint32_t CellAlignIndex(uint32_t index))
Definition: spaces.h:202
static const uint32_t kBitsPerCellLog2
Definition: spaces.h:170
void FreeRawMemory(Address buf, size_t length)
Definition: spaces.cc:245
static const int kPointersToHereAreInterestingMask
Definition: spaces.h:401
static const int kObjectStartAlignment
Definition: spaces.h:500
Executability executable()
Definition: spaces.h:515
void set_store_buffer_counter(int counter)
Definition: spaces.h:361
NewSpace(Heap *heap)
Definition: spaces.h:2066
MarkBit MarkBitFromIndex(uint32_t index, bool data_only=false)
Definition: spaces.h:218
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
Definition: spaces.cc:1043
static bool IsFreeListNode(HeapObject *object)
Definition: spaces-inl.h:354
MUST_USE_RESULT MaybeObject * FindObject(Address addr)
Definition: spaces.cc:822
#define CODE_POINTER_ALIGN(value)
Definition: v8globals.h:409
static bool IsAtStart(Address addr)
Definition: spaces.h:1760
T Min(T a, T b)
Definition: utils.h:229
virtual intptr_t SizeOfObjects()
Definition: spaces.h:2495
static void Update(Address addr, int size)
Definition: spaces.h:916
bool ShrinkTo(int new_capacity)
Definition: spaces.cc:1425
void DeallocateBytes(intptr_t size_in_bytes)
Definition: spaces.h:1271
HeapObjectIterator(PagedSpace *space)
Definition: spaces.cc:42
bool Contains(Object *o)
Definition: spaces.h:1891
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2613
void SetFlags(intptr_t flags, intptr_t mask)
Definition: spaces.h:440
void WasteBytes(int size_in_bytes)
Definition: spaces.h:1277
uint32_t AddressToMarkbitIndex(Address addr)
Definition: spaces.h:544
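AddressToMarkbitIndex, IndexToCell and the kBitsPerCell* constants describe the marking bitmap: one bit per pointer-sized word of a chunk, packed into fixed-width cells. A minimal sketch of that arithmetic with assumed constant values (the real ones are defined in globals.h and spaces.h):

    #include <cstdint>

    // Assumed values for the sketch; see kPointerSizeLog2 / kBitsPerCell above.
    const int kSketchPointerSizeLog2 = 3;                  // 8-byte words
    const uint32_t kSketchBitsPerCellLog2 = 5;             // 32 bits per cell
    const uint32_t kSketchBitIndexMask = (1u << kSketchBitsPerCellLog2) - 1;

    // One mark bit per pointer-sized word, indexed from the chunk start.
    inline uint32_t SketchAddressToMarkbitIndex(uintptr_t chunk_start, uintptr_t addr) {
      return static_cast<uint32_t>((addr - chunk_start) >> kSketchPointerSizeLog2);
    }

    // Locate the bitmap cell holding a mark bit and the mask selecting it.
    inline void SketchIndexToCellAndMask(uint32_t index, uint32_t* cell, uint32_t* mask) {
      *cell = index >> kSketchBitsPerCellLog2;
      *mask = 1u << (index & kSketchBitIndexMask);
    }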
SkipList * skip_list_
Definition: spaces.h:627
uintptr_t mask()
Definition: spaces.h:2169
void set_skip_list(SkipList *skip_list)
Definition: spaces.h:576
MemoryChunk * prev_chunk() const
Definition: spaces.h:316
static MUST_USE_RESULT bool CommitCodePage(VirtualMemory *vm, Address start, size_t size)
Definition: spaces.cc:729
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:318
VirtualMemory * reserved_memory()
Definition: spaces.h:337
Address StartFor(Address addr)
Definition: spaces.h:900
static const size_t kSize
Definition: spaces.h:178
SlotsBuffer * slots_buffer()
Definition: spaces.h:580
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:1923
MemoryChunk * next()
Definition: spaces.h:2578
static MemoryChunk * FromAnyPointerAddress(Address addr)
Definition: spaces-inl.h:197
static int RegionNumber(Address addr)
Definition: spaces.h:912
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.cc:668
bool CommitFromSpaceIfNeeded()
Definition: spaces.h:2262
AllocationSpace identity()
Definition: spaces.h:772
static const int kNonCodeObjectAreaSize
Definition: spaces.h:698
virtual MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:2319
MemoryChunk * next_chunk() const
Definition: spaces.h:315
Address FromSpacePageLow()
Definition: spaces.h:2206
virtual ~Space()
Definition: spaces.h:764