v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
mark-compact.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_MARK_COMPACT_H_
29 #define V8_MARK_COMPACT_H_
30 
31 #include "compiler-intrinsics.h"
32 #include "spaces.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 // Callback function that returns whether an object is alive. The heap size
38 // of the object is returned in size. It optionally updates the offset
39 // to the first live object in the page (only used for old and map objects).
40 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
41 
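A hedged sketch of a callback matching this signature; SampleIsAlive is a hypothetical name and the liveness test is elided, since a real callback would typically consult the object's mark bits:

static bool SampleIsAlive(HeapObject* obj, int* size, int* offset) {
  *size = obj->Size();  // report the object's heap size to the caller
  (void) offset;        // only used for old and map space objects
  return true;          // assume alive, purely for illustration
}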
42 // Forward declarations.
43 class CodeFlusher;
44 class GCTracer;
46 class MarkingVisitor;
47 class RootMarkingVisitor;
48 
49 
50 class Marking {
51  public:
52  explicit Marking(Heap* heap)
53  : heap_(heap) {
54  }
55 
56  static inline MarkBit MarkBitFrom(Address addr);
57 
58  static inline MarkBit MarkBitFrom(HeapObject* obj) {
59  return MarkBitFrom(reinterpret_cast<Address>(obj));
60  }
61 
62  // Impossible markbits: 01
63  static const char* kImpossibleBitPattern;
64  static inline bool IsImpossible(MarkBit mark_bit) {
65  return !mark_bit.Get() && mark_bit.Next().Get();
66  }
67 
68  // Black markbits: 10 - this is required by the sweeper.
69  static const char* kBlackBitPattern;
70  static inline bool IsBlack(MarkBit mark_bit) {
71  return mark_bit.Get() && !mark_bit.Next().Get();
72  }
73 
74  // White markbits: 00 - this is required by the mark bit clearer.
75  static const char* kWhiteBitPattern;
76  static inline bool IsWhite(MarkBit mark_bit) {
77  return !mark_bit.Get();
78  }
79 
80  // Grey markbits: 11
81  static const char* kGreyBitPattern;
82  static inline bool IsGrey(MarkBit mark_bit) {
83  return mark_bit.Get() && mark_bit.Next().Get();
84  }
85 
86  static inline void MarkBlack(MarkBit mark_bit) {
87  mark_bit.Set();
88  mark_bit.Next().Clear();
89  }
90 
91  static inline void BlackToGrey(MarkBit markbit) {
92  markbit.Next().Set();
93  }
94 
95  static inline void WhiteToGrey(MarkBit markbit) {
96  markbit.Set();
97  markbit.Next().Set();
98  }
99 
100  static inline void GreyToBlack(MarkBit markbit) {
101  markbit.Next().Clear();
102  }
103 
104  static inline void BlackToGrey(HeapObject* obj) {
105  BlackToGrey(MarkBitFrom(obj));
106  }
107 
108  static inline void AnyToGrey(MarkBit markbit) {
109  markbit.Set();
110  markbit.Next().Set();
111  }
112 
113  // Returns true if the object whose mark is transferred is marked black.
114  bool TransferMark(Address old_start, Address new_start);
115 
116 #ifdef DEBUG
117  enum ObjectColor {
118  BLACK_OBJECT,
119  WHITE_OBJECT,
120  GREY_OBJECT,
121  IMPOSSIBLE_COLOR
122  };
123 
124  static const char* ColorName(ObjectColor color) {
125  switch (color) {
126  case BLACK_OBJECT: return "black";
127  case WHITE_OBJECT: return "white";
128  case GREY_OBJECT: return "grey";
129  case IMPOSSIBLE_COLOR: return "impossible";
130  }
131  return "error";
132  }
133 
134  static ObjectColor Color(HeapObject* obj) {
135  return Color(Marking::MarkBitFrom(obj));
136  }
137 
138  static ObjectColor Color(MarkBit mark_bit) {
139  if (IsBlack(mark_bit)) return BLACK_OBJECT;
140  if (IsWhite(mark_bit)) return WHITE_OBJECT;
141  if (IsGrey(mark_bit)) return GREY_OBJECT;
142  UNREACHABLE();
143  return IMPOSSIBLE_COLOR;
144  }
145 #endif
146 
147  // Returns true if the transferred color is black.
148  INLINE(static bool TransferColor(HeapObject* from,
149  HeapObject* to)) {
150  MarkBit from_mark_bit = MarkBitFrom(from);
151  MarkBit to_mark_bit = MarkBitFrom(to);
152  bool is_black = false;
153  if (from_mark_bit.Get()) {
154  to_mark_bit.Set();
155  is_black = true; // Looks black so far.
156  }
157  if (from_mark_bit.Next().Get()) {
158  to_mark_bit.Next().Set();
159  is_black = false; // Was actually gray.
160  }
161  return is_black;
162  }
163 
164  private:
165  Heap* heap_;
166 };
167 
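The two-bit encoding used by class Marking above (white 00, grey 11, black 10, with 01 impossible; the first bit is the mark bit, the second is the bit returned by Next()) can be modelled in isolation. The sketch below is illustrative only and uses a hypothetical ToyColor type rather than V8's MarkBit API:

enum ToyColor { TOY_WHITE, TOY_GREY, TOY_BLACK, TOY_IMPOSSIBLE };

// mark is the object's mark bit, next is the bit returned by Next().
static ToyColor ToyColorOf(bool mark, bool next) {
  if (!mark && !next) return TOY_WHITE;   // 00: unvisited
  if (mark && next)   return TOY_GREY;    // 11: marked, still to be scanned
  if (mark && !next)  return TOY_BLACK;   // 10: marked and scanned
  return TOY_IMPOSSIBLE;                  // 01: never produced
}

// WhiteToGrey sets both bits; GreyToBlack clears only the next bit, which is
// why the sweeper can test blackness by looking for the pattern 10.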
168 // ----------------------------------------------------------------------------
169 // Marking deque for tracing live objects.
170 class MarkingDeque {
171  public:
172  MarkingDeque()
173  : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
174 
175  void Initialize(Address low, Address high) {
176  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
177  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
178  array_ = obj_low;
179  mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
180  top_ = bottom_ = 0;
181  overflowed_ = false;
182  }
183 
184  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
185 
186  inline bool IsEmpty() { return top_ == bottom_; }
187 
188  bool overflowed() const { return overflowed_; }
189 
190  void ClearOverflowed() { overflowed_ = false; }
191 
192  void SetOverflowed() { overflowed_ = true; }
193 
194  // Push the (marked) object on the marking stack if there is room,
195  // otherwise mark the object as overflowed and wait for a rescan of the
196  // heap.
197  inline void PushBlack(HeapObject* object) {
198  ASSERT(object->IsHeapObject());
199  if (IsFull()) {
200  Marking::BlackToGrey(object);
201  MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
202  SetOverflowed();
203  } else {
204  array_[top_] = object;
205  top_ = ((top_ + 1) & mask_);
206  }
207  }
208 
209  inline void PushGrey(HeapObject* object) {
210  ASSERT(object->IsHeapObject());
211  if (IsFull()) {
212  SetOverflowed();
213  } else {
214  array_[top_] = object;
215  top_ = ((top_ + 1) & mask_);
216  }
217  }
218 
219  inline HeapObject* Pop() {
220  ASSERT(!IsEmpty());
221  top_ = ((top_ - 1) & mask_);
222  HeapObject* object = array_[top_];
223  ASSERT(object->IsHeapObject());
224  return object;
225  }
226 
227  inline void UnshiftGrey(HeapObject* object) {
228  ASSERT(object->IsHeapObject());
229  if (IsFull()) {
230  SetOverflowed();
231  } else {
232  bottom_ = ((bottom_ - 1) & mask_);
233  array_[bottom_] = object;
234  }
235  }
236 
237  HeapObject** array() { return array_; }
238  int bottom() { return bottom_; }
239  int top() { return top_; }
240  int mask() { return mask_; }
241  void set_top(int top) { top_ = top; }
242 
243  private:
244  HeapObject** array_;
245  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
246  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
247  // (mod mask_ + 1).
248  int top_;
249  int bottom_;
250  int mask_;
251  bool overflowed_;
252 
253  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
254 };
255 
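MarkingDeque's index arithmetic works because Initialize() rounds the capacity down to a power of two, so masking with mask_ wraps indices around. A minimal standalone sketch of the same scheme follows; ToyDeque is hypothetical, not the V8 type:

struct ToyDeque {
  void* slots[8];          // capacity must be a power of two
  int top, bottom, mask;   // mask = capacity - 1

  ToyDeque() : top(0), bottom(0), mask(7) {}

  bool IsEmpty() const { return top == bottom; }
  bool IsFull() const { return ((top + 1) & mask) == bottom; }  // one slot kept free

  bool Push(void* p) {           // mirrors PushBlack/PushGrey without overflow handling
    if (IsFull()) return false;  // the real deque sets the overflowed_ flag here
    slots[top] = p;
    top = (top + 1) & mask;      // wrap around via the mask
    return true;
  }

  void* Pop() {                  // caller must check IsEmpty() first, as Pop() asserts
    top = (top - 1) & mask;      // decrementing 0 wraps to capacity - 1
    return slots[top];
  }
};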
256 
257 class SlotsBufferAllocator {
258  public:
259  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
260  void DeallocateBuffer(SlotsBuffer* buffer);
261 
262  void DeallocateChain(SlotsBuffer** buffer_address);
263 };
264 
265 
266 // SlotsBuffer records a sequence of slots that has to be updated
267 // after live objects were relocated from evacuation candidates.
268 // All slots are either untyped or typed:
269 // - Untyped slots are expected to contain a tagged object pointer.
270 // They are recorded by an address.
271 // - Typed slots are expected to contain an encoded pointer to a heap
272 // object where the way of encoding depends on the type of the slot.
273 // They are recorded as a pair (SlotType, slot address).
274 // We assume that the zero-page is never mapped; this allows us to distinguish
275 // untyped slots from typed slots during iteration by a simple comparison:
276 // if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES then it
277 // is the first element of a typed slot's pair.
278 class SlotsBuffer {
279  public:
280  typedef Object** ObjectSlot;
281 
282  explicit SlotsBuffer(SlotsBuffer* next_buffer)
283  : idx_(0), chain_length_(1), next_(next_buffer) {
284  if (next_ != NULL) {
285  chain_length_ = next_->chain_length_ + 1;
286  }
287  }
288 
289  ~SlotsBuffer() {
290  }
291 
292  void Add(ObjectSlot slot) {
293  ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
294  slots_[idx_++] = slot;
295  }
296 
297  enum SlotType {
298  EMBEDDED_OBJECT_SLOT,
299  RELOCATED_CODE_OBJECT,
300  CODE_TARGET_SLOT,
301  CODE_ENTRY_SLOT,
302  DEBUG_TARGET_SLOT,
303  JS_RETURN_SLOT,
304  NUMBER_OF_SLOT_TYPES
305  };
306 
307  void UpdateSlots(Heap* heap);
308 
309  void UpdateSlotsWithFilter(Heap* heap);
310 
311  SlotsBuffer* next() { return next_; }
312 
313  static int SizeOfChain(SlotsBuffer* buffer) {
314  if (buffer == NULL) return 0;
315  return static_cast<int>(buffer->idx_ +
316  (buffer->chain_length_ - 1) * kNumberOfElements);
317  }
318 
319  inline bool IsFull() {
320  return idx_ == kNumberOfElements;
321  }
322 
323  inline bool HasSpaceForTypedSlot() {
324  return idx_ < kNumberOfElements - 1;
325  }
326 
327  static void UpdateSlotsRecordedIn(Heap* heap,
328  SlotsBuffer* buffer,
329  bool code_slots_filtering_required) {
330  while (buffer != NULL) {
331  if (code_slots_filtering_required) {
332  buffer->UpdateSlotsWithFilter(heap);
333  } else {
334  buffer->UpdateSlots(heap);
335  }
336  buffer = buffer->next();
337  }
338  }
339 
340  enum AdditionMode {
341  FAIL_ON_OVERFLOW,
342  IGNORE_OVERFLOW
343  };
344 
345  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
346  return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
347  }
348 
349  static bool AddTo(SlotsBufferAllocator* allocator,
350  SlotsBuffer** buffer_address,
351  ObjectSlot slot,
352  AdditionMode mode) {
353  SlotsBuffer* buffer = *buffer_address;
354  if (buffer == NULL || buffer->IsFull()) {
355  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
356  allocator->DeallocateChain(buffer_address);
357  return false;
358  }
359  buffer = allocator->AllocateBuffer(buffer);
360  *buffer_address = buffer;
361  }
362  buffer->Add(slot);
363  return true;
364  }
365 
366  static bool IsTypedSlot(ObjectSlot slot);
367 
368  static bool AddTo(SlotsBufferAllocator* allocator,
369  SlotsBuffer** buffer_address,
370  SlotType type,
371  Address addr,
372  AdditionMode mode);
373 
374  static const int kNumberOfElements = 1021;
375 
376  private:
377  static const int kChainLengthThreshold = 15;
378 
379  intptr_t idx_;
380  intptr_t chain_length_;
381  SlotsBuffer* next_;
382  ObjectSlot slots_[kNumberOfElements];
383 };
384 
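Two details of SlotsBuffer above benefit from a worked illustration. First, SizeOfChain counts the head buffer's idx_ entries plus kNumberOfElements for each buffer behind it, so a chain of three buffers whose head holds 10 entries reports 10 + 2 * 1021 = 2052 recorded slots. Second, the zero-page assumption lets iteration tell typed from untyped entries with a single comparison. The sketch below is illustrative only (the real walk lives in UpdateSlots/UpdateSlotsWithFilter); the helper name and the uintptr_t view of the entries are assumptions:

// Hypothetical walk over recorded slots, showing how values below
// NUMBER_OF_SLOT_TYPES are read as (SlotType, address) pairs.
static void SketchIterateSlots(uintptr_t* entries, int count,
                               uintptr_t number_of_slot_types) {
  for (int i = 0; i < count; i++) {
    uintptr_t entry = entries[i];
    if (entry < number_of_slot_types) {
      // Typed slot: 'entry' is the SlotType tag and the next element holds
      // the slot address; the pair occupies two entries, which is why
      // HasSpaceForTypedSlot() requires idx_ < kNumberOfElements - 1.
      uintptr_t address = entries[++i];
      (void) address;  // decode according to the slot type
    } else {
      // Untyped slot: 'entry' itself is the address of a tagged pointer.
    }
  }
}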
385 
386 // -------------------------------------------------------------------------
387 // Marker shared between incremental and non-incremental marking
388 template<class BaseMarker> class Marker {
389  public:
390  Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
391  : base_marker_(base_marker),
392  mark_compact_collector_(mark_compact_collector) {}
393 
394  // Mark pointers in a Map and its DescriptorArray together, possibly
395  // treating transitions or back pointers as weak.
396  void MarkMapContents(Map* map);
397  void MarkDescriptorArray(DescriptorArray* descriptors);
398  void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
399 
400  private:
401  BaseMarker* base_marker() {
402  return base_marker_;
403  }
404 
405  MarkCompactCollector* mark_compact_collector() {
406  return mark_compact_collector_;
407  }
408 
409  BaseMarker* base_marker_;
410  MarkCompactCollector* mark_compact_collector_;
411 };
412 
413 
414 // Defined in isolate.h.
415 class ThreadLocalTop;
416 
417 
418 // -------------------------------------------------------------------------
419 // Mark-Compact collector
420 class MarkCompactCollector {
421  public:
422  // Type of functions to compute forwarding addresses of objects in
423  // compacted spaces. Given an object and its size, return a (non-failure)
424  // Object* that will be the object after forwarding. There is a separate
425  // allocation function for each (compactable) space based on the location
426  // of the object before compaction.
427  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
428  HeapObject* object,
429  int object_size);
430 
431  // Type of functions to encode the forwarding address for an object.
432  // Given the object, its size, and the new (non-failure) object it will be
433  // forwarded to, encode the forwarding address. For paged spaces, the
434  // 'offset' input/output parameter contains the offset of the forwarded
435  // object from the forwarding address of the previous live object in the
436  // page as input, and is updated to contain the offset to be used for the
437  // next live object in the same page. For spaces using a different
438  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
439  typedef void (*EncodingFunction)(Heap* heap,
440  HeapObject* old_object,
441  int object_size,
442  Object* new_object,
443  int* offset);
444 
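 // An illustration of the offset protocol described above; 'encode' stands
 // for any EncodingFunction and the call sequence is hypothetical, not part
 // of this header:
 //
 //   int offset = 0;                                   // fresh page
 //   encode(heap, first_live, size1, fwd1, &offset);   // reads 0, writes the
 //                                                     // offset for the next
 //                                                     // live object
 //   encode(heap, second_live, size2, fwd2, &offset);  // consumes the updated
 //                                                     // value
 //
 // For contiguous (non-paged) spaces the same calls simply ignore 'offset'.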
445  // Type of functions to process non-live objects.
446  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
447 
448  // Pointer to member function, used in IterateLiveObjects.
449  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
450 
451  // Set the global flags. This must be called before Prepare to take effect.
452  inline void SetFlags(int flags);
453 
454  static void Initialize();
455 
456  void CollectEvacuationCandidates(PagedSpace* space);
457 
458  void AddEvacuationCandidate(Page* p);
459 
460  // Prepares for GC by resetting relocation info in old and map spaces and
461  // choosing spaces to compact.
462  void Prepare(GCTracer* tracer);
463 
464  // Performs a global garbage collection.
465  void CollectGarbage();
466 
467  enum CompactionMode {
468  INCREMENTAL_COMPACTION,
469  NON_INCREMENTAL_COMPACTION
470  };
471 
472  bool StartCompaction(CompactionMode mode);
473 
474  void AbortCompaction();
475 
476  // During a full GC, there is a stack-allocated GCTracer that is used for
477  // bookkeeping information. Return a pointer to that tracer.
478  GCTracer* tracer() { return tracer_; }
479 
480 #ifdef DEBUG
482  // Checks whether a mark-compact collection is being performed.
482  bool in_use() { return state_ > PREPARE_GC; }
483  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
484 #endif
485 
486  // Determine type of object and emit deletion log event.
487  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
488 
489  // Distinguishable invalid map encodings (for single word and multiple words)
490  // that indicate free regions.
491  static const uint32_t kSingleFreeEncoding = 0;
492  static const uint32_t kMultiFreeEncoding = 1;
493 
494  static inline bool IsMarked(Object* obj);
495 
496  inline Heap* heap() const { return heap_; }
497 
498  CodeFlusher* code_flusher() { return code_flusher_; }
499  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
500  void EnableCodeFlushing(bool enable);
501 
502  enum SweeperType {
503  CONSERVATIVE,
504  LAZY_CONSERVATIVE,
505  PRECISE
506  };
507 
508 #ifdef DEBUG
509  void VerifyMarkbitsAreClean();
510  static void VerifyMarkbitsAreClean(PagedSpace* space);
511  static void VerifyMarkbitsAreClean(NewSpace* space);
512 #endif
513 
514  // Sweep a single page from the given space conservatively.
515  // Return the number of reclaimed bytes.
516  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
517 
518  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
519  return Page::FromAddress(reinterpret_cast<Address>(anchor))->
520  ShouldSkipEvacuationSlotRecording();
521  }
522 
523  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
524  return Page::FromAddress(reinterpret_cast<Address>(host))->
525  ShouldSkipEvacuationSlotRecording();
526  }
527 
528  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
529  return Page::FromAddress(reinterpret_cast<Address>(obj))->
530  IsEvacuationCandidate();
531  }
532 
533  void EvictEvacuationCandidate(Page* page) {
534  if (FLAG_trace_fragmentation) {
535  PrintF("Page %p is too popular. Disabling evacuation.\n",
536  reinterpret_cast<void*>(page));
537  }
538 
539  // TODO(gc) If all evacuation candidates are too popular we
540  // should stop slots recording entirely.
541  page->ClearEvacuationCandidate();
542 
543  // We were not collecting slots on this page that point
544  // to other evacuation candidates; thus we have to
545  // rescan the page after evacuation to discover and update all
546  // pointers to evacuated objects.
547  if (page->owner()->identity() == OLD_DATA_SPACE) {
548  evacuation_candidates_.RemoveElement(page);
549  } else {
550  page->SetFlag(Page::RESCAN_ON_EVACUATION);
551  }
552  }
553 
554  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
555  void RecordCodeEntrySlot(Address slot, Code* target);
556 
557  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
558 
559  void MigrateObject(Address dst,
560  Address src,
561  int size,
562  AllocationSpace to_old_space);
563 
564  bool TryPromoteObject(HeapObject* object, int object_size);
565 
566  inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
567  inline void set_encountered_weak_maps(Object* weak_map) {
568  encountered_weak_maps_ = weak_map;
569  }
570 
571  void InvalidateCode(Code* code);
572 
573  void ClearMarkbits();
574 
575  bool is_compacting() const { return compacting_; }
576 
577  private:
578  MarkCompactCollector();
579  ~MarkCompactCollector();
580 
581  bool MarkInvalidatedCode();
582  void RemoveDeadInvalidatedCode();
583  void ProcessInvalidatedCode(ObjectVisitor* visitor);
584 
585 
586 #ifdef DEBUG
587  enum CollectorState {
588  IDLE,
589  PREPARE_GC,
590  MARK_LIVE_OBJECTS,
591  SWEEP_SPACES,
592  ENCODE_FORWARDING_ADDRESSES,
593  UPDATE_POINTERS,
594  RELOCATE_OBJECTS
595  };
596 
597  // The current stage of the collector.
598  CollectorState state_;
599 #endif
600 
601  // Global flag that forces sweeping to be precise, so we can traverse the
602  // heap.
603  bool sweep_precisely_;
604 
605  bool reduce_memory_footprint_;
606 
607  bool abort_incremental_marking_;
608 
609  // True if we are collecting slots to perform evacuation from evacuation
610  // candidates.
611  bool compacting_;
612 
613  bool was_marked_incrementally_;
614 
615  bool flush_monomorphic_ics_;
616 
617  // A pointer to the current stack-allocated GC tracer object during a full
618  // collection (NULL before and after).
619  GCTracer* tracer_;
620 
621  SlotsBufferAllocator slots_buffer_allocator_;
622 
623  SlotsBuffer* migration_slots_buffer_;
624 
625  // Finishes GC, performs heap verification if enabled.
626  void Finish();
627 
628  // -----------------------------------------------------------------------
629  // Phase 1: Marking live objects.
630  //
631  // Before: The heap has been prepared for garbage collection by
632  // MarkCompactCollector::Prepare() and is otherwise in its
633  // normal state.
634  //
635  // After: Live objects are marked and non-live objects are unmarked.
636 
637  friend class RootMarkingVisitor;
638  friend class MarkingVisitor;
639  friend class StaticMarkingVisitor;
640  friend class CodeMarkingVisitor;
642  friend class Marker<IncrementalMarking>;
643  friend class Marker<MarkCompactCollector>;
644 
645  // Mark non-optimized code for functions inlined into the given optimized
646  // code. This will prevent it from being flushed.
647  void MarkInlinedFunctionsCode(Code* code);
648 
649  // Mark code objects that are active on the stack to prevent them
650  // from being flushed.
651  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
652 
653  void PrepareForCodeFlushing();
654 
655  // Marking operations for objects reachable from roots.
656  void MarkLiveObjects();
657 
658  void AfterMarking();
659 
660  // Marks the object black and pushes it on the marking stack.
661  // Returns true if object needed marking and false otherwise.
662  // This is for non-incremental marking only.
663  INLINE(bool MarkObjectAndPush(HeapObject* obj));
664 
665  // Marks the object black and pushes it on the marking stack.
666  // This is for non-incremental marking only.
667  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
668 
669  // Marks the object black without pushing it on the marking stack.
670  // Returns true if object needed marking and false otherwise.
671  // This is for non-incremental marking only.
672  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
673 
674  // Marks the object black assuming that it is not yet marked.
675  // This is for non-incremental marking only.
676  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
677 
678  void ProcessNewlyMarkedObject(HeapObject* obj);
679 
680  // Mark the heap roots and all objects reachable from them.
681  void MarkRoots(RootMarkingVisitor* visitor);
682 
683  // Mark the symbol table specially. References to symbols from the
684  // symbol table are weak.
685  void MarkSymbolTable();
686 
687  // Mark objects in object groups that have at least one object in the
688  // group marked.
689  void MarkObjectGroups();
690 
691  // Mark objects in implicit references groups if their parent object
692  // is marked.
693  void MarkImplicitRefGroups();
694 
695  // Mark all objects which are reachable due to host application
696  // logic like object groups or implicit reference groups.
697  void ProcessExternalMarking();
698 
699  // Mark objects reachable (transitively) from objects in the marking stack
700  // or overflowed in the heap.
701  void ProcessMarkingDeque();
702 
703  // Mark objects reachable (transitively) from objects in the marking
704  // stack. This function empties the marking stack, but may leave
705  // overflowed objects in the heap, in which case the marking stack's
706  // overflow flag will be set.
707  void EmptyMarkingDeque();
708 
709  // Refill the marking stack with overflowed objects from the heap. This
710  // function either leaves the marking stack full or clears the overflow
711  // flag on the marking stack.
712  void RefillMarkingDeque();
713 
714  // After reachable maps have been marked, process per-context object
715  // literal map caches, removing unmarked entries.
716  void ProcessMapCaches();
717 
718  // Callback function for telling whether the object *p is an unmarked
719  // heap object.
720  static bool IsUnmarkedHeapObject(Object** p);
721 
722  // Map transitions from a live map to a dead map must be killed.
723  // We replace them with a null descriptor, with the same key.
724  void ClearNonLiveTransitions();
725  void ClearNonLivePrototypeTransitions(Map* map);
726  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
727 
728  // Marking detaches initial maps from SharedFunctionInfo objects
729  // to make this reference weak. We need to reattach initial maps
730  // after collection. This is done either during the
731  // ClearNonLiveTransitions pass or by calling this function.
732  void ReattachInitialMaps();
733 
734  // Mark all values associated with reachable keys in weak maps encountered
735  // so far. This might push new objects or even new weak maps onto the
736  // marking stack.
737  void ProcessWeakMaps();
738 
739  // After all reachable objects have been marked, those weak map entries
740  // with an unreachable key are removed from all encountered weak maps.
741  // The linked list of all encountered weak maps is destroyed.
742  void ClearWeakMaps();
743 
744  // -----------------------------------------------------------------------
745  // Phase 2: Sweeping to clear mark bits and free non-live objects for
746  // a non-compacting collection.
747  //
748  // Before: Live objects are marked and non-live objects are unmarked.
749  //
750  // After: Live objects are unmarked, non-live regions have been added to
751  // their space's free list. Active eden semispace is compacted by
752  // evacuation.
753  //
754 
755  // If we are not compacting the heap, we simply sweep the spaces except
756  // for the large object space, clearing mark bits and adding unmarked
757  // regions to each space's free list.
758  void SweepSpaces();
759 
760  void EvacuateNewSpace();
761 
762  void EvacuateLiveObjectsFromPage(Page* p);
763 
764  void EvacuatePages();
765 
766  void EvacuateNewSpaceAndCandidates();
767 
768  void SweepSpace(PagedSpace* space, SweeperType sweeper);
769 
770 #ifdef DEBUG
771  friend class MarkObjectVisitor;
772  static void VisitObject(HeapObject* obj);
773 
774  friend class UnmarkObjectVisitor;
775  static void UnmarkObject(HeapObject* obj);
776 #endif
777 
778  Heap* heap_;
779  MarkingDeque marking_deque_;
780  CodeFlusher* code_flusher_;
781  Object* encountered_weak_maps_;
782  Marker<MarkCompactCollector> marker_;
783 
784  List<Page*> evacuation_candidates_;
785  List<Code*> invalidated_code_;
786 
787  friend class Heap;
788 };
789 
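Taken together, the ProcessMarkingDeque, EmptyMarkingDeque and RefillMarkingDeque declarations above imply a drain-and-refill loop over the marking deque. The following is a sketch of the control flow suggested by their comments, not the actual method body:

//   EmptyMarkingDeque();                  // drain; may set the overflow flag
//   while (marking_deque_.overflowed()) {
//     RefillMarkingDeque();               // rescan the heap for overflowed
//                                         // (grey) objects; either fills the
//                                         // deque or clears the flag
//     EmptyMarkingDeque();                // drain whatever was refilled
//   }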
790 
791 const char* AllocationSpaceName(AllocationSpace space);
792 
793 } } // namespace v8::internal
794 
795 #endif // V8_MARK_COMPACT_H_