v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
mark-compact.h
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_MARK_COMPACT_H_
29 #define V8_MARK_COMPACT_H_
30 
31 #include "compiler-intrinsics.h"
32 #include "spaces.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 // Callback function, returns whether an object is alive. The heap size
38 // of the object is returned in size. It optionally updates the offset
39 // to the first live object in the page (only used for old and map objects).
40 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
41 
42 // Forward declarations.
43 class CodeFlusher;
44 class GCTracer;
45 class MarkCompactCollector;
46 class MarkingVisitor;
47 class RootMarkingVisitor;
48 
49 
50 class Marking {
51  public:
52  explicit Marking(Heap* heap)
53  : heap_(heap) {
54  }
55 
56  static inline MarkBit MarkBitFrom(Address addr);
57 
58  static inline MarkBit MarkBitFrom(HeapObject* obj) {
59  return MarkBitFrom(reinterpret_cast<Address>(obj));
60  }
61 
62  // Impossible markbits: 01
63  static const char* kImpossibleBitPattern;
64  static inline bool IsImpossible(MarkBit mark_bit) {
65  return !mark_bit.Get() && mark_bit.Next().Get();
66  }
67 
68  // Black markbits: 10 - this is required by the sweeper.
69  static const char* kBlackBitPattern;
70  static inline bool IsBlack(MarkBit mark_bit) {
71  return mark_bit.Get() && !mark_bit.Next().Get();
72  }
73 
74  // White markbits: 00 - this is required by the mark bit clearer.
75  static const char* kWhiteBitPattern;
76  static inline bool IsWhite(MarkBit mark_bit) {
77  return !mark_bit.Get();
78  }
79 
80  // Grey markbits: 11
81  static const char* kGreyBitPattern;
82  static inline bool IsGrey(MarkBit mark_bit) {
83  return mark_bit.Get() && mark_bit.Next().Get();
84  }
85 
86  static inline void MarkBlack(MarkBit mark_bit) {
87  mark_bit.Set();
88  mark_bit.Next().Clear();
89  }
90 
91  static inline void BlackToGrey(MarkBit markbit) {
92  markbit.Next().Set();
93  }
94 
95  static inline void WhiteToGrey(MarkBit markbit) {
96  markbit.Set();
97  markbit.Next().Set();
98  }
99 
100  static inline void GreyToBlack(MarkBit markbit) {
101  markbit.Next().Clear();
102  }
103 
104  static inline void BlackToGrey(HeapObject* obj) {
105  BlackToGrey(MarkBitFrom(obj));
106  }
107 
108  static inline void AnyToGrey(MarkBit markbit) {
109  markbit.Set();
110  markbit.Next().Set();
111  }
112 
113  // Returns true if the object whose mark is transferred is marked black.
114  bool TransferMark(Address old_start, Address new_start);
115 
116 #ifdef DEBUG
117  enum ObjectColor {
118  BLACK_OBJECT,
119  WHITE_OBJECT,
120  GREY_OBJECT,
121  IMPOSSIBLE_COLOR
122  };
123 
124  static const char* ColorName(ObjectColor color) {
125  switch (color) {
126  case BLACK_OBJECT: return "black";
127  case WHITE_OBJECT: return "white";
128  case GREY_OBJECT: return "grey";
129  case IMPOSSIBLE_COLOR: return "impossible";
130  }
131  return "error";
132  }
133 
134  static ObjectColor Color(HeapObject* obj) {
135  return Color(Marking::MarkBitFrom(obj));
136  }
137 
138  static ObjectColor Color(MarkBit mark_bit) {
139  if (IsBlack(mark_bit)) return BLACK_OBJECT;
140  if (IsWhite(mark_bit)) return WHITE_OBJECT;
141  if (IsGrey(mark_bit)) return GREY_OBJECT;
142  UNREACHABLE();
143  return IMPOSSIBLE_COLOR;
144  }
145 #endif
146 
147  // Returns true if the transferred color is black.
148  INLINE(static bool TransferColor(HeapObject* from,
149  HeapObject* to)) {
150  MarkBit from_mark_bit = MarkBitFrom(from);
151  MarkBit to_mark_bit = MarkBitFrom(to);
152  bool is_black = false;
153  if (from_mark_bit.Get()) {
154  to_mark_bit.Set();
155  is_black = true; // Looks black so far.
156  }
157  if (from_mark_bit.Next().Get()) {
158  to_mark_bit.Next().Set();
159  is_black = false; // Was actually gray.
160  }
161  return is_black;
162  }
163 
164  private:
165  Heap* heap_;
166 };
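The two-bit encoding above drives the usual tri-colour marking discipline: an object starts white (00), turns grey (11) when it is discovered and pushed on the marking deque, and turns black (10) once its fields have been scanned; 01 is never produced and is only checked for in assertions. A minimal, self-contained sketch of that state machine, using a plain struct in place of V8's real MarkBit (which reads two adjacent bits out of a page's marking bitmap), might look like this:

    #include <cassert>

    // Toy stand-in for MarkBit: two bits per object, interpreted as
    // white (00), black (10), grey (11), impossible (01).
    struct ToyMarkBit {
      bool mark = false;   // corresponds to mark_bit.Get()
      bool next = false;   // corresponds to mark_bit.Next().Get()
    };

    static bool IsWhite(const ToyMarkBit& b) { return !b.mark; }
    static bool IsGrey(const ToyMarkBit& b)  { return b.mark && b.next; }
    static bool IsBlack(const ToyMarkBit& b) { return b.mark && !b.next; }

    static void WhiteToGrey(ToyMarkBit& b) { b.mark = true; b.next = true; }
    static void GreyToBlack(ToyMarkBit& b) { b.next = false; }

    int main() {
      ToyMarkBit b;        // freshly allocated object: white
      assert(IsWhite(b));
      WhiteToGrey(b);      // discovered, pushed on the marking deque
      assert(IsGrey(b));
      GreyToBlack(b);      // popped from the deque, fields scanned
      assert(IsBlack(b));
      return 0;
    }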
167 
168 // ----------------------------------------------------------------------------
169 // Marking deque for tracing live objects.
170 class MarkingDeque {
171  public:
172  MarkingDeque()
173  : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
174 
175  void Initialize(Address low, Address high) {
176  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
177  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
178  array_ = obj_low;
179  mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
180  top_ = bottom_ = 0;
181  overflowed_ = false;
182  }
183 
184  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
185 
186  inline bool IsEmpty() { return top_ == bottom_; }
187 
188  bool overflowed() const { return overflowed_; }
189 
190  void ClearOverflowed() { overflowed_ = false; }
191 
192  void SetOverflowed() { overflowed_ = true; }
193 
194  // Push the (marked) object on the marking stack if there is room,
195  // otherwise mark the object as overflowed and wait for a rescan of the
196  // heap.
197  inline void PushBlack(HeapObject* object) {
198  ASSERT(object->IsHeapObject());
199  if (IsFull()) {
200  Marking::BlackToGrey(object);
201  MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
202  SetOverflowed();
203  } else {
204  array_[top_] = object;
205  top_ = ((top_ + 1) & mask_);
206  }
207  }
208 
209  inline void PushGrey(HeapObject* object) {
210  ASSERT(object->IsHeapObject());
211  if (IsFull()) {
212  SetOverflowed();
213  } else {
214  array_[top_] = object;
215  top_ = ((top_ + 1) & mask_);
216  }
217  }
218 
219  inline HeapObject* Pop() {
220  ASSERT(!IsEmpty());
221  top_ = ((top_ - 1) & mask_);
222  HeapObject* object = array_[top_];
223  ASSERT(object->IsHeapObject());
224  return object;
225  }
226 
227  inline void UnshiftGrey(HeapObject* object) {
228  ASSERT(object->IsHeapObject());
229  if (IsFull()) {
230  SetOverflowed();
231  } else {
232  bottom_ = ((bottom_ - 1) & mask_);
233  array_[bottom_] = object;
234  }
235  }
236 
237  HeapObject** array() { return array_; }
238  int bottom() { return bottom_; }
239  int top() { return top_; }
240  int mask() { return mask_; }
241  void set_top(int top) { top_ = top; }
242 
243  private:
244  HeapObject** array_;
245  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
246  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
247  // (mod mask_ + 1).
248  int top_;
249  int bottom_;
250  int mask_;
251  bool overflowed_;
252 
253  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
254 };
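MarkingDeque is a fixed-size ring buffer: Initialize rounds the backing area down to a power of two, so wrapping an index is a single "& mask_", one slot is sacrificed to distinguish full from empty, and a black object that does not fit is demoted to grey and the overflow flag is set so the heap can be rescanned later. A hedged sketch of the same index arithmetic, with plain ints standing in for HeapObject pointers, is shown below:

    #include <cassert>
    #include <vector>

    // Toy ring-deque mirroring MarkingDeque's index arithmetic (illustrative only).
    class ToyDeque {
     public:
      explicit ToyDeque(int capacity)  // capacity must be a power of two
          : array_(capacity), top_(0), bottom_(0), mask_(capacity - 1) {}

      bool IsEmpty() const { return top_ == bottom_; }
      bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

      void Push(int value) {           // like PushGrey, minus overflow handling
        assert(!IsFull());
        array_[top_] = value;
        top_ = (top_ + 1) & mask_;
      }

      int Pop() {                      // pops the most recently pushed element
        assert(!IsEmpty());
        top_ = (top_ - 1) & mask_;
        return array_[top_];
      }

     private:
      std::vector<int> array_;
      int top_, bottom_, mask_;
    };

    int main() {
      ToyDeque deque(8);               // holds at most 7 elements
      for (int i = 0; i < 7; ++i) deque.Push(i);
      assert(deque.IsFull());
      assert(deque.Pop() == 6);        // LIFO order, as in MarkingDeque::Pop
      return 0;
    }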
255 
256 
257 class SlotsBufferAllocator {
258  public:
259  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
260  void DeallocateBuffer(SlotsBuffer* buffer);
261 
262  void DeallocateChain(SlotsBuffer** buffer_address);
263 };
264 
265 
266 // SlotsBuffer records a sequence of slots that has to be updated
267 // after live objects were relocated from evacuation candidates.
268 // All slots are either untyped or typed:
269 // - Untyped slots are expected to contain a tagged object pointer.
270 // They are recorded by an address.
271 // - Typed slots are expected to contain an encoded pointer to a heap
272 // object where the way of encoding depends on the type of the slot.
273 // They are recorded as a pair (SlotType, slot address).
274 // We assume that the zero-page is never mapped; this allows us to distinguish
275 // untyped slots from typed slots during iteration by a simple comparison:
276 // if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES then it
277 // is the first element of a typed slot's pair.
278 class SlotsBuffer {
279  public:
280  typedef Object** ObjectSlot;
281 
282  explicit SlotsBuffer(SlotsBuffer* next_buffer)
283  : idx_(0), chain_length_(1), next_(next_buffer) {
284  if (next_ != NULL) {
285  chain_length_ = next_->chain_length_ + 1;
286  }
287  }
288 
289  ~SlotsBuffer() {
290  }
291 
292  void Add(ObjectSlot slot) {
293  ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
294  slots_[idx_++] = slot;
295  }
296 
297  enum SlotType {
298  EMBEDDED_OBJECT_SLOT,
299  RELOCATED_CODE_OBJECT,
300  CODE_TARGET_SLOT,
301  CODE_ENTRY_SLOT,
302  DEBUG_TARGET_SLOT,
303  JS_RETURN_SLOT,
304  NUMBER_OF_SLOT_TYPES
305  };
306 
307  static const char* SlotTypeToString(SlotType type) {
308  switch (type) {
309  case EMBEDDED_OBJECT_SLOT:
310  return "EMBEDDED_OBJECT_SLOT";
311  case RELOCATED_CODE_OBJECT:
312  return "RELOCATED_CODE_OBJECT";
313  case CODE_TARGET_SLOT:
314  return "CODE_TARGET_SLOT";
315  case CODE_ENTRY_SLOT:
316  return "CODE_ENTRY_SLOT";
317  case DEBUG_TARGET_SLOT:
318  return "DEBUG_TARGET_SLOT";
319  case JS_RETURN_SLOT:
320  return "JS_RETURN_SLOT";
321  case NUMBER_OF_SLOT_TYPES:
322  return "NUMBER_OF_SLOT_TYPES";
323  }
324  return "UNKNOWN SlotType";
325  }
326 
327  void UpdateSlots(Heap* heap);
328 
329  void UpdateSlotsWithFilter(Heap* heap);
330 
331  SlotsBuffer* next() { return next_; }
332 
333  static int SizeOfChain(SlotsBuffer* buffer) {
334  if (buffer == NULL) return 0;
335  return static_cast<int>(buffer->idx_ +
336  (buffer->chain_length_ - 1) * kNumberOfElements);
337  }
338 
339  inline bool IsFull() {
340  return idx_ == kNumberOfElements;
341  }
342 
343  inline bool HasSpaceForTypedSlot() {
344  return idx_ < kNumberOfElements - 1;
345  }
346 
347  static void UpdateSlotsRecordedIn(Heap* heap,
348  SlotsBuffer* buffer,
349  bool code_slots_filtering_required) {
350  while (buffer != NULL) {
351  if (code_slots_filtering_required) {
352  buffer->UpdateSlotsWithFilter(heap);
353  } else {
354  buffer->UpdateSlots(heap);
355  }
356  buffer = buffer->next();
357  }
358  }
359 
359 
360  enum AdditionMode {
361  FAIL_ON_OVERFLOW,
362  IGNORE_OVERFLOW
363  };
364 
365  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
366  return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
367  }
368 
369  static bool AddTo(SlotsBufferAllocator* allocator,
370  SlotsBuffer** buffer_address,
371  ObjectSlot slot,
372  AdditionMode mode) {
373  SlotsBuffer* buffer = *buffer_address;
374  if (buffer == NULL || buffer->IsFull()) {
375  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
376  allocator->DeallocateChain(buffer_address);
377  return false;
378  }
379  buffer = allocator->AllocateBuffer(buffer);
380  *buffer_address = buffer;
381  }
382  buffer->Add(slot);
383  return true;
384  }
385 
386  static bool IsTypedSlot(ObjectSlot slot);
387 
388  static bool AddTo(SlotsBufferAllocator* allocator,
389  SlotsBuffer** buffer_address,
390  SlotType type,
391  Address addr,
392  AdditionMode mode);
393 
394  static const int kNumberOfElements = 1021;
395 
396  private:
397  static const int kChainLengthThreshold = 15;
398 
399  intptr_t idx_;
400  intptr_t chain_length_;
401  SlotsBuffer* next_;
402  ObjectSlot slots_[kNumberOfElements];
403 };
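As the comment above the class explains, typed and untyped entries share one array: a value below NUMBER_OF_SLOT_TYPES cannot be a real slot address (the zero page is assumed unmapped), so such an entry is read as the type tag of a (SlotType, address) pair and the following entry is its address. A simplified sketch of that decoding loop, with hypothetical toy types standing in for V8's, could look like:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical, trimmed-down slot types for this sketch only.
    enum ToySlotType { EMBEDDED_OBJECT_SLOT, CODE_TARGET_SLOT, NUMBER_OF_SLOT_TYPES };

    void IterateSlots(const uintptr_t* entries, int count) {
      for (int i = 0; i < count; ++i) {
        uintptr_t entry = entries[i];
        if (entry < NUMBER_OF_SLOT_TYPES) {
          // Small values can never be pointers, so this is a typed pair.
          ToySlotType type = static_cast<ToySlotType>(entry);
          uintptr_t address = entries[++i];
          std::printf("typed slot %d at %p\n", type, reinterpret_cast<void*>(address));
        } else {
          // Everything else is an untyped slot: the address of a tagged pointer.
          std::printf("untyped slot at %p\n", reinterpret_cast<void*>(entry));
        }
      }
    }

    int main() {
      // One typed pair (CODE_TARGET_SLOT, 0x1000) followed by one untyped slot.
      uintptr_t entries[] = { CODE_TARGET_SLOT, 0x1000, 0x2000 };
      IterateSlots(entries, 3);
      return 0;
    }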
404 
405 
406 // CodeFlusher collects candidates for code flushing during marking and
407 // processes those candidates after marking has completed in order to
408 // reset those functions referencing code objects that would otherwise
409 // be unreachable. Code objects can be referenced in two ways:
410 // - SharedFunctionInfo references unoptimized code.
411 // - JSFunction references either unoptimized or optimized code.
412 // We are not allowed to flush unoptimized code for functions that got
413 // optimized or inlined into optimized code, because we might bail out
414 // into the unoptimized code again during deoptimization.
415 class CodeFlusher {
416  public:
417  explicit CodeFlusher(Isolate* isolate)
418  : isolate_(isolate),
419  jsfunction_candidates_head_(NULL),
420  shared_function_info_candidates_head_(NULL) {}
421 
422  void AddCandidate(SharedFunctionInfo* shared_info) {
423  SetNextCandidate(shared_info, shared_function_info_candidates_head_);
424  shared_function_info_candidates_head_ = shared_info;
425  }
426 
427  void AddCandidate(JSFunction* function) {
428  ASSERT(function->code() == function->shared()->code());
429  ASSERT(function->next_function_link()->IsUndefined());
430  SetNextCandidate(function, jsfunction_candidates_head_);
431  jsfunction_candidates_head_ = function;
432  }
433 
433 
434  void ProcessCandidates() {
435  ProcessSharedFunctionInfoCandidates();
436  ProcessJSFunctionCandidates();
437  }
438 
439  private:
440  void ProcessJSFunctionCandidates();
441  void ProcessSharedFunctionInfoCandidates();
442 
443  static JSFunction* GetNextCandidate(JSFunction* candidate) {
444  Object* next_candidate = candidate->next_function_link();
445  return reinterpret_cast<JSFunction*>(next_candidate);
446  }
447 
448  static void SetNextCandidate(JSFunction* candidate,
449  JSFunction* next_candidate) {
450  candidate->set_next_function_link(next_candidate);
451  }
452 
453  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
454  ASSERT(undefined->IsUndefined());
455  candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
456  }
457 
458  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
459  Object* next_candidate = candidate->code()->gc_metadata();
460  return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
461  }
462 
463  static void SetNextCandidate(SharedFunctionInfo* candidate,
464  SharedFunctionInfo* next_candidate) {
465  candidate->code()->set_gc_metadata(next_candidate);
466  }
467 
468  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
469  candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
470  }
471 
472  Isolate* isolate_;
473  JSFunction* jsfunction_candidates_head_;
474  SharedFunctionInfo* shared_function_info_candidates_head_;
475 
476  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
477 };
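CodeFlusher keeps its candidate lists intrusive: a JSFunction is linked through its existing next_function_link field, and a SharedFunctionInfo through the gc_metadata slot of its code object, so enqueueing a candidate during marking allocates nothing. The toy sketch below (not V8 code) shows the same intrusive-list pattern with a single reused field:

    #include <cassert>

    struct ToyCandidate {
      int id;
      ToyCandidate* next_link;            // field reused as the list pointer
    };

    class ToyFlusher {
     public:
      void AddCandidate(ToyCandidate* candidate) {   // push onto the head
        candidate->next_link = head_;
        head_ = candidate;
      }
      int ProcessCandidates() {                      // walk, clear links, count
        int processed = 0;
        while (head_ != nullptr) {
          ToyCandidate* candidate = head_;
          head_ = candidate->next_link;
          candidate->next_link = nullptr;            // like ClearNextCandidate
          ++processed;
        }
        return processed;
      }
     private:
      ToyCandidate* head_ = nullptr;
    };

    int main() {
      ToyCandidate a{1, nullptr}, b{2, nullptr};
      ToyFlusher flusher;
      flusher.AddCandidate(&a);
      flusher.AddCandidate(&b);
      assert(flusher.ProcessCandidates() == 2);
      return 0;
    }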
478 
479 
480 // Defined in isolate.h.
481 class ThreadLocalTop;
482 
483 
484 // -------------------------------------------------------------------------
485 // Mark-Compact collector
486 class MarkCompactCollector {
487  public:
488  // Type of functions to compute forwarding addresses of objects in
489  // compacted spaces. Given an object and its size, return a (non-failure)
490  // Object* that will be the object after forwarding. There is a separate
491  // allocation function for each (compactable) space based on the location
492  // of the object before compaction.
493  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
494  HeapObject* object,
495  int object_size);
496 
497  // Type of functions to encode the forwarding address for an object.
498  // Given the object, its size, and the new (non-failure) object it will be
499  // forwarded to, encode the forwarding address. For paged spaces, the
500  // 'offset' input/output parameter contains the offset of the forwarded
501  // object from the forwarding address of the previous live object in the
502  // page as input, and is updated to contain the offset to be used for the
503  // next live object in the same page. For spaces using a different
504  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
505  typedef void (*EncodingFunction)(Heap* heap,
506  HeapObject* old_object,
507  int object_size,
508  Object* new_object,
509  int* offset);
510 
511  // Type of functions to process non-live objects.
512  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
513 
514  // Pointer to member function, used in IterateLiveObjects.
515  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
516 
517  // Set the global flags; this must be called before Prepare to take effect.
518  inline void SetFlags(int flags);
519 
520  static void Initialize();
521 
522  void CollectEvacuationCandidates(PagedSpace* space);
523 
524  void AddEvacuationCandidate(Page* p);
525 
526  // Prepares for GC by resetting relocation info in old and map spaces and
527  // choosing spaces to compact.
528  void Prepare(GCTracer* tracer);
529 
530  // Performs a global garbage collection.
531  void CollectGarbage();
532 
532 
533  enum CompactionMode {
534  INCREMENTAL_COMPACTION,
535  NON_INCREMENTAL_COMPACTION
536  };
537 
538  bool StartCompaction(CompactionMode mode);
539 
540  void AbortCompaction();
541 
542  // During a full GC, there is a stack-allocated GCTracer that is used for
543  // bookkeeping information. Return a pointer to that tracer.
544  GCTracer* tracer() { return tracer_; }
545 
546 #ifdef DEBUG
547  // Checks whether a mark-compact collection is in progress.
548  bool in_use() { return state_ > PREPARE_GC; }
549  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
550 #endif
551 
552  // Determine type of object and emit deletion log event.
553  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
554 
555  // Distinguishable invalid map encodings (for single word and multiple words)
556  // that indicate free regions.
557  static const uint32_t kSingleFreeEncoding = 0;
558  static const uint32_t kMultiFreeEncoding = 1;
559 
560  static inline bool IsMarked(Object* obj);
561 
562  inline Heap* heap() const { return heap_; }
563 
564  CodeFlusher* code_flusher() { return code_flusher_; }
565  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
566  void EnableCodeFlushing(bool enable);
567 
568  enum SweeperType {
569  CONSERVATIVE,
570  LAZY_CONSERVATIVE,
571  PRECISE
572  };
573 
574 #ifdef VERIFY_HEAP
575  void VerifyMarkbitsAreClean();
576  static void VerifyMarkbitsAreClean(PagedSpace* space);
577  static void VerifyMarkbitsAreClean(NewSpace* space);
578 #endif
579 
580  // Sweep a single page from the given space conservatively.
581  // Returns the number of reclaimed bytes.
582  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
583 
584  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
585  return Page::FromAddress(reinterpret_cast<Address>(anchor))->
586  ShouldSkipEvacuationSlotRecording();
587  }
588 
589  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
590  return Page::FromAddress(reinterpret_cast<Address>(host))->
591  ShouldSkipEvacuationSlotRecording();
592  }
593 
594  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
595  return Page::FromAddress(reinterpret_cast<Address>(obj))->
596  IsEvacuationCandidate();
597  }
598 
598 
599  void EvictEvacuationCandidate(Page* page) {
600  if (FLAG_trace_fragmentation) {
601  PrintF("Page %p is too popular. Disabling evacuation.\n",
602  reinterpret_cast<void*>(page));
603  }
604 
605  // TODO(gc) If all evacuation candidates are too popular we
606  // should stop slots recording entirely.
607  page->ClearEvacuationCandidate();
608 
609  // We were not collecting slots on this page that point
610  // to other evacuation candidates, thus we have to
611  // rescan the page after evacuation to discover and update all
612  // pointers to evacuated objects.
613  if (page->owner()->identity() == OLD_DATA_SPACE) {
614  evacuation_candidates_.RemoveElement(page);
615  } else {
616  page->SetFlag(Page::RESCAN_ON_EVACUATION);
617  }
618  }
619 
620  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
621  void RecordCodeEntrySlot(Address slot, Code* target);
622  void RecordCodeTargetPatch(Address pc, Code* target);
623 
624  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
625 
626  void MigrateObject(Address dst,
627  Address src,
628  int size,
629  AllocationSpace to_old_space);
630 
631  bool TryPromoteObject(HeapObject* object, int object_size);
632 
633  inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
634  inline void set_encountered_weak_maps(Object* weak_map) {
635  encountered_weak_maps_ = weak_map;
636  }
637 
638  void InvalidateCode(Code* code);
639 
640  void ClearMarkbits();
641 
642  bool is_compacting() const { return compacting_; }
643 
644  private:
645  MarkCompactCollector();
646  ~MarkCompactCollector();
647 
648  bool MarkInvalidatedCode();
649  void RemoveDeadInvalidatedCode();
650  void ProcessInvalidatedCode(ObjectVisitor* visitor);
651 
652 
653 #ifdef DEBUG
654  enum CollectorState {
655  IDLE,
656  PREPARE_GC,
657  MARK_LIVE_OBJECTS,
658  SWEEP_SPACES,
659  ENCODE_FORWARDING_ADDRESSES,
660  UPDATE_POINTERS,
661  RELOCATE_OBJECTS
662  };
663 
664  // The current stage of the collector.
665  CollectorState state_;
666 #endif
667 
668  // Global flag that forces sweeping to be precise, so we can traverse the
669  // heap.
670  bool sweep_precisely_;
671 
672  bool reduce_memory_footprint_;
673 
674  bool abort_incremental_marking_;
675 
676  // True if we are collecting slots to perform evacuation from evacuation
677  // candidates.
678  bool compacting_;
679 
680  bool was_marked_incrementally_;
681 
682  // A pointer to the current stack-allocated GC tracer object during a full
683  // collection (NULL before and after).
684  GCTracer* tracer_;
685 
686  SlotsBufferAllocator slots_buffer_allocator_;
687 
688  SlotsBuffer* migration_slots_buffer_;
689 
690  // Finishes GC, performs heap verification if enabled.
691  void Finish();
692 
693  // -----------------------------------------------------------------------
694  // Phase 1: Marking live objects.
695  //
696  // Before: The heap has been prepared for garbage collection by
697  // MarkCompactCollector::Prepare() and is otherwise in its
698  // normal state.
699  //
700  // After: Live objects are marked and non-live objects are unmarked.
701 
702  friend class RootMarkingVisitor;
703  friend class MarkingVisitor;
705  friend class CodeMarkingVisitor;
707 
708  // Mark code objects that are active on the stack to prevent them
709  // from being flushed.
710  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
711 
712  void PrepareForCodeFlushing();
713 
714  // Marking operations for objects reachable from roots.
715  void MarkLiveObjects();
716 
717  void AfterMarking();
718 
719  // Marks the object black and pushes it on the marking stack.
720  // This is for non-incremental marking only.
721  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
722 
723  // Marks the object black assuming that it is not yet marked.
724  // This is for non-incremental marking only.
725  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
726 
727  // Mark the heap roots and all objects reachable from them.
728  void MarkRoots(RootMarkingVisitor* visitor);
729 
730  // Mark the symbol table specially. References to symbols from the
731  // symbol table are weak.
732  void MarkSymbolTable();
733 
734  // Mark objects in object groups that have at least one object in the
735  // group marked.
736  void MarkObjectGroups();
737 
738  // Mark objects in implicit references groups if their parent object
739  // is marked.
740  void MarkImplicitRefGroups();
741 
742  // Mark all objects which are reachable due to host application
743  // logic like object groups or implicit reference groups.
744  void ProcessExternalMarking();
745 
746  // Mark objects reachable (transitively) from objects in the marking stack
747  // or overflowed in the heap.
748  void ProcessMarkingDeque();
749 
750  // Mark objects reachable (transitively) from objects in the marking
751  // stack. This function empties the marking stack, but may leave
752  // overflowed objects in the heap, in which case the marking stack's
753  // overflow flag will be set.
754  void EmptyMarkingDeque();
755 
756  // Refill the marking stack with overflowed objects from the heap. This
757  // function either leaves the marking stack full or clears the overflow
758  // flag on the marking stack.
759  void RefillMarkingDeque();
760 
761  // After reachable maps have been marked, process per-context object
762  // literal map caches, removing unmarked entries.
763  void ProcessMapCaches();
764 
765  // Callback function for telling whether the object *p is an unmarked
766  // heap object.
767  static bool IsUnmarkedHeapObject(Object** p);
768 
769  // Map transitions from a live map to a dead map must be killed.
770  // We replace them with a null descriptor, with the same key.
771  void ClearNonLiveTransitions();
772  void ClearNonLivePrototypeTransitions(Map* map);
773  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
774 
775  // Marking detaches initial maps from SharedFunctionInfo objects
776  // to make this reference weak. We need to reattach initial maps
777  // back after collection. This is done either during the
778  // ClearNonLiveTransitions pass or by calling this function.
779  void ReattachInitialMaps();
780 
781  // Mark all values associated with reachable keys in weak maps encountered
783  // so far. This might push new objects or even new weak maps onto the
783  // marking stack.
784  void ProcessWeakMaps();
785 
786  // After all reachable objects have been marked those weak map entries
787  // with an unreachable key are removed from all encountered weak maps.
788  // The linked list of all encountered weak maps is destroyed.
789  void ClearWeakMaps();
790 
791  // -----------------------------------------------------------------------
792  // Phase 2: Sweeping to clear mark bits and free non-live objects for
793  // a non-compacting collection.
794  //
795  // Before: Live objects are marked and non-live objects are unmarked.
796  //
797  // After: Live objects are unmarked, non-live regions have been added to
798  // their space's free list. Active eden semispace is compacted by
799  // evacuation.
800  //
801 
802  // If we are not compacting the heap, we simply sweep the spaces except
803  // for the large object space, clearing mark bits and adding unmarked
804  // regions to each space's free list.
805  void SweepSpaces();
806 
807  void EvacuateNewSpace();
808 
809  void EvacuateLiveObjectsFromPage(Page* p);
810 
811  void EvacuatePages();
812 
813  void EvacuateNewSpaceAndCandidates();
814 
815  void SweepSpace(PagedSpace* space, SweeperType sweeper);
816 
817 #ifdef DEBUG
818  friend class MarkObjectVisitor;
819  static void VisitObject(HeapObject* obj);
820 
821  friend class UnmarkObjectVisitor;
822  static void UnmarkObject(HeapObject* obj);
823 #endif
824 
825  Heap* heap_;
826  MarkingDeque marking_deque_;
827  CodeFlusher* code_flusher_;
828  Object* encountered_weak_maps_;
829 
830  List<Page*> evacuation_candidates_;
831  List<Code*> invalidated_code_;
832 
833  friend class Heap;
834 };
835 
836 
837 const char* AllocationSpaceName(AllocationSpace space);
838 
839 } } // namespace v8::internal
840 
841 #endif // V8_MARK_COMPACT_H_