v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
mark-compact.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_MARK_COMPACT_H_
29 #define V8_MARK_COMPACT_H_
30 
31 #include "compiler-intrinsics.h"
32 #include "spaces.h"
33 
34 namespace v8 {
35 namespace internal {
36 
37 // Callback function, returns whether an object is alive. The heap size
38 // of the object is returned in size. It optionally updates the offset
39 // to the first live object in the page (only used for old and map objects).
40 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
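As a rough illustration of the callback shape above, here is a standalone sketch that uses an invented FakeObject struct in place of V8's HeapObject and ignores the offset parameter; it only shows how such a callback reports liveness and size.

#include <cstdio>

// Invented stand-in for a heap object; not V8's HeapObject.
struct FakeObject {
  bool marked;
  int byte_size;
};

// Mirrors the IsAliveFunction shape: report liveness and return the heap size
// through *size; the offset parameter is left untouched here.
static bool IsAliveExample(FakeObject* obj, int* size, int* offset) {
  (void) offset;
  *size = obj->byte_size;
  return obj->marked;
}

int main() {
  FakeObject o = { true, 48 };
  int size = 0;
  std::printf("alive=%d size=%d\n", IsAliveExample(&o, &size, NULL), size);
  return 0;
}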
41 
42 // Forward declarations.
43 class CodeFlusher;
44 class GCTracer;
45 class MarkCompactCollector;
46 class MarkingVisitor;
47 class RootMarkingVisitor;
48 
49 
50 class Marking {
51  public:
52  explicit Marking(Heap* heap)
53  : heap_(heap) {
54  }
55 
56  INLINE(static MarkBit MarkBitFrom(Address addr));
57 
58  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
59  return MarkBitFrom(reinterpret_cast<Address>(obj));
60  }
61 
62  // Impossible markbits: 01
63  static const char* kImpossibleBitPattern;
64  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
65  return !mark_bit.Get() && mark_bit.Next().Get();
66  }
67 
68  // Black markbits: 10 - this is required by the sweeper.
69  static const char* kBlackBitPattern;
70  INLINE(static bool IsBlack(MarkBit mark_bit)) {
71  return mark_bit.Get() && !mark_bit.Next().Get();
72  }
73 
74  // White markbits: 00 - this is required by the mark bit clearer.
75  static const char* kWhiteBitPattern;
76  INLINE(static bool IsWhite(MarkBit mark_bit)) {
77  return !mark_bit.Get();
78  }
79 
80  // Grey markbits: 11
81  static const char* kGreyBitPattern;
82  INLINE(static bool IsGrey(MarkBit mark_bit)) {
83  return mark_bit.Get() && mark_bit.Next().Get();
84  }
85 
86  INLINE(static void MarkBlack(MarkBit mark_bit)) {
87  mark_bit.Set();
88  mark_bit.Next().Clear();
89  }
90 
91  INLINE(static void BlackToGrey(MarkBit markbit)) {
92  markbit.Next().Set();
93  }
94 
95  INLINE(static void WhiteToGrey(MarkBit markbit)) {
96  markbit.Set();
97  markbit.Next().Set();
98  }
99 
100  INLINE(static void GreyToBlack(MarkBit markbit)) {
101  markbit.Next().Clear();
102  }
103 
104  INLINE(static void BlackToGrey(HeapObject* obj)) {
105  BlackToGrey(MarkBitFrom(obj));
106  }
107 
108  INLINE(static void AnyToGrey(MarkBit markbit)) {
109  markbit.Set();
110  markbit.Next().Set();
111  }
112 
113  void TransferMark(Address old_start, Address new_start);
114 
115 #ifdef DEBUG
116  enum ObjectColor {
117  BLACK_OBJECT,
118  WHITE_OBJECT,
119  GREY_OBJECT,
120  IMPOSSIBLE_COLOR
121  };
122 
123  static const char* ColorName(ObjectColor color) {
124  switch (color) {
125  case BLACK_OBJECT: return "black";
126  case WHITE_OBJECT: return "white";
127  case GREY_OBJECT: return "grey";
128  case IMPOSSIBLE_COLOR: return "impossible";
129  }
130  return "error";
131  }
132 
133  static ObjectColor Color(HeapObject* obj) {
134  return Color(Marking::MarkBitFrom(obj));
135  }
136 
137  static ObjectColor Color(MarkBit mark_bit) {
138  if (IsBlack(mark_bit)) return BLACK_OBJECT;
139  if (IsWhite(mark_bit)) return WHITE_OBJECT;
140  if (IsGrey(mark_bit)) return GREY_OBJECT;
141  UNREACHABLE();
142  return IMPOSSIBLE_COLOR;
143  }
144 #endif
145 
146  // Returns true if the transferred color is black.
147  INLINE(static bool TransferColor(HeapObject* from,
148  HeapObject* to)) {
149  MarkBit from_mark_bit = MarkBitFrom(from);
150  MarkBit to_mark_bit = MarkBitFrom(to);
151  bool is_black = false;
152  if (from_mark_bit.Get()) {
153  to_mark_bit.Set();
154  is_black = true; // Looks black so far.
155  }
156  if (from_mark_bit.Next().Get()) {
157  to_mark_bit.Next().Set();
158  is_black = false; // Was actually gray.
159  }
160  return is_black;
161  }
162 
163  private:
164  Heap* heap_;
165 };
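The four two-bit patterns documented above (white 00, grey 11, black 10, impossible 01) can be exercised with a small standalone model. BitPair below is an invented stand-in for V8's MarkBit, whose real counterpart points into a page's marking bitmap and whose Next() call views the adjacent bit.

#include <cassert>

// Invented stand-in for a mark-bit pair: bit0 is what MarkBit::Get() would
// read, bit1 is what MarkBit::Next().Get() would read.
struct BitPair { bool bit0; bool bit1; };

static bool IsWhite(BitPair b)      { return !b.bit0; }            // 00
static bool IsGrey(BitPair b)       { return b.bit0 && b.bit1; }   // 11
static bool IsBlack(BitPair b)      { return b.bit0 && !b.bit1; }  // 10
static bool IsImpossible(BitPair b) { return !b.bit0 && b.bit1; }  // 01

static void WhiteToGrey(BitPair* b) { b->bit0 = true; b->bit1 = true; }
static void GreyToBlack(BitPair* b) { b->bit1 = false; }
static void BlackToGrey(BitPair* b) { b->bit1 = true; }

int main() {
  BitPair b = { false, false };          // freshly allocated object: white
  assert(IsWhite(b) && !IsImpossible(b));
  WhiteToGrey(&b);                       // discovered and pushed on the deque
  assert(IsGrey(b));
  GreyToBlack(&b);                       // popped and its body scanned
  assert(IsBlack(b));
  BlackToGrey(&b);                       // demoted again, e.g. on deque overflow
  assert(IsGrey(b));
  return 0;
}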
166 
167 // ----------------------------------------------------------------------------
168 // Marking deque for tracing live objects.
169 class MarkingDeque {
170  public:
171  MarkingDeque()
172  : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
173 
174  void Initialize(Address low, Address high) {
175  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
176  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
177  array_ = obj_low;
178  mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
179  top_ = bottom_ = 0;
180  overflowed_ = false;
181  }
182 
183  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
184 
185  inline bool IsEmpty() { return top_ == bottom_; }
186 
187  bool overflowed() const { return overflowed_; }
188 
189  void ClearOverflowed() { overflowed_ = false; }
190 
191  void SetOverflowed() { overflowed_ = true; }
192 
193  // Push the (marked) object on the marking stack if there is room,
194  // otherwise mark the object as overflowed and wait for a rescan of the
195  // heap.
196  INLINE(void PushBlack(HeapObject* object)) {
197  ASSERT(object->IsHeapObject());
198  if (IsFull()) {
199  Marking::BlackToGrey(object);
200  MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
201  SetOverflowed();
202  } else {
203  array_[top_] = object;
204  top_ = ((top_ + 1) & mask_);
205  }
206  }
207 
208  INLINE(void PushGrey(HeapObject* object)) {
209  ASSERT(object->IsHeapObject());
210  if (IsFull()) {
211  SetOverflowed();
212  } else {
213  array_[top_] = object;
214  top_ = ((top_ + 1) & mask_);
215  }
216  }
217 
218  INLINE(HeapObject* Pop()) {
219  ASSERT(!IsEmpty());
220  top_ = ((top_ - 1) & mask_);
221  HeapObject* object = array_[top_];
222  ASSERT(object->IsHeapObject());
223  return object;
224  }
225 
226  INLINE(void UnshiftGrey(HeapObject* object)) {
227  ASSERT(object->IsHeapObject());
228  if (IsFull()) {
229  SetOverflowed();
230  } else {
231  bottom_ = ((bottom_ - 1) & mask_);
232  array_[bottom_] = object;
233  }
234  }
235 
236  HeapObject** array() { return array_; }
237  int bottom() { return bottom_; }
238  int top() { return top_; }
239  int mask() { return mask_; }
240  void set_top(int top) { top_ = top; }
241 
242  private:
243  HeapObject** array_;
244 // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
245 // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
246 // (mod mask_ + 1).
247  int top_;
248  int bottom_;
249  int mask_;
250  bool overflowed_;
251 
252  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
253 };
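The deque above keeps HeapObject pointers in a ring buffer whose capacity is a power of two, so the wrap-around, the empty test (top_ == bottom_) and the full test (((top_ + 1) & mask_) == bottom_) are each a single mask operation, at the cost of one permanently unused slot. The standalone RingDeque below (an invented class holding ints) uses the same index arithmetic.

#include <cassert>

// Same index arithmetic as MarkingDeque, but over a fixed int array. The
// capacity must be a power of two so that "& mask_" performs the wrap-around.
class RingDeque {
 public:
  RingDeque() : top_(0), bottom_(0), mask_(kCapacity - 1) {}

  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

  bool PushBack(int value) {             // analogous to PushBlack/PushGrey
    if (IsFull()) return false;          // the real deque sets overflowed_ here
    array_[top_] = value;
    top_ = (top_ + 1) & mask_;
    return true;
  }

  int PopBack() {                        // analogous to Pop()
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;
    return array_[top_];
  }

  bool PushFront(int value) {            // analogous to UnshiftGrey
    if (IsFull()) return false;
    bottom_ = (bottom_ - 1) & mask_;
    array_[bottom_] = value;
    return true;
  }

 private:
  static const int kCapacity = 8;        // power of two; one slot stays unused
  int array_[kCapacity];
  int top_;
  int bottom_;
  int mask_;
};

int main() {
  RingDeque deque;
  for (int i = 0; i < 7; i++) assert(deque.PushBack(i));
  assert(deque.IsFull());                // capacity 8 holds at most 7 elements
  assert(deque.PopBack() == 6);
  assert(deque.PushFront(42));
  return 0;
}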
254 
255 
256 class SlotsBufferAllocator {
257  public:
258  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
259  void DeallocateBuffer(SlotsBuffer* buffer);
260 
261  void DeallocateChain(SlotsBuffer** buffer_address);
262 };
263 
264 
265 // SlotsBuffer records a sequence of slots that has to be updated
266 // after live objects were relocated from evacuation candidates.
267 // All slots are either untyped or typed:
268 // - Untyped slots are expected to contain a tagged object pointer.
269 // They are recorded by an address.
270 // - Typed slots are expected to contain an encoded pointer to a heap
271 // object where the way of encoding depends on the type of the slot.
272 // They are recorded as a pair (SlotType, slot address).
273 // We assume that the zero page is never mapped; this allows us to distinguish
274 // untyped slots from typed slots during iteration by a simple comparison:
275 // if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then it
276 // is the first element of a typed slot's pair.
277 class SlotsBuffer {
278  public:
279  typedef Object** ObjectSlot;
280 
281  explicit SlotsBuffer(SlotsBuffer* next_buffer)
282  : idx_(0), chain_length_(1), next_(next_buffer) {
283  if (next_ != NULL) {
284  chain_length_ = next_->chain_length_ + 1;
285  }
286  }
287 
288  ~SlotsBuffer() {
289  }
290 
291  void Add(ObjectSlot slot) {
292  ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
293  slots_[idx_++] = slot;
294  }
295 
296  enum SlotType {
297  EMBEDDED_OBJECT_SLOT,
298  RELOCATED_CODE_OBJECT,
299  CODE_TARGET_SLOT,
300  CODE_ENTRY_SLOT,
301  DEBUG_TARGET_SLOT,
302  JS_RETURN_SLOT,
303  NUMBER_OF_SLOT_TYPES
304  };
305 
306  static const char* SlotTypeToString(SlotType type) {
307  switch (type) {
308  case EMBEDDED_OBJECT_SLOT:
309  return "EMBEDDED_OBJECT_SLOT";
310  case RELOCATED_CODE_OBJECT:
311  return "RELOCATED_CODE_OBJECT";
312  case CODE_TARGET_SLOT:
313  return "CODE_TARGET_SLOT";
314  case CODE_ENTRY_SLOT:
315  return "CODE_ENTRY_SLOT";
316  case DEBUG_TARGET_SLOT:
317  return "DEBUG_TARGET_SLOT";
318  case JS_RETURN_SLOT:
319  return "JS_RETURN_SLOT";
320  case NUMBER_OF_SLOT_TYPES:
321  return "NUMBER_OF_SLOT_TYPES";
322  }
323  return "UNKNOWN SlotType";
324  }
325 
326  void UpdateSlots(Heap* heap);
327 
328  void UpdateSlotsWithFilter(Heap* heap);
329 
330  SlotsBuffer* next() { return next_; }
331 
332  static int SizeOfChain(SlotsBuffer* buffer) {
333  if (buffer == NULL) return 0;
334  return static_cast<int>(buffer->idx_ +
335  (buffer->chain_length_ - 1) * kNumberOfElements);
336  }
337 
338  inline bool IsFull() {
339  return idx_ == kNumberOfElements;
340  }
341 
342  inline bool HasSpaceForTypedSlot() {
343  return idx_ < kNumberOfElements - 1;
344  }
345 
346  static void UpdateSlotsRecordedIn(Heap* heap,
347  SlotsBuffer* buffer,
348  bool code_slots_filtering_required) {
349  while (buffer != NULL) {
350  if (code_slots_filtering_required) {
351  buffer->UpdateSlotsWithFilter(heap);
352  } else {
353  buffer->UpdateSlots(heap);
354  }
355  buffer = buffer->next();
356  }
357  }
358 
359  enum AdditionMode {
360  FAIL_ON_OVERFLOW,
361  IGNORE_OVERFLOW
362  };
363 
364  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
365  return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
366  }
367 
368  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
369  SlotsBuffer** buffer_address,
370  ObjectSlot slot,
371  AdditionMode mode)) {
372  SlotsBuffer* buffer = *buffer_address;
373  if (buffer == NULL || buffer->IsFull()) {
374  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
375  allocator->DeallocateChain(buffer_address);
376  return false;
377  }
378  buffer = allocator->AllocateBuffer(buffer);
379  *buffer_address = buffer;
380  }
381  buffer->Add(slot);
382  return true;
383  }
384 
385  static bool IsTypedSlot(ObjectSlot slot);
386 
387  static bool AddTo(SlotsBufferAllocator* allocator,
388  SlotsBuffer** buffer_address,
389  SlotType type,
390  Address addr,
391  AdditionMode mode);
392 
393  static const int kNumberOfElements = 1021;
394 
395  private:
396  static const int kChainLengthThreshold = 15;
397 
398  intptr_t idx_;
399  intptr_t chain_length_;
400  SlotsBuffer* next_;
401  ObjectSlot slots_[kNumberOfElements];
402 };
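The zero-page assumption spelled out in the comment before this class means that any recorded value numerically below NUMBER_OF_SLOT_TYPES must be a slot-type tag rather than a real address. The standalone sketch below (with an invented SlotTag enum and a plain std::vector as the buffer) walks such a mixed sequence the same way.

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the encoding idea: a typed slot occupies two entries (tag, address),
// an untyped slot occupies one entry that is itself an address.
enum SlotTag {
  CODE_TARGET = 0,
  EMBEDDED_OBJECT = 1,
  NUMBER_OF_TAGS = 2   // every value below this is a tag, never an address
};

int main() {
  uintptr_t fake_heap[2] = { 0, 0 };
  std::vector<uintptr_t> buffer;
  buffer.push_back(reinterpret_cast<uintptr_t>(&fake_heap[0]));  // untyped slot
  buffer.push_back(CODE_TARGET);                                 // typed pair...
  buffer.push_back(reinterpret_cast<uintptr_t>(&fake_heap[1]));  // ...and address

  for (size_t i = 0; i < buffer.size(); i++) {
    if (buffer[i] < NUMBER_OF_TAGS) {
      // First element of a typed pair: consume the tag and the address.
      std::printf("typed slot, tag %d, address %p\n",
                  static_cast<int>(buffer[i]),
                  reinterpret_cast<void*>(buffer[i + 1]));
      i++;  // skip the address element of the pair
    } else {
      std::printf("untyped slot %p\n", reinterpret_cast<void*>(buffer[i]));
    }
  }
  return 0;
}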
403 
404 
405 // CodeFlusher collects candidates for code flushing during marking and
406 // processes those candidates after marking has completed in order to
407 // reset those functions referencing code objects that would otherwise
408 // be unreachable. Code objects can be referenced in three ways:
409 // - SharedFunctionInfo references unoptimized code.
410 // - JSFunction references either unoptimized or optimized code.
411 // - OptimizedCodeMap references optimized code.
412 // We are not allowed to flush unoptimized code for functions that got
413 // optimized or inlined into optimized code, because we might bail out
414 // into the unoptimized code again during deoptimization.
415 class CodeFlusher {
416  public:
417  explicit CodeFlusher(Isolate* isolate)
418  : isolate_(isolate),
419  jsfunction_candidates_head_(NULL),
420  shared_function_info_candidates_head_(NULL),
421  optimized_code_map_holder_head_(NULL) {}
422 
423  void AddCandidate(SharedFunctionInfo* shared_info) {
424  if (GetNextCandidate(shared_info) == NULL) {
425  SetNextCandidate(shared_info, shared_function_info_candidates_head_);
426  shared_function_info_candidates_head_ = shared_info;
427  }
428  }
429 
430  void AddCandidate(JSFunction* function) {
431  ASSERT(function->code() == function->shared()->code());
432  if (GetNextCandidate(function)->IsUndefined()) {
433  SetNextCandidate(function, jsfunction_candidates_head_);
434  jsfunction_candidates_head_ = function;
435  }
436  }
437 
438  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
439  if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
440  SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
441  optimized_code_map_holder_head_ = code_map_holder;
442  }
443  }
444 
445  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
446  void EvictCandidate(SharedFunctionInfo* shared_info);
447  void EvictCandidate(JSFunction* function);
448 
449  void ProcessCandidates() {
450  ProcessOptimizedCodeMaps();
451  ProcessSharedFunctionInfoCandidates();
452  ProcessJSFunctionCandidates();
453  }
454 
455  void EvictAllCandidates() {
456  EvictOptimizedCodeMaps();
457  EvictJSFunctionCandidates();
458  EvictSharedFunctionInfoCandidates();
459  }
460 
461  void IteratePointersToFromSpace(ObjectVisitor* v);
462 
463  private:
464  void ProcessOptimizedCodeMaps();
465  void ProcessJSFunctionCandidates();
466  void ProcessSharedFunctionInfoCandidates();
467  void EvictOptimizedCodeMaps();
468  void EvictJSFunctionCandidates();
469  void EvictSharedFunctionInfoCandidates();
470 
471  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
472  return reinterpret_cast<JSFunction**>(
473  HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
474  }
475 
476  static JSFunction* GetNextCandidate(JSFunction* candidate) {
477  Object* next_candidate = candidate->next_function_link();
478  return reinterpret_cast<JSFunction*>(next_candidate);
479  }
480 
481  static void SetNextCandidate(JSFunction* candidate,
482  JSFunction* next_candidate) {
483  candidate->set_next_function_link(next_candidate);
484  }
485 
486  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
487  ASSERT(undefined->IsUndefined());
488  candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
489  }
490 
491  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
492  Object* next_candidate = candidate->code()->gc_metadata();
493  return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
494  }
495 
496  static void SetNextCandidate(SharedFunctionInfo* candidate,
497  SharedFunctionInfo* next_candidate) {
498  candidate->code()->set_gc_metadata(next_candidate);
499  }
500 
501  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
502  candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
503  }
504 
505  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
506  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
507  Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
508  return reinterpret_cast<SharedFunctionInfo*>(next_map);
509  }
510 
511  static void SetNextCodeMap(SharedFunctionInfo* holder,
512  SharedFunctionInfo* next_holder) {
513  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
514  code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
515  }
516 
517  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
518  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
519  code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
520  }
521 
522  Isolate* isolate_;
523  JSFunction* jsfunction_candidates_head_;
524  SharedFunctionInfo* shared_function_info_candidates_head_;
525  SharedFunctionInfo* optimized_code_map_holder_head_;
526 
527  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
528 };
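The candidate bookkeeping above never allocates: each singly linked list is threaded through a field that already exists on the candidate itself (next_function_link for JSFunction, the code object's gc_metadata for SharedFunctionInfo, a fixed slot of the optimized code map for its holder). The standalone sketch below shows the same intrusive-list pattern with an invented Candidate struct.

#include <cstdio>

// Invented stand-in: the "next" pointer is stored inside the candidate itself,
// the way CodeFlusher reuses next_function_link / gc_metadata fields.
struct Candidate {
  const char* name;
  Candidate* next;
};

int main() {
  Candidate a = { "a", NULL };
  Candidate b = { "b", NULL };
  Candidate c = { "c", NULL };

  Candidate* head = NULL;
  Candidate* all[] = { &a, &b, &c };
  for (int i = 0; i < 3; i++) {            // enqueue by pushing at the head
    all[i]->next = head;
    head = all[i];
  }

  // Processing walks the chain and clears the links, mirroring how the
  // candidate lists are consumed after marking has completed.
  while (head != NULL) {
    Candidate* current = head;
    head = current->next;
    current->next = NULL;
    std::printf("processing candidate %s\n", current->name);
  }
  return 0;
}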
529 
530 
531 // Defined in isolate.h.
532 class ThreadLocalTop;
533 
534 
535 // -------------------------------------------------------------------------
536 // Mark-Compact collector
537 class MarkCompactCollector {
538  public:
539  // Type of functions to compute forwarding addresses of objects in
540  // compacted spaces. Given an object and its size, return a (non-failure)
541  // Object* that will be the object after forwarding. There is a separate
542  // allocation function for each (compactable) space based on the location
543  // of the object before compaction.
544  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
545  HeapObject* object,
546  int object_size);
547 
548  // Type of functions to encode the forwarding address for an object.
549  // Given the object, its size, and the new (non-failure) object it will be
550  // forwarded to, encode the forwarding address. For paged spaces, the
551  // 'offset' input/output parameter contains the offset of the forwarded
552  // object from the forwarding address of the previous live object in the
553  // page as input, and is updated to contain the offset to be used for the
554  // next live object in the same page. For spaces using a different
555  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
556  typedef void (*EncodingFunction)(Heap* heap,
557  HeapObject* old_object,
558  int object_size,
559  Object* new_object,
560  int* offset);
561 
562  // Type of functions to process non-live objects.
563  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
564 
565  // Pointer to member function, used in IterateLiveObjects.
566  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
567 
568  // Set the global flags; this must be called before Prepare to take effect.
569  inline void SetFlags(int flags);
570 
571  static void Initialize();
572 
573  void SetUp();
574 
575  void TearDown();
576 
577  void CollectEvacuationCandidates(PagedSpace* space);
578 
579  void AddEvacuationCandidate(Page* p);
580 
581  // Prepares for GC by resetting relocation info in old and map spaces and
582  // choosing spaces to compact.
583  void Prepare(GCTracer* tracer);
584 
585  // Performs a global garbage collection.
586  void CollectGarbage();
587 
588  enum CompactionMode {
589  INCREMENTAL_COMPACTION,
590  NON_INCREMENTAL_COMPACTION
591  };
592 
593  bool StartCompaction(CompactionMode mode);
594 
595  void AbortCompaction();
596 
597  // During a full GC, there is a stack-allocated GCTracer that is used for
598  // bookkeeping information. Return a pointer to that tracer.
599  GCTracer* tracer() { return tracer_; }
600 
601 #ifdef DEBUG
602  // Checks whether a mark-compact collection is in progress.
603  bool in_use() { return state_ > PREPARE_GC; }
604  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
605 #endif
606 
607  // Determine type of object and emit deletion log event.
608  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
609 
610  // Distinguishable invalid map encodings (for single word and multiple words)
611  // that indicate free regions.
612  static const uint32_t kSingleFreeEncoding = 0;
613  static const uint32_t kMultiFreeEncoding = 1;
614 
615  static inline bool IsMarked(Object* obj);
616 
617  inline Heap* heap() const { return heap_; }
618  inline Isolate* isolate() const;
619 
620  CodeFlusher* code_flusher() { return code_flusher_; }
621  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
622  void EnableCodeFlushing(bool enable);
623 
624  enum SweeperType {
625  CONSERVATIVE,
626  LAZY_CONSERVATIVE,
627  PARALLEL_CONSERVATIVE,
628  CONCURRENT_CONSERVATIVE,
629  PRECISE
630  };
631 
632  enum SweepingParallelism {
633  SWEEP_SEQUENTIALLY,
634  SWEEP_IN_PARALLEL
635  };
636 
637 #ifdef VERIFY_HEAP
638  void VerifyMarkbitsAreClean();
639  static void VerifyMarkbitsAreClean(PagedSpace* space);
640  static void VerifyMarkbitsAreClean(NewSpace* space);
641  void VerifyWeakEmbeddedObjectsInOptimizedCode();
642  void VerifyOmittedMapChecks();
643 #endif
644 
645  // Sweep a single page from the given space conservatively.
646  // Return a number of reclaimed bytes.
647  template<SweepingParallelism type>
648  static intptr_t SweepConservatively(PagedSpace* space,
649  FreeList* free_list,
650  Page* p);
651 
652  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
653  return Page::FromAddress(reinterpret_cast<Address>(anchor))->
654  ShouldSkipEvacuationSlotRecording();
655  }
656 
657  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
658  return Page::FromAddress(reinterpret_cast<Address>(host))->
659  ShouldSkipEvacuationSlotRecording();
660  }
661 
662  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
663  return Page::FromAddress(reinterpret_cast<Address>(obj))->
664  IsEvacuationCandidate();
665  }
666 
667  INLINE(void EvictEvacuationCandidate(Page* page)) {
668  if (FLAG_trace_fragmentation) {
669  PrintF("Page %p is too popular. Disabling evacuation.\n",
670  reinterpret_cast<void*>(page));
671  }
672 
673  // TODO(gc) If all evacuation candidates are too popular we
674  // should stop slots recording entirely.
675  page->ClearEvacuationCandidate();
676 
677  // We were not collecting slots on this page that point
678  // to other evacuation candidates, thus we have to
679  // rescan the page after evacuation to discover and update all
680  // pointers to evacuated objects.
681  if (page->owner()->identity() == OLD_DATA_SPACE) {
682  evacuation_candidates_.RemoveElement(page);
683  } else {
684  page->SetFlag(Page::RESCAN_ON_EVACUATION);
685  }
686  }
687 
688  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
689  void RecordCodeEntrySlot(Address slot, Code* target);
690  void RecordCodeTargetPatch(Address pc, Code* target);
691 
692  INLINE(void RecordSlot(Object** anchor_slot,
693  Object** slot,
694  Object* object,
695  SlotsBuffer::AdditionMode mode =
696  SlotsBuffer::FAIL_ON_OVERFLOW));
697 
698  void MigrateObject(HeapObject* dst,
699  HeapObject* src,
700  int size,
701  AllocationSpace to_old_space);
702 
703  bool TryPromoteObject(HeapObject* object, int object_size);
704 
705  inline Object* encountered_weak_collections() const {
706  return encountered_weak_collections_;
707  }
708  inline void set_encountered_weak_collections(Object* weak_collection) {
709  encountered_weak_collections_ = weak_collection;
710  }
711 
712  void InvalidateCode(Code* code);
713 
714  void ClearMarkbits();
715 
716  bool abort_incremental_marking() const { return abort_incremental_marking_; }
717 
718  bool is_compacting() const { return compacting_; }
719 
720  MarkingParity marking_parity() { return marking_parity_; }
721 
722  // Concurrent and parallel sweeping support.
723  void SweepInParallel(PagedSpace* space);
724 
726 
727  intptr_t RefillFreeLists(PagedSpace* space);
728 
730 
732 
733  void set_sequential_sweeping(bool sequential_sweeping) {
734  sequential_sweeping_ = sequential_sweeping;
735  }
736 
737  bool sequential_sweeping() const {
738  return sequential_sweeping_;
739  }
740 
741  // Mark the global table which maps weak objects to dependent code without
742  // marking its contents.
743  void MarkWeakObjectToCodeTable();
744 
745  // Special case for processing weak references in a full collection. We need
746  // to artificially keep AllocationSites alive for a time.
747  void MarkAllocationSite(AllocationSite* site);
748 
749  private:
750  class SweeperTask;
751 
752  explicit MarkCompactCollector(Heap* heap);
753  ~MarkCompactCollector();
754 
755  bool MarkInvalidatedCode();
756  bool WillBeDeoptimized(Code* code);
757  void RemoveDeadInvalidatedCode();
758  void ProcessInvalidatedCode(ObjectVisitor* visitor);
759 
760  void UnlinkEvacuationCandidates();
761  void ReleaseEvacuationCandidates();
762 
763  void StartSweeperThreads();
764 
765 #ifdef DEBUG
766  enum CollectorState {
767  IDLE,
768  PREPARE_GC,
769  MARK_LIVE_OBJECTS,
770  SWEEP_SPACES,
771  ENCODE_FORWARDING_ADDRESSES,
772  UPDATE_POINTERS,
773  RELOCATE_OBJECTS
774  };
775 
776  // The current stage of the collector.
777  CollectorState state_;
778 #endif
779 
780  // Global flag that forces sweeping to be precise, so we can traverse the
781  // heap.
782  bool sweep_precisely_;
783 
784  bool reduce_memory_footprint_;
785 
786  bool abort_incremental_marking_;
787 
788  MarkingParity marking_parity_;
789 
790  // True if we are collecting slots to perform evacuation from evacuation
791  // candidates.
792  bool compacting_;
793 
794  bool was_marked_incrementally_;
795 
796  // True if concurrent or parallel sweeping is currently in progress.
797  bool sweeping_pending_;
798 
799  Semaphore pending_sweeper_jobs_semaphore_;
800 
801  bool sequential_sweeping_;
802 
803  // A pointer to the current stack-allocated GC tracer object during a full
804  // collection (NULL before and after).
805  GCTracer* tracer_;
806 
807  SlotsBufferAllocator slots_buffer_allocator_;
808 
809  SlotsBuffer* migration_slots_buffer_;
810 
811  // Finishes GC, performs heap verification if enabled.
812  void Finish();
813 
814  // -----------------------------------------------------------------------
815  // Phase 1: Marking live objects.
816  //
817  // Before: The heap has been prepared for garbage collection by
818  // MarkCompactCollector::Prepare() and is otherwise in its
819  // normal state.
820  //
821  // After: Live objects are marked and non-live objects are unmarked.
822 
823  friend class RootMarkingVisitor;
824  friend class MarkingVisitor;
825  friend class MarkCompactMarkingVisitor;
826  friend class CodeMarkingVisitor;
827  friend class SharedFunctionInfoMarkingVisitor;
828 
829  // Mark code objects that are active on the stack to prevent them
830  // from being flushed.
831  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
832 
833  void PrepareForCodeFlushing();
834 
835  // Marking operations for objects reachable from roots.
836  void MarkLiveObjects();
837 
838  void AfterMarking();
839 
840  // Marks the object black and pushes it on the marking stack.
841  // This is for non-incremental marking only.
842  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
843 
844  // Marks the object black assuming that it is not yet marked.
845  // This is for non-incremental marking only.
846  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
847 
848  // Mark the heap roots and all objects reachable from them.
849  void MarkRoots(RootMarkingVisitor* visitor);
850 
851  // Mark the string table specially. References to internalized strings from
852  // the string table are weak.
853  void MarkStringTable(RootMarkingVisitor* visitor);
854 
855  // Mark objects in implicit references groups if their parent object
856  // is marked.
857  void MarkImplicitRefGroups();
858 
859  // Mark objects reachable (transitively) from objects in the marking stack
860  // or overflowed in the heap.
861  void ProcessMarkingDeque();
862 
863  // Mark objects reachable (transitively) from objects in the marking stack
864  // or overflowed in the heap. This respects references only considered in
865  // the final atomic marking pause including the following:
866  // - Processing of objects reachable through Harmony WeakMaps.
867  // - Objects reachable due to host application logic like object groups
868  // or implicit references' groups.
869  void ProcessEphemeralMarking(ObjectVisitor* visitor);
870 
871  // If the call-site of the top optimized code was not prepared for
872  // deoptimization, then treat the maps in the code as strong pointers,
873  // otherwise a map can die and deoptimize the code.
874  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
875 
876  // Mark objects reachable (transitively) from objects in the marking
877  // stack. This function empties the marking stack, but may leave
878  // overflowed objects in the heap, in which case the marking stack's
879  // overflow flag will be set.
880  void EmptyMarkingDeque();
881 
882  // Refill the marking stack with overflowed objects from the heap. This
883  // function either leaves the marking stack full or clears the overflow
884  // flag on the marking stack.
885  void RefillMarkingDeque();
886 
887  // After reachable maps have been marked process per context object
888  // literal map caches removing unmarked entries.
889  void ProcessMapCaches();
890 
891  // Callback function for telling whether the object *p is an unmarked
892  // heap object.
893  static bool IsUnmarkedHeapObject(Object** p);
894  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
895 
896  // Map transitions from a live map to a dead map must be killed.
897  // We replace them with a null descriptor, with the same key.
898  void ClearNonLiveReferences();
899  void ClearNonLivePrototypeTransitions(Map* map);
900  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
901 
902  void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
903  void ClearNonLiveDependentCode(DependentCode* dependent_code);
904 
905  // Marking detaches initial maps from SharedFunctionInfo objects
906  // to make this reference weak. We need to reattach initial maps
907  // back after collection. This is either done during
908  // ClearNonLiveTransitions pass or by calling this function.
909  void ReattachInitialMaps();
910 
911  // Mark all values associated with reachable keys in weak collections
912  // encountered so far. This might push new object or even new weak maps onto
913  // the marking stack.
914  void ProcessWeakCollections();
915 
916  // After all reachable objects have been marked those weak map entries
917  // with an unreachable key are removed from all encountered weak maps.
918  // The linked list of all encountered weak maps is destroyed.
919  void ClearWeakCollections();
920 
921  // -----------------------------------------------------------------------
922  // Phase 2: Sweeping to clear mark bits and free non-live objects for
923  // a non-compacting collection.
924  //
925  // Before: Live objects are marked and non-live objects are unmarked.
926  //
927  // After: Live objects are unmarked, non-live regions have been added to
928  // their space's free list. Active eden semispace is compacted by
929  // evacuation.
930  //
931 
932  // If we are not compacting the heap, we simply sweep the spaces except
933  // for the large object space, clearing mark bits and adding unmarked
934  // regions to each space's free list.
935  void SweepSpaces();
936 
937  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
938  NewSpacePage* p);
939 
940  void EvacuateNewSpace();
941 
942  void EvacuateLiveObjectsFromPage(Page* p);
943 
944  void EvacuatePages();
945 
946  void EvacuateNewSpaceAndCandidates();
947 
948  void SweepSpace(PagedSpace* space, SweeperType sweeper);
949 
950  // Finalizes the parallel sweeping phase. Marks all the pages that were
951  // swept in parallel.
952  void ParallelSweepSpacesComplete();
953 
954  void ParallelSweepSpaceComplete(PagedSpace* space);
955 
956 #ifdef DEBUG
957  friend class MarkObjectVisitor;
958  static void VisitObject(HeapObject* obj);
959 
960  friend class UnmarkObjectVisitor;
961  static void UnmarkObject(HeapObject* obj);
962 #endif
963 
964  Heap* heap_;
965  MarkingDeque marking_deque_;
966  CodeFlusher* code_flusher_;
967  Object* encountered_weak_collections_;
968  bool have_code_to_deoptimize_;
969 
970  List<Page*> evacuation_candidates_;
971  List<Code*> invalidated_code_;
972 
973  SmartPointer<FreeList> free_list_old_data_space_;
974  SmartPointer<FreeList> free_list_old_pointer_space_;
975 
976  friend class Heap;
977 };
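Tying the pieces together, the Phase 1 comments above describe marking as: mark the roots, then drain the marking deque until it is empty, pushing newly discovered children as you go. The standalone sketch below runs that loop over an invented Node graph, with a plain std::vector standing in for the MarkingDeque and a single bool standing in for the mark bits.

#include <cstdio>
#include <vector>

// Invented stand-in for a heap object with outgoing pointers.
struct Node {
  const char* name;
  std::vector<Node*> children;
  bool marked;
};

// Mark the roots, then empty the worklist, as in MarkRoots() followed by
// EmptyMarkingDeque() in the real collector.
static void MarkLiveObjects(const std::vector<Node*>& roots) {
  std::vector<Node*> deque;
  for (size_t i = 0; i < roots.size(); i++) {
    if (!roots[i]->marked) {
      roots[i]->marked = true;
      deque.push_back(roots[i]);
    }
  }
  while (!deque.empty()) {
    Node* object = deque.back();
    deque.pop_back();
    for (size_t i = 0; i < object->children.size(); i++) {
      Node* child = object->children[i];
      if (!child->marked) {
        child->marked = true;             // white -> grey in the real scheme
        deque.push_back(child);
      }
    }
  }
}

int main() {
  Node c = { "c", std::vector<Node*>(), false };
  Node b = { "b", std::vector<Node*>(1, &c), false };
  Node a = { "a", std::vector<Node*>(1, &b), false };
  Node garbage = { "garbage", std::vector<Node*>(), false };
  MarkLiveObjects(std::vector<Node*>(1, &a));
  std::printf("a:%d b:%d c:%d garbage:%d\n",
              a.marked, b.marked, c.marked, garbage.marked);
  return 0;
}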
978 
979 
980 class MarkBitCellIterator BASE_EMBEDDED {
981  public:
982  explicit MarkBitCellIterator(MemoryChunk* chunk)
983  : chunk_(chunk) {
984  last_cell_index_ = Bitmap::IndexToCell(
985  Bitmap::CellAlignIndex(
986  chunk_->AddressToMarkbitIndex(chunk_->area_end())));
987  cell_base_ = chunk_->area_start();
988  cell_index_ = Bitmap::IndexToCell(
989  Bitmap::CellAlignIndex(
990  chunk_->AddressToMarkbitIndex(cell_base_)));
991  cells_ = chunk_->markbits()->cells();
992  }
993 
994  inline bool Done() { return cell_index_ == last_cell_index_; }
995 
996  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
997 
998  inline MarkBit::CellType* CurrentCell() {
999  ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
1000  chunk_->AddressToMarkbitIndex(cell_base_))));
1001  return &cells_[cell_index_];
1002  }
1003 
1004  inline Address CurrentCellBase() {
1005  ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
1006  chunk_->AddressToMarkbitIndex(cell_base_))));
1007  return cell_base_;
1008  }
1009 
1010  inline void Advance() {
1011  cell_index_++;
1012  cell_base_ += 32 * kPointerSize;
1013  }
1014 
1015  private:
1016  MemoryChunk* chunk_;
1017  MarkBit::CellType* cells_;
1018  unsigned int last_cell_index_;
1019  unsigned int cell_index_;
1020  Address cell_base_;
1021 };
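Each 32-bit cell of the marking bitmap covers 32 pointer-size words of the page, which is why Advance() above bumps cell_base_ by 32 * kPointerSize. The standalone loop below walks an invented two-cell bitmap the same way and turns set bits back into word addresses (kAreaStart and the cell contents are made up for the example).

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kWordSize = sizeof(void*);        // plays the role of kPointerSize
  const uintptr_t kAreaStart = 0x10000;             // invented page area start
  // Toy marking bitmap: each uint32_t cell covers 32 words; here words 0, 2
  // and 63 are marked.
  uint32_t cells[2] = { 0x00000005u, 0x80000000u };

  uintptr_t cell_base = kAreaStart;                 // like cell_base_ above
  for (unsigned cell_index = 0; cell_index < 2; cell_index++) {
    for (unsigned bit = 0; bit < 32; bit++) {
      if (cells[cell_index] & (1u << bit)) {
        uintptr_t address = cell_base + bit * kWordSize;
        std::printf("marked word at %#lx\n",
                    static_cast<unsigned long>(address));
      }
    }
    cell_base += 32 * kWordSize;                    // same stride as Advance()
  }
  return 0;
}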
1022 
1023 
1024 class SequentialSweepingScope BASE_EMBEDDED {
1025  public:
1026  explicit SequentialSweepingScope(MarkCompactCollector* collector) :
1027  collector_(collector) {
1028  collector_->set_sequential_sweeping(true);
1029  }
1030 
1031  ~SequentialSweepingScope() {
1032  collector_->set_sequential_sweeping(false);
1033  }
1034 
1035  private:
1036  MarkCompactCollector* collector_;
1037 };
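SequentialSweepingScope is a plain RAII guard: the flag is set in the constructor and reliably cleared in the destructor, even on early returns. The generic standalone version below (FlagScope and the guarded bool are invented for the sketch) shows the same pattern.

#include <cassert>

// Flips a bool for the lifetime of the scope object and restores it on
// destruction, like SequentialSweepingScope does via set_sequential_sweeping.
class FlagScope {
 public:
  explicit FlagScope(bool* flag) : flag_(flag) { *flag_ = true; }
  ~FlagScope() { *flag_ = false; }

 private:
  bool* flag_;
};

int main() {
  bool sequential_sweeping = false;
  {
    FlagScope scope(&sequential_sweeping);
    assert(sequential_sweeping);
  }                                        // destructor clears the flag here
  assert(!sequential_sweeping);
  return 0;
}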
1038 
1039 
1040 const char* AllocationSpaceName(AllocationSpace space);
1041 
1042 } } // namespace v8::internal
1043 
1044 #endif // V8_MARK_COMPACT_H_