v8  3.14.5(node0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
heap.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "deoptimizer.h"
37 #include "global-handles.h"
38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "liveobjectlist-inl.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
60 #endif
61 
62 namespace v8 {
63 namespace internal {
64 
65 
66 Heap::Heap()
67  : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if defined(V8_TARGET_ARCH_X64)
71 #define LUMP_OF_MEMORY (2 * MB)
72  code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75  code_range_size_(0),
76 #endif
77 #if defined(ANDROID)
78  reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79  max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80  initial_semispace_size_(Page::kPageSize),
81  max_old_generation_size_(192*MB),
82  max_executable_size_(max_old_generation_size_),
83 #else
84  reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85  max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86  initial_semispace_size_(Page::kPageSize),
87  max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88  max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90 
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
93 // Will be 4 * reserved_semispace_size_ to ensure that young
94 // generation can be aligned to its size.
95  survived_since_last_expansion_(0),
96  sweep_generation_(0),
97  always_allocate_scope_depth_(0),
98  linear_allocation_scope_depth_(0),
99  contexts_disposed_(0),
100  global_ic_age_(0),
101  flush_monomorphic_ics_(false),
102  scan_on_scavenge_pages_(0),
103  new_space_(this),
104  old_pointer_space_(NULL),
105  old_data_space_(NULL),
106  code_space_(NULL),
107  map_space_(NULL),
108  cell_space_(NULL),
109  lo_space_(NULL),
110  gc_state_(NOT_IN_GC),
111  gc_post_processing_depth_(0),
112  ms_count_(0),
113  gc_count_(0),
114  remembered_unmapped_pages_index_(0),
115  unflattened_strings_length_(0),
116 #ifdef DEBUG
117  allocation_allowed_(true),
118  allocation_timeout_(0),
119  disallow_allocation_failure_(false),
120  debug_utils_(NULL),
121 #endif // DEBUG
122  new_space_high_promotion_mode_active_(false),
123  old_gen_promotion_limit_(kMinimumPromotionLimit),
124  old_gen_allocation_limit_(kMinimumAllocationLimit),
125  old_gen_limit_factor_(1),
126  size_of_old_gen_at_last_old_space_gc_(0),
127  external_allocation_limit_(0),
128  amount_of_external_allocated_memory_(0),
129  amount_of_external_allocated_memory_at_last_global_gc_(0),
130  old_gen_exhausted_(false),
131  store_buffer_rebuilder_(store_buffer()),
132  hidden_symbol_(NULL),
133  global_gc_prologue_callback_(NULL),
134  global_gc_epilogue_callback_(NULL),
135  gc_safe_size_of_old_object_(NULL),
136  total_regexp_code_generated_(0),
137  tracer_(NULL),
138  young_survivors_after_last_gc_(0),
139  high_survival_rate_period_length_(0),
140  survival_rate_(0),
141  previous_survival_rate_trend_(Heap::STABLE),
142  survival_rate_trend_(Heap::STABLE),
143  max_gc_pause_(0),
144  total_gc_time_ms_(0),
145  max_alive_after_gc_(0),
146  min_in_mutator_(kMaxInt),
147  alive_after_last_gc_(0),
148  last_gc_end_timestamp_(0.0),
149  store_buffer_(this),
150  marking_(this),
151  incremental_marking_(this),
152  number_idle_notifications_(0),
153  last_idle_notification_gc_count_(0),
154  last_idle_notification_gc_count_init_(false),
155  mark_sweeps_since_idle_round_started_(0),
156  ms_count_at_last_idle_notification_(0),
157  gc_count_at_last_idle_gc_(0),
158  scavenges_since_last_idle_round_(kIdleScavengeThreshold),
159  promotion_queue_(this),
160  configured_(false),
161  chunks_queued_for_free_(NULL),
162  relocation_mutex_(NULL) {
163  // Allow build-time customization of the max semispace size. Building
164  // V8 with snapshots and a non-default max semispace size is much
165  // easier if you can define it as part of the build environment.
166 #if defined(V8_MAX_SEMISPACE_SIZE)
167  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
168 #endif
169 
170  intptr_t max_virtual = OS::MaxVirtualMemory();
171 
172  if (max_virtual > 0) {
173  if (code_range_size_ > 0) {
174  // Reserve no more than 1/8 of the memory for the code range.
175  code_range_size_ = Min(code_range_size_, max_virtual >> 3);
176  }
177  }
178 
179  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
180  native_contexts_list_ = NULL;
181  mark_compact_collector_.heap_ = this;
182  external_string_table_.heap_ = this;
183  // Put a dummy entry in the remembered pages so we can find the list the
184  // minidump even if there are no real unmapped pages.
185  RememberUnmappedPage(NULL, false);
186 
187  ClearObjectStats(true);
188 }
189 
190 
191 intptr_t Heap::Capacity() {
192  if (!HasBeenSetUp()) return 0;
193 
194  return new_space_.Capacity() +
195  old_pointer_space_->Capacity() +
196  old_data_space_->Capacity() +
197  code_space_->Capacity() +
198  map_space_->Capacity() +
199  cell_space_->Capacity();
200 }
201 
202 
204  if (!HasBeenSetUp()) return 0;
205 
206  return new_space_.CommittedMemory() +
207  old_pointer_space_->CommittedMemory() +
208  old_data_space_->CommittedMemory() +
209  code_space_->CommittedMemory() +
210  map_space_->CommittedMemory() +
211  cell_space_->CommittedMemory() +
212  lo_space_->Size();
213 }
214 
216  if (!HasBeenSetUp()) return 0;
217 
218  return isolate()->memory_allocator()->SizeExecutable();
219 }
220 
221 
222 intptr_t Heap::Available() {
223  if (!HasBeenSetUp()) return 0;
224 
225  return new_space_.Available() +
226  old_pointer_space_->Available() +
227  old_data_space_->Available() +
228  code_space_->Available() +
229  map_space_->Available() +
230  cell_space_->Available();
231 }
232 
233 
235  return old_pointer_space_ != NULL &&
236  old_data_space_ != NULL &&
237  code_space_ != NULL &&
238  map_space_ != NULL &&
239  cell_space_ != NULL &&
240  lo_space_ != NULL;
241 }
242 
243 
244 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
245  if (IntrusiveMarking::IsMarked(object)) {
247  }
248  return object->SizeFromMap(object->map());
249 }
250 
251 
252 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
253  const char** reason) {
254  // Is global GC requested?
255  if (space != NEW_SPACE) {
256  isolate_->counters()->gc_compactor_caused_by_request()->Increment();
257  *reason = "GC in old space requested";
258  return MARK_COMPACTOR;
259  }
260 
261  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
262  *reason = "GC in old space forced by flags";
263  return MARK_COMPACTOR;
264  }
265 
266  // Is enough data promoted to justify a global GC?
268  isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
269  *reason = "promotion limit reached";
270  return MARK_COMPACTOR;
271  }
272 
273  // Have allocation in OLD and LO failed?
274  if (old_gen_exhausted_) {
275  isolate_->counters()->
276  gc_compactor_caused_by_oldspace_exhaustion()->Increment();
277  *reason = "old generations exhausted";
278  return MARK_COMPACTOR;
279  }
280 
281  // Is there enough space left in OLD to guarantee that a scavenge can
282  // succeed?
283  //
284  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
285  // for object promotion. It counts only the bytes that the memory
286  // allocator has not yet allocated from the OS and assigned to any space,
287  // and does not count available bytes already in the old space or code
288  // space. Undercounting is safe---we may get an unrequested full GC when
289  // a scavenge would have succeeded.
290  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
291  isolate_->counters()->
292  gc_compactor_caused_by_oldspace_exhaustion()->Increment();
293  *reason = "scavenge might not succeed";
294  return MARK_COMPACTOR;
295  }
296 
297  // Default
298  *reason = NULL;
299  return SCAVENGER;
300 }
301 
302 
303 // TODO(1238405): Combine the infrastructure for --heap-stats and
304 // --log-gc to avoid the complicated preprocessor and flag testing.
305 void Heap::ReportStatisticsBeforeGC() {
306  // Heap::ReportHeapStatistics will also log NewSpace statistics when
307  // compiled --log-gc is set. The following logic is used to avoid
308  // double logging.
309 #ifdef DEBUG
310  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
311  if (FLAG_heap_stats) {
312  ReportHeapStatistics("Before GC");
313  } else if (FLAG_log_gc) {
314  new_space_.ReportStatistics();
315  }
316  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
317 #else
318  if (FLAG_log_gc) {
319  new_space_.CollectStatistics();
320  new_space_.ReportStatistics();
321  new_space_.ClearHistograms();
322  }
323 #endif // DEBUG
324 }
325 
326 
328  if (!FLAG_trace_gc_verbose) return;
329  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
330  ", available: %6" V8_PTR_PREFIX "d KB\n",
331  isolate_->memory_allocator()->Size() / KB,
332  isolate_->memory_allocator()->Available() / KB);
333  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
334  ", available: %6" V8_PTR_PREFIX "d KB"
335  ", committed: %6" V8_PTR_PREFIX "d KB\n",
336  new_space_.Size() / KB,
337  new_space_.Available() / KB,
338  new_space_.CommittedMemory() / KB);
339  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
340  ", available: %6" V8_PTR_PREFIX "d KB"
341  ", committed: %6" V8_PTR_PREFIX "d KB\n",
342  old_pointer_space_->SizeOfObjects() / KB,
343  old_pointer_space_->Available() / KB,
344  old_pointer_space_->CommittedMemory() / KB);
345  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
346  ", available: %6" V8_PTR_PREFIX "d KB"
347  ", committed: %6" V8_PTR_PREFIX "d KB\n",
348  old_data_space_->SizeOfObjects() / KB,
349  old_data_space_->Available() / KB,
350  old_data_space_->CommittedMemory() / KB);
351  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
352  ", available: %6" V8_PTR_PREFIX "d KB"
353  ", committed: %6" V8_PTR_PREFIX "d KB\n",
354  code_space_->SizeOfObjects() / KB,
355  code_space_->Available() / KB,
356  code_space_->CommittedMemory() / KB);
357  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
358  ", available: %6" V8_PTR_PREFIX "d KB"
359  ", committed: %6" V8_PTR_PREFIX "d KB\n",
360  map_space_->SizeOfObjects() / KB,
361  map_space_->Available() / KB,
362  map_space_->CommittedMemory() / KB);
363  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
364  ", available: %6" V8_PTR_PREFIX "d KB"
365  ", committed: %6" V8_PTR_PREFIX "d KB\n",
366  cell_space_->SizeOfObjects() / KB,
367  cell_space_->Available() / KB,
368  cell_space_->CommittedMemory() / KB);
369  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
370  ", available: %6" V8_PTR_PREFIX "d KB"
371  ", committed: %6" V8_PTR_PREFIX "d KB\n",
372  lo_space_->SizeOfObjects() / KB,
373  lo_space_->Available() / KB,
374  lo_space_->CommittedMemory() / KB);
375  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
376  ", available: %6" V8_PTR_PREFIX "d KB"
377  ", committed: %6" V8_PTR_PREFIX "d KB\n",
378  this->SizeOfObjects() / KB,
379  this->Available() / KB,
380  this->CommittedMemory() / KB);
381  PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
382 }
383 
384 
385 // TODO(1238405): Combine the infrastructure for --heap-stats and
386 // --log-gc to avoid the complicated preprocessor and flag testing.
387 void Heap::ReportStatisticsAfterGC() {
388  // Similar to the before GC, we use some complicated logic to ensure that
389  // NewSpace statistics are logged exactly once when --log-gc is turned on.
390 #if defined(DEBUG)
391  if (FLAG_heap_stats) {
392  new_space_.CollectStatistics();
393  ReportHeapStatistics("After GC");
394  } else if (FLAG_log_gc) {
395  new_space_.ReportStatistics();
396  }
397 #else
398  if (FLAG_log_gc) new_space_.ReportStatistics();
399 #endif // DEBUG
400 }
401 
402 
404  isolate_->transcendental_cache()->Clear();
406  gc_count_++;
407  unflattened_strings_length_ = 0;
408 
409 #ifdef VERIFY_HEAP
410  if (FLAG_verify_heap) {
411  Verify();
412  }
413 #endif
414 
415 #ifdef DEBUG
416  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
417  allow_allocation(false);
418 
419  if (FLAG_gc_verbose) Print();
420 
421  ReportStatisticsBeforeGC();
422 #endif // DEBUG
423 
426 }
427 
428 
430  intptr_t total = 0;
431  AllSpaces spaces;
432  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
433  total += space->SizeOfObjects();
434  }
435  return total;
436 }
437 
438 
440  PagedSpaces spaces;
441  for (PagedSpace* space = spaces.next();
442  space != NULL;
443  space = spaces.next()) {
444  space->RepairFreeListsAfterBoot();
445  }
446 }
447 
448 
452 
453  // In release mode, we only zap the from space under heap verification.
454  if (Heap::ShouldZapGarbage()) {
455  ZapFromSpace();
456  }
457 
458 #ifdef VERIFY_HEAP
459  if (FLAG_verify_heap) {
460  Verify();
461  }
462 #endif
463 
464 #ifdef DEBUG
465  allow_allocation(true);
466  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
467  if (FLAG_print_handles) PrintHandles();
468  if (FLAG_gc_verbose) Print();
469  if (FLAG_code_stats) ReportCodeStatistics("After GC");
470 #endif
471 
472  isolate_->counters()->alive_after_last_gc()->Set(
473  static_cast<int>(SizeOfObjects()));
474 
475  isolate_->counters()->symbol_table_capacity()->Set(
476  symbol_table()->Capacity());
477  isolate_->counters()->number_of_symbols()->Set(
478  symbol_table()->NumberOfElements());
479 
480  if (CommittedMemory() > 0) {
481  isolate_->counters()->external_fragmentation_total()->AddSample(
482  static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
483 
484  isolate_->counters()->heap_fraction_map_space()->AddSample(
485  static_cast<int>(
486  (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
487  isolate_->counters()->heap_fraction_cell_space()->AddSample(
488  static_cast<int>(
489  (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
490 
491  isolate_->counters()->heap_sample_total_committed()->AddSample(
492  static_cast<int>(CommittedMemory() / KB));
493  isolate_->counters()->heap_sample_total_used()->AddSample(
494  static_cast<int>(SizeOfObjects() / KB));
495  isolate_->counters()->heap_sample_map_space_committed()->AddSample(
496  static_cast<int>(map_space()->CommittedMemory() / KB));
497  isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
498  static_cast<int>(cell_space()->CommittedMemory() / KB));
499  }
500 
501 #define UPDATE_COUNTERS_FOR_SPACE(space) \
502  isolate_->counters()->space##_bytes_available()->Set( \
503  static_cast<int>(space()->Available())); \
504  isolate_->counters()->space##_bytes_committed()->Set( \
505  static_cast<int>(space()->CommittedMemory())); \
506  isolate_->counters()->space##_bytes_used()->Set( \
507  static_cast<int>(space()->SizeOfObjects()));
508 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
509  if (space()->CommittedMemory() > 0) { \
510  isolate_->counters()->external_fragmentation_##space()->AddSample( \
511  static_cast<int>(100 - \
512  (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
513  }
514 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
515  UPDATE_COUNTERS_FOR_SPACE(space) \
516  UPDATE_FRAGMENTATION_FOR_SPACE(space)
517 
525 #undef UPDATE_COUNTERS_FOR_SPACE
526 #undef UPDATE_FRAGMENTATION_FOR_SPACE
527 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
528 
529 #if defined(DEBUG)
530  ReportStatisticsAfterGC();
531 #endif // DEBUG
532 #ifdef ENABLE_DEBUGGER_SUPPORT
533  isolate_->debug()->AfterGarbageCollection();
534 #endif // ENABLE_DEBUGGER_SUPPORT
535 }
536 
537 
538 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
539  // Since we are ignoring the return value, the exact choice of space does
540  // not matter, so long as we do not specify NEW_SPACE, which would not
541  // cause a full GC.
542  mark_compact_collector_.SetFlags(flags);
543  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
544  mark_compact_collector_.SetFlags(kNoGCFlags);
545 }
546 
547 
548 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
549  // Since we are ignoring the return value, the exact choice of space does
550  // not matter, so long as we do not specify NEW_SPACE, which would not
551  // cause a full GC.
552  // Major GC would invoke weak handle callbacks on weakly reachable
553  // handles, but won't collect weakly reachable objects until next
554  // major GC. Therefore if we collect aggressively and weak handle callback
555  // has been invoked, we rerun major GC to release objects which become
556  // garbage.
557  // Note: as weak callbacks can execute arbitrary code, we cannot
558  // hope that eventually there will be no weak callbacks invocations.
559  // Therefore stop recollecting after several attempts.
562  isolate_->compilation_cache()->Clear();
563  const int kMaxNumberOfAttempts = 7;
564  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
566  break;
567  }
568  }
570  new_space_.Shrink();
572  Shrink();
574 }
575 
576 
578  GarbageCollector collector,
579  const char* gc_reason,
580  const char* collector_reason) {
581  // The VM is in the GC state until exiting this function.
582  VMState state(isolate_, GC);
583 
584 #ifdef DEBUG
585  // Reset the allocation timeout to the GC interval, but make sure to
586  // allow at least a few allocations after a collection. The reason
587  // for this is that we have a lot of allocation sequences and we
588  // assume that a garbage collection will allow the subsequent
589  // allocation attempts to go through.
590  allocation_timeout_ = Max(6, FLAG_gc_interval);
591 #endif
592 
593  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
594  if (FLAG_trace_incremental_marking) {
595  PrintF("[IncrementalMarking] Scavenge during marking.\n");
596  }
597  }
598 
599  if (collector == MARK_COMPACTOR &&
600  !mark_compact_collector()->abort_incremental_marking_ &&
601  !incremental_marking()->IsStopped() &&
602  !incremental_marking()->should_hurry() &&
603  FLAG_incremental_marking_steps) {
604  // Make progress in incremental marking.
605  const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
606  incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
608  if (!incremental_marking()->IsComplete()) {
609  if (FLAG_trace_incremental_marking) {
610  PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
611  }
612  collector = SCAVENGER;
613  collector_reason = "incremental marking delaying mark-sweep";
614  }
615  }
616 
617  bool next_gc_likely_to_collect_more = false;
618 
619  { GCTracer tracer(this, gc_reason, collector_reason);
621  // The GC count was incremented in the prologue. Tell the tracer about
622  // it.
623  tracer.set_gc_count(gc_count_);
624 
625  // Tell the tracer which collector we've selected.
626  tracer.set_collector(collector);
627 
628  HistogramTimer* rate = (collector == SCAVENGER)
629  ? isolate_->counters()->gc_scavenger()
630  : isolate_->counters()->gc_compactor();
631  rate->Start();
632  next_gc_likely_to_collect_more =
633  PerformGarbageCollection(collector, &tracer);
634  rate->Stop();
635 
636  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
637 
638  // This can do debug callbacks and restart incremental marking.
640  }
641 
642  if (incremental_marking()->IsStopped()) {
643  if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
645  }
646  }
647 
648  return next_gc_likely_to_collect_more;
649 }
650 
651 
653  GCTracer tracer(this, NULL, NULL);
654  if (incremental_marking()->IsStopped()) {
655  PerformGarbageCollection(SCAVENGER, &tracer);
656  } else {
657  PerformGarbageCollection(MARK_COMPACTOR, &tracer);
658  }
659 }
660 
661 
662 #ifdef VERIFY_HEAP
663 // Helper class for verifying the symbol table.
664 class SymbolTableVerifier : public ObjectVisitor {
665  public:
666  void VisitPointers(Object** start, Object** end) {
667  // Visit all HeapObject pointers in [start, end).
668  for (Object** p = start; p < end; p++) {
669  if ((*p)->IsHeapObject()) {
670  // Check that the symbol is actually a symbol.
671  CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
672  }
673  }
674  }
675 };
676 
677 
678 static void VerifySymbolTable() {
679  SymbolTableVerifier verifier;
680  HEAP->symbol_table()->IterateElements(&verifier);
681 }
682 #endif // VERIFY_HEAP
683 
684 
685 static bool AbortIncrementalMarkingAndCollectGarbage(
686  Heap* heap,
687  AllocationSpace space,
688  const char* gc_reason = NULL) {
689  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
690  bool result = heap->CollectGarbage(space, gc_reason);
691  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
692  return result;
693 }
694 
695 
697  int *sizes,
698  Address *locations_out) {
699  bool gc_performed = true;
700  int counter = 0;
701  static const int kThreshold = 20;
702  while (gc_performed && counter++ < kThreshold) {
703  gc_performed = false;
705  for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
706  if (sizes[space] != 0) {
707  MaybeObject* allocation;
708  if (space == NEW_SPACE) {
709  allocation = new_space()->AllocateRaw(sizes[space]);
710  } else {
711  allocation = paged_space(space)->AllocateRaw(sizes[space]);
712  }
713  FreeListNode* node;
714  if (!allocation->To<FreeListNode>(&node)) {
715  if (space == NEW_SPACE) {
717  "failed to reserve space in the new space");
718  } else {
719  AbortIncrementalMarkingAndCollectGarbage(
720  this,
721  static_cast<AllocationSpace>(space),
722  "failed to reserve space in paged space");
723  }
724  gc_performed = true;
725  break;
726  } else {
727  // Mark with a free list node, in case we have a GC before
728  // deserializing.
729  node->set_size(this, sizes[space]);
730  locations_out[space] = node->address();
731  }
732  }
733  }
734  }
735 
736  if (gc_performed) {
737  // Failed to reserve the space after several attempts.
738  V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
739  }
740 }
741 
742 
744  if (new_space_.CommitFromSpaceIfNeeded()) return;
745 
746  // Committing memory to from space failed.
747  // Try shrinking and try again.
748  Shrink();
749  if (new_space_.CommitFromSpaceIfNeeded()) return;
750 
751  // Committing memory to from space failed again.
752  // Memory is exhausted and we will die.
753  V8::FatalProcessOutOfMemory("Committing semi space failed.");
754 }
755 
756 
758  if (isolate_->bootstrapper()->IsActive()) return;
759 
760  Object* context = native_contexts_list_;
761  while (!context->IsUndefined()) {
762  // Get the caches for this context. GC can happen when the context
763  // is not fully initialized, so the caches can be undefined.
764  Object* caches_or_undefined =
766  if (!caches_or_undefined->IsUndefined()) {
767  FixedArray* caches = FixedArray::cast(caches_or_undefined);
768  // Clear the caches:
769  int length = caches->length();
770  for (int i = 0; i < length; i++) {
771  JSFunctionResultCache::cast(caches->get(i))->Clear();
772  }
773  }
774  // Get the next context:
775  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
776  }
777 }
778 
779 
780 
782  if (isolate_->bootstrapper()->IsActive() &&
783  !incremental_marking()->IsMarking()) {
784  return;
785  }
786 
787  Object* context = native_contexts_list_;
788  while (!context->IsUndefined()) {
789  // GC can happen when the context is not fully initialized,
790  // so the cache can be undefined.
791  Object* cache =
793  if (!cache->IsUndefined()) {
795  }
796  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
797  }
798 }
799 
800 
801 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
802  double survival_rate =
803  (static_cast<double>(young_survivors_after_last_gc_) * 100) /
804  start_new_space_size;
805 
806  if (survival_rate > kYoungSurvivalRateHighThreshold) {
807  high_survival_rate_period_length_++;
808  } else {
809  high_survival_rate_period_length_ = 0;
810  }
811 
812  if (survival_rate < kYoungSurvivalRateLowThreshold) {
813  low_survival_rate_period_length_++;
814  } else {
815  low_survival_rate_period_length_ = 0;
816  }
817 
818  double survival_rate_diff = survival_rate_ - survival_rate;
819 
820  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
821  set_survival_rate_trend(DECREASING);
822  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
823  set_survival_rate_trend(INCREASING);
824  } else {
825  set_survival_rate_trend(STABLE);
826  }
827 
828  survival_rate_ = survival_rate;
829 }
830 
831 bool Heap::PerformGarbageCollection(GarbageCollector collector,
832  GCTracer* tracer) {
833  bool next_gc_likely_to_collect_more = false;
834 
835  if (collector != SCAVENGER) {
836  PROFILE(isolate_, CodeMovingGCEvent());
837  }
838 
839 #ifdef VERIFY_HEAP
840  if (FLAG_verify_heap) {
841  VerifySymbolTable();
842  }
843 #endif
844 
845  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
846  ASSERT(!allocation_allowed_);
847  GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
848  global_gc_prologue_callback_();
849  }
850 
851  GCType gc_type =
853 
854  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
855  if (gc_type & gc_prologue_callbacks_[i].gc_type) {
856  gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
857  }
858  }
859 
861 
862  int start_new_space_size = Heap::new_space()->SizeAsInt();
863 
864  if (IsHighSurvivalRate()) {
865  // We speed up the incremental marker if it is running so that it
866  // does not fall behind the rate of promotion, which would cause a
867  // constantly growing old space.
869  }
870 
871  if (collector == MARK_COMPACTOR) {
872  // Perform mark-sweep with optional compaction.
873  MarkCompact(tracer);
874  sweep_generation_++;
875  bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
876  IsStableOrIncreasingSurvivalTrend();
877 
878  UpdateSurvivalRateTrend(start_new_space_size);
879 
880  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
881 
882  if (high_survival_rate_during_scavenges &&
883  IsStableOrIncreasingSurvivalTrend()) {
884  // Stable high survival rates of young objects both during partial and
885  // full collection indicate that mutator is either building or modifying
886  // a structure with a long lifetime.
887  // In this case we aggressively raise old generation memory limits to
888  // postpone subsequent mark-sweep collection and thus trade memory
889  // space for the mutation speed.
890  old_gen_limit_factor_ = 2;
891  } else {
892  old_gen_limit_factor_ = 1;
893  }
894 
895  old_gen_promotion_limit_ =
896  OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
897  old_gen_allocation_limit_ =
898  OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
899 
900  old_gen_exhausted_ = false;
901  } else {
902  tracer_ = tracer;
903  Scavenge();
904  tracer_ = NULL;
905 
906  UpdateSurvivalRateTrend(start_new_space_size);
907  }
908 
909  if (!new_space_high_promotion_mode_active_ &&
910  new_space_.Capacity() == new_space_.MaximumCapacity() &&
911  IsStableOrIncreasingSurvivalTrend() &&
912  IsHighSurvivalRate()) {
913  // Stable high survival rates even though young generation is at
914  // maximum capacity indicates that most objects will be promoted.
915  // To decrease scavenger pauses and final mark-sweep pauses, we
916  // have to limit maximal capacity of the young generation.
917  new_space_high_promotion_mode_active_ = true;
918  if (FLAG_trace_gc) {
919  PrintPID("Limited new space size due to high promotion rate: %d MB\n",
920  new_space_.InitialCapacity() / MB);
921  }
922  } else if (new_space_high_promotion_mode_active_ &&
923  IsStableOrDecreasingSurvivalTrend() &&
924  IsLowSurvivalRate()) {
925  // Decreasing low survival rates might indicate that the above high
926  // promotion mode is over and we should allow the young generation
927  // to grow again.
928  new_space_high_promotion_mode_active_ = false;
929  if (FLAG_trace_gc) {
930  PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
931  new_space_.MaximumCapacity() / MB);
932  }
933  }
934 
935  if (new_space_high_promotion_mode_active_ &&
936  new_space_.Capacity() > new_space_.InitialCapacity()) {
937  new_space_.Shrink();
938  }
939 
940  isolate_->counters()->objs_since_last_young()->Set(0);
941 
942  gc_post_processing_depth_++;
943  { DisableAssertNoAllocation allow_allocation;
944  GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
945  next_gc_likely_to_collect_more =
946  isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
947  }
948  gc_post_processing_depth_--;
949 
950  // Update relocatables.
951  Relocatable::PostGarbageCollectionProcessing();
952 
953  if (collector == MARK_COMPACTOR) {
954  // Register the amount of external allocated memory.
955  amount_of_external_allocated_memory_at_last_global_gc_ =
956  amount_of_external_allocated_memory_;
957  }
958 
959  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
960  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
961  if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
962  gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
963  }
964  }
965 
966  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
967  ASSERT(!allocation_allowed_);
968  GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
969  global_gc_epilogue_callback_();
970  }
971 
972 #ifdef VERIFY_HEAP
973  if (FLAG_verify_heap) {
974  VerifySymbolTable();
975  }
976 #endif
977 
978  return next_gc_likely_to_collect_more;
979 }
980 
981 
982 void Heap::MarkCompact(GCTracer* tracer) {
983  gc_state_ = MARK_COMPACT;
984  LOG(isolate_, ResourceEvent("markcompact", "begin"));
985 
986  mark_compact_collector_.Prepare(tracer);
987 
988  ms_count_++;
989  tracer->set_full_gc_count(ms_count_);
990 
991  MarkCompactPrologue();
992 
993  mark_compact_collector_.CollectGarbage();
994 
995  LOG(isolate_, ResourceEvent("markcompact", "end"));
996 
997  gc_state_ = NOT_IN_GC;
998 
999  isolate_->counters()->objs_since_last_full()->Set(0);
1000 
1001  contexts_disposed_ = 0;
1002 
1003  flush_monomorphic_ics_ = false;
1004 }
1005 
1006 
1007 void Heap::MarkCompactPrologue() {
1008  // At any old GC clear the keyed lookup cache to enable collection of unused
1009  // maps.
1010  isolate_->keyed_lookup_cache()->Clear();
1011  isolate_->context_slot_cache()->Clear();
1012  isolate_->descriptor_lookup_cache()->Clear();
1013  RegExpResultsCache::Clear(string_split_cache());
1014  RegExpResultsCache::Clear(regexp_multiple_cache());
1015 
1016  isolate_->compilation_cache()->MarkCompactPrologue();
1017 
1019 
1020  FlushNumberStringCache();
1021  if (FLAG_cleanup_code_caches_at_gc) {
1022  polymorphic_code_cache()->set_cache(undefined_value());
1023  }
1024 
1026 }
1027 
1028 
1030  return isolate()->inner_pointer_to_code_cache()->
1031  GcSafeFindCodeForInnerPointer(a);
1032 }
1033 
1034 
1035 // Helper class for copying HeapObjects
1036 class ScavengeVisitor: public ObjectVisitor {
1037  public:
1038  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1039 
1040  void VisitPointer(Object** p) { ScavengePointer(p); }
1041 
1042  void VisitPointers(Object** start, Object** end) {
1043  // Copy all HeapObject pointers in [start, end)
1044  for (Object** p = start; p < end; p++) ScavengePointer(p);
1045  }
1046 
1047  private:
1048  void ScavengePointer(Object** p) {
1049  Object* object = *p;
1050  if (!heap_->InNewSpace(object)) return;
1051  Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1052  reinterpret_cast<HeapObject*>(object));
1053  }
1054 
1055  Heap* heap_;
1056 };
1057 
1058 
1059 #ifdef VERIFY_HEAP
1060 // Visitor class to verify pointers in code or data space do not point into
1061 // new space.
1062 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1063  public:
1064  void VisitPointers(Object** start, Object**end) {
1065  for (Object** current = start; current < end; current++) {
1066  if ((*current)->IsHeapObject()) {
1067  CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1068  }
1069  }
1070  }
1071 };
1072 
1073 
1074 static void VerifyNonPointerSpacePointers() {
1075  // Verify that there are no pointers to new space in spaces where we
1076  // do not expect them.
1077  VerifyNonPointerSpacePointersVisitor v;
1078  HeapObjectIterator code_it(HEAP->code_space());
1079  for (HeapObject* object = code_it.Next();
1080  object != NULL; object = code_it.Next())
1081  object->Iterate(&v);
1082 
1083  // The old data space was normally swept conservatively so that the iterator
1084  // doesn't work, so we normally skip the next bit.
1085  if (!HEAP->old_data_space()->was_swept_conservatively()) {
1086  HeapObjectIterator data_it(HEAP->old_data_space());
1087  for (HeapObject* object = data_it.Next();
1088  object != NULL; object = data_it.Next())
1089  object->Iterate(&v);
1090  }
1091 }
1092 #endif // VERIFY_HEAP
1093 
1094 
1096  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1097  survived_since_last_expansion_ > new_space_.Capacity() &&
1098  !new_space_high_promotion_mode_active_) {
1099  // Grow the size of new space if there is room to grow, enough data
1100  // has survived scavenge since the last expansion and we are not in
1101  // high promotion mode.
1102  new_space_.Grow();
1103  survived_since_last_expansion_ = 0;
1104  }
1105 }
1106 
1107 
1108 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1109  return heap->InNewSpace(*p) &&
1110  !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1111 }
1112 
1113 
1114 void Heap::ScavengeStoreBufferCallback(
1115  Heap* heap,
1116  MemoryChunk* page,
1117  StoreBufferEvent event) {
1118  heap->store_buffer_rebuilder_.Callback(page, event);
1119 }
1120 
1121 
1123  if (event == kStoreBufferStartScanningPagesEvent) {
1124  start_of_current_page_ = NULL;
1125  current_page_ = NULL;
1126  } else if (event == kStoreBufferScanningPageEvent) {
1127  if (current_page_ != NULL) {
1128  // If this page already overflowed the store buffer during this iteration.
1129  if (current_page_->scan_on_scavenge()) {
1130  // Then we should wipe out the entries that have been added for it.
1131  store_buffer_->SetTop(start_of_current_page_);
1132  } else if (store_buffer_->Top() - start_of_current_page_ >=
1133  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1134  // Did we find too many pointers in the previous page? The heuristic is
1135  // that no page can take more then 1/5 the remaining slots in the store
1136  // buffer.
1137  current_page_->set_scan_on_scavenge(true);
1138  store_buffer_->SetTop(start_of_current_page_);
1139  } else {
1140  // In this case the page we scanned took a reasonable number of slots in
1141  // the store buffer. It has now been rehabilitated and is no longer
1142  // marked scan_on_scavenge.
1143  ASSERT(!current_page_->scan_on_scavenge());
1144  }
1145  }
1146  start_of_current_page_ = store_buffer_->Top();
1147  current_page_ = page;
1148  } else if (event == kStoreBufferFullEvent) {
1149  // The current page overflowed the store buffer again. Wipe out its entries
1150  // in the store buffer and mark it scan-on-scavenge again. This may happen
1151  // several times while scanning.
1152  if (current_page_ == NULL) {
1153  // Store Buffer overflowed while scanning promoted objects. These are not
1154  // in any particular page, though they are likely to be clustered by the
1155  // allocation routines.
1157  } else {
1158  // Store Buffer overflowed while scanning a particular old space page for
1159  // pointers to new space.
1160  ASSERT(current_page_ == page);
1161  ASSERT(page != NULL);
1162  current_page_->set_scan_on_scavenge(true);
1163  ASSERT(start_of_current_page_ != store_buffer_->Top());
1164  store_buffer_->SetTop(start_of_current_page_);
1165  }
1166  } else {
1167  UNREACHABLE();
1168  }
1169 }
1170 
1171 
1173  // Assumes that a NewSpacePage exactly fits a number of promotion queue
1174  // entries (where each is a pair of intptr_t). This allows us to simplify
1175  // the test fpr when to switch pages.
1177  == 0);
1178  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1179  front_ = rear_ =
1180  reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1181  emergency_stack_ = NULL;
1182  guard_ = false;
1183 }
1184 
1185 
1186 void PromotionQueue::RelocateQueueHead() {
1187  ASSERT(emergency_stack_ == NULL);
1188 
1189  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1190  intptr_t* head_start = rear_;
1191  intptr_t* head_end =
1192  Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1193 
1194  int entries_count =
1195  static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1196 
1197  emergency_stack_ = new List<Entry>(2 * entries_count);
1198 
1199  while (head_start != head_end) {
1200  int size = static_cast<int>(*(head_start++));
1201  HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1202  emergency_stack_->Add(Entry(obj, size));
1203  }
1204  rear_ = head_end;
1205 }
1206 
1207 
1209  public:
1210  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1211 
1212  virtual Object* RetainAs(Object* object) {
1213  if (!heap_->InFromSpace(object)) {
1214  return object;
1215  }
1216 
1217  MapWord map_word = HeapObject::cast(object)->map_word();
1218  if (map_word.IsForwardingAddress()) {
1219  return map_word.ToForwardingAddress();
1220  }
1221  return NULL;
1222  }
1223 
1224  private:
1225  Heap* heap_;
1226 };
1227 
1228 
1229 void Heap::Scavenge() {
1230  RelocationLock relocation_lock(this);
1231 
1232 #ifdef VERIFY_HEAP
1233  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1234 #endif
1235 
1236  gc_state_ = SCAVENGE;
1237 
1238  // Implements Cheney's copying algorithm
1239  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1240 
1241  // Clear descriptor cache.
1242  isolate_->descriptor_lookup_cache()->Clear();
1243 
1244  // Used for updating survived_since_last_expansion_ at function end.
1245  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1246 
1248 
1249  SelectScavengingVisitorsTable();
1250 
1252 
1253  AdvanceSweepers(static_cast<int>(new_space_.Size()));
1254 
1255  // Flip the semispaces. After flipping, to space is empty, from space has
1256  // live objects.
1257  new_space_.Flip();
1258  new_space_.ResetAllocationInfo();
1259 
1260  // We need to sweep newly copied objects which can be either in the
1261  // to space or promoted to the old generation. For to-space
1262  // objects, we treat the bottom of the to space as a queue. Newly
1263  // copied and unswept objects lie between a 'front' mark and the
1264  // allocation pointer.
1265  //
1266  // Promoted objects can go into various old-generation spaces, and
1267  // can be allocated internally in the spaces (from the free list).
1268  // We treat the top of the to space as a queue of addresses of
1269  // promoted objects. The addresses of newly promoted and unswept
1270  // objects lie between a 'front' mark and a 'rear' mark that is
1271  // updated as a side effect of promoting an object.
1272  //
1273  // There is guaranteed to be enough room at the top of the to space
1274  // for the addresses of promoted objects: every object promoted
1275  // frees up its size in bytes from the top of the new space, and
1276  // objects are at least one pointer in size.
1277  Address new_space_front = new_space_.ToSpaceStart();
1278  promotion_queue_.Initialize();
1279 
1280 #ifdef DEBUG
1281  store_buffer()->Clean();
1282 #endif
1283 
1284  ScavengeVisitor scavenge_visitor(this);
1285  // Copy roots.
1286  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1287 
1288  // Copy objects reachable from the old generation.
1289  {
1290  StoreBufferRebuildScope scope(this,
1291  store_buffer(),
1292  &ScavengeStoreBufferCallback);
1294  }
1295 
1296  // Copy objects reachable from cells by scavenging cell values directly.
1297  HeapObjectIterator cell_iterator(cell_space_);
1298  for (HeapObject* heap_object = cell_iterator.Next();
1299  heap_object != NULL;
1300  heap_object = cell_iterator.Next()) {
1301  if (heap_object->IsJSGlobalPropertyCell()) {
1302  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(heap_object);
1303  Address value_address = cell->ValueAddress();
1304  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1305  }
1306  }
1307 
1308  // Scavenge object reachable from the native contexts list directly.
1309  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1310 
1311  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1313  &IsUnscavengedHeapObject);
1315  &scavenge_visitor);
1316  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1317 
1319  &UpdateNewSpaceReferenceInExternalStringTableEntry);
1320 
1321  promotion_queue_.Destroy();
1322 
1324  if (!FLAG_watch_ic_patching) {
1326  }
1328 
1329  ScavengeWeakObjectRetainer weak_object_retainer(this);
1330  ProcessWeakReferences(&weak_object_retainer);
1331 
1332  ASSERT(new_space_front == new_space_.top());
1333 
1334  // Set age mark.
1335  new_space_.set_age_mark(new_space_.top());
1336 
1337  new_space_.LowerInlineAllocationLimit(
1338  new_space_.inline_allocation_limit_step());
1339 
1340  // Update how much has survived scavenge.
1341  IncrementYoungSurvivorsCounter(static_cast<int>(
1342  (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1343 
1344  LOG(isolate_, ResourceEvent("scavenge", "end"));
1345 
1346  gc_state_ = NOT_IN_GC;
1347 
1348  scavenges_since_last_idle_round_++;
1349 }
1350 
1351 
1352 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1353  Object** p) {
1354  MapWord first_word = HeapObject::cast(*p)->map_word();
1355 
1356  if (!first_word.IsForwardingAddress()) {
1357  // Unreachable external string can be finalized.
1358  heap->FinalizeExternalString(String::cast(*p));
1359  return NULL;
1360  }
1361 
1362  // String is still reachable.
1363  return String::cast(first_word.ToForwardingAddress());
1364 }
1365 
1366 
1368  ExternalStringTableUpdaterCallback updater_func) {
1369 #ifdef VERIFY_HEAP
1370  if (FLAG_verify_heap) {
1371  external_string_table_.Verify();
1372  }
1373 #endif
1374 
1375  if (external_string_table_.new_space_strings_.is_empty()) return;
1376 
1377  Object** start = &external_string_table_.new_space_strings_[0];
1378  Object** end = start + external_string_table_.new_space_strings_.length();
1379  Object** last = start;
1380 
1381  for (Object** p = start; p < end; ++p) {
1382  ASSERT(InFromSpace(*p));
1383  String* target = updater_func(this, p);
1384 
1385  if (target == NULL) continue;
1386 
1387  ASSERT(target->IsExternalString());
1388 
1389  if (InNewSpace(target)) {
1390  // String is still in new space. Update the table entry.
1391  *last = target;
1392  ++last;
1393  } else {
1394  // String got promoted. Move it to the old string list.
1395  external_string_table_.AddOldString(target);
1396  }
1397  }
1398 
1399  ASSERT(last <= end);
1400  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1401 }
1402 
1403 
1405  ExternalStringTableUpdaterCallback updater_func) {
1406 
1407  // Update old space string references.
1408  if (external_string_table_.old_space_strings_.length() > 0) {
1409  Object** start = &external_string_table_.old_space_strings_[0];
1410  Object** end = start + external_string_table_.old_space_strings_.length();
1411  for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1412  }
1413 
1415 }
1416 
1417 
1418 static Object* ProcessFunctionWeakReferences(Heap* heap,
1419  Object* function,
1420  WeakObjectRetainer* retainer,
1421  bool record_slots) {
1422  Object* undefined = heap->undefined_value();
1423  Object* head = undefined;
1424  JSFunction* tail = NULL;
1425  Object* candidate = function;
1426  while (candidate != undefined) {
1427  // Check whether to keep the candidate in the list.
1428  JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1429  Object* retain = retainer->RetainAs(candidate);
1430  if (retain != NULL) {
1431  if (head == undefined) {
1432  // First element in the list.
1433  head = retain;
1434  } else {
1435  // Subsequent elements in the list.
1436  ASSERT(tail != NULL);
1437  tail->set_next_function_link(retain);
1438  if (record_slots) {
1439  Object** next_function =
1441  heap->mark_compact_collector()->RecordSlot(
1442  next_function, next_function, retain);
1443  }
1444  }
1445  // Retained function is new tail.
1446  candidate_function = reinterpret_cast<JSFunction*>(retain);
1447  tail = candidate_function;
1448 
1449  ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1450 
1451  if (retain == undefined) break;
1452  }
1453 
1454  // Move to next element in the list.
1455  candidate = candidate_function->next_function_link();
1456  }
1457 
1458  // Terminate the list if there is one or more elements.
1459  if (tail != NULL) {
1460  tail->set_next_function_link(undefined);
1461  }
1462 
1463  return head;
1464 }
1465 
1466 
1468  Object* undefined = undefined_value();
1469  Object* head = undefined;
1470  Context* tail = NULL;
1471  Object* candidate = native_contexts_list_;
1472 
1473  // We don't record weak slots during marking or scavenges.
1474  // Instead we do it once when we complete mark-compact cycle.
1475  // Note that write barrier has no effect if we are already in the middle of
1476  // compacting mark-sweep cycle and we have to record slots manually.
1477  bool record_slots =
1478  gc_state() == MARK_COMPACT &&
1480 
1481  while (candidate != undefined) {
1482  // Check whether to keep the candidate in the list.
1483  Context* candidate_context = reinterpret_cast<Context*>(candidate);
1484  Object* retain = retainer->RetainAs(candidate);
1485  if (retain != NULL) {
1486  if (head == undefined) {
1487  // First element in the list.
1488  head = retain;
1489  } else {
1490  // Subsequent elements in the list.
1491  ASSERT(tail != NULL);
1492  tail->set_unchecked(this,
1494  retain,
1496 
1497  if (record_slots) {
1498  Object** next_context =
1501  mark_compact_collector()->RecordSlot(
1502  next_context, next_context, retain);
1503  }
1504  }
1505  // Retained context is new tail.
1506  candidate_context = reinterpret_cast<Context*>(retain);
1507  tail = candidate_context;
1508 
1509  if (retain == undefined) break;
1510 
1511  // Process the weak list of optimized functions for the context.
1512  Object* function_list_head =
1513  ProcessFunctionWeakReferences(
1514  this,
1515  candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1516  retainer,
1517  record_slots);
1518  candidate_context->set_unchecked(this,
1520  function_list_head,
1522  if (record_slots) {
1523  Object** optimized_functions =
1526  mark_compact_collector()->RecordSlot(
1527  optimized_functions, optimized_functions, function_list_head);
1528  }
1529  }
1530 
1531  // Move to next element in the list.
1532  candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1533  }
1534 
1535  // Terminate the list if there is one or more elements.
1536  if (tail != NULL) {
1537  tail->set_unchecked(this,
1539  Heap::undefined_value(),
1541  }
1542 
1543  // Update the head of the list of contexts.
1544  native_contexts_list_ = head;
1545 }
1546 
1547 
1549  AssertNoAllocation no_allocation;
1550 
1551  class VisitorAdapter : public ObjectVisitor {
1552  public:
1553  explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
1554  : visitor_(visitor) {}
1555  virtual void VisitPointers(Object** start, Object** end) {
1556  for (Object** p = start; p < end; p++) {
1557  if ((*p)->IsExternalString()) {
1558  visitor_->VisitExternalString(Utils::ToLocal(
1560  }
1561  }
1562  }
1563  private:
1564  v8::ExternalResourceVisitor* visitor_;
1565  } visitor_adapter(visitor);
1566  external_string_table_.Iterate(&visitor_adapter);
1567 }
1568 
1569 
1570 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1571  public:
1572  static inline void VisitPointer(Heap* heap, Object** p) {
1573  Object* object = *p;
1574  if (!heap->InNewSpace(object)) return;
1575  Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1576  reinterpret_cast<HeapObject*>(object));
1577  }
1578 };
1579 
1580 
1581 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1582  Address new_space_front) {
1583  do {
1584  SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1585  // The addresses new_space_front and new_space_.top() define a
1586  // queue of unprocessed copied objects. Process them until the
1587  // queue is empty.
1588  while (new_space_front != new_space_.top()) {
1589  if (!NewSpacePage::IsAtEnd(new_space_front)) {
1590  HeapObject* object = HeapObject::FromAddress(new_space_front);
1591  new_space_front +=
1592  NewSpaceScavenger::IterateBody(object->map(), object);
1593  } else {
1594  new_space_front =
1595  NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1596  }
1597  }
1598 
1599  // Promote and process all the to-be-promoted objects.
1600  {
1601  StoreBufferRebuildScope scope(this,
1602  store_buffer(),
1603  &ScavengeStoreBufferCallback);
1604  while (!promotion_queue()->is_empty()) {
1605  HeapObject* target;
1606  int size;
1607  promotion_queue()->remove(&target, &size);
1608 
1609  // Promoted object might be already partially visited
1610  // during old space pointer iteration. Thus we search specificly
1611  // for pointers to from semispace instead of looking for pointers
1612  // to new space.
1613  ASSERT(!target->IsMap());
1614  IterateAndMarkPointersToFromSpace(target->address(),
1615  target->address() + size,
1616  &ScavengeObject);
1617  }
1618  }
1619 
1620  // Take another spin if there are now unswept objects in new space
1621  // (there are currently no more unswept promoted objects).
1622  } while (new_space_front != new_space_.top());
1623 
1624  return new_space_front;
1625 }
1626 
1627 
1629 
1630 
1631 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1632  HeapObject* object,
1633  int size));
1634 
1635 static HeapObject* EnsureDoubleAligned(Heap* heap,
1636  HeapObject* object,
1637  int size) {
1638  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1639  heap->CreateFillerObjectAt(object->address(), kPointerSize);
1640  return HeapObject::FromAddress(object->address() + kPointerSize);
1641  } else {
1642  heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1643  kPointerSize);
1644  return object;
1645  }
1646 }
1647 
1648 
1652 };
1653 
1654 
1656 
1657 
1658 template<MarksHandling marks_handling,
1659  LoggingAndProfiling logging_and_profiling_mode>
1661  public:
1662  static void Initialize() {
1663  table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1664  table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1665  table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1666  table_.Register(kVisitByteArray, &EvacuateByteArray);
1667  table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1668  table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1669 
1670  table_.Register(kVisitNativeContext,
1671  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1672  template VisitSpecialized<Context::kSize>);
1673 
1674  table_.Register(kVisitConsString,
1675  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1676  template VisitSpecialized<ConsString::kSize>);
1677 
1678  table_.Register(kVisitSlicedString,
1679  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1680  template VisitSpecialized<SlicedString::kSize>);
1681 
1682  table_.Register(kVisitSharedFunctionInfo,
1683  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1684  template VisitSpecialized<SharedFunctionInfo::kSize>);
1685 
1686  table_.Register(kVisitJSWeakMap,
1687  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1688  Visit);
1689 
1690  table_.Register(kVisitJSRegExp,
1691  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1692  Visit);
1693 
1694  if (marks_handling == IGNORE_MARKS) {
1695  table_.Register(kVisitJSFunction,
1696  &ObjectEvacuationStrategy<POINTER_OBJECT>::
1697  template VisitSpecialized<JSFunction::kSize>);
1698  } else {
1699  table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1700  }
1701 
1702  table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1704  kVisitDataObjectGeneric>();
1705 
1706  table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1708  kVisitJSObjectGeneric>();
1709 
1710  table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1711  kVisitStruct,
1712  kVisitStructGeneric>();
1713  }
1714 
1716  return &table_;
1717  }
1718 
1719  private:
1720  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1721  enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1722 
1723  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1724  bool should_record = false;
1725 #ifdef DEBUG
1726  should_record = FLAG_heap_stats;
1727 #endif
1728  should_record = should_record || FLAG_log_gc;
1729  if (should_record) {
1730  if (heap->new_space()->Contains(obj)) {
1731  heap->new_space()->RecordAllocation(obj);
1732  } else {
1733  heap->new_space()->RecordPromotion(obj);
1734  }
1735  }
1736  }
1737 
1738  // Helper function used by CopyObject to copy a source object to an
1739  // allocated target object and update the forwarding pointer in the source
1740  // object. Returns the target object.
1741  INLINE(static void MigrateObject(Heap* heap,
1742  HeapObject* source,
1743  HeapObject* target,
1744  int size)) {
1745  // Copy the content of source to target.
1746  heap->CopyBlock(target->address(), source->address(), size);
1747 
1748  // Set the forwarding address.
1749  source->set_map_word(MapWord::FromForwardingAddress(target));
1750 
1751  if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1752  // Update NewSpace stats if necessary.
1753  RecordCopiedObject(heap, target);
1754  HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1755  Isolate* isolate = heap->isolate();
1756  if (isolate->logger()->is_logging_code_events() ||
1757  CpuProfiler::is_profiling(isolate)) {
1758  if (target->IsSharedFunctionInfo()) {
1759  PROFILE(isolate, SharedFunctionInfoMoveEvent(
1760  source->address(), target->address()));
1761  }
1762  }
1763  }
1764 
1765  if (marks_handling == TRANSFER_MARKS) {
1766  if (Marking::TransferColor(source, target)) {
1767  MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1768  }
1769  }
1770  }
1771 
1772 
1773  template<ObjectContents object_contents,
1774  SizeRestriction size_restriction,
1775  int alignment>
1776  static inline void EvacuateObject(Map* map,
1777  HeapObject** slot,
1778  HeapObject* object,
1779  int object_size) {
1780  SLOW_ASSERT((size_restriction != SMALL) ||
1781  (object_size <= Page::kMaxNonCodeHeapObjectSize));
1782  SLOW_ASSERT(object->Size() == object_size);
1783 
1784  int allocation_size = object_size;
1785  if (alignment != kObjectAlignment) {
1786  ASSERT(alignment == kDoubleAlignment);
1787  allocation_size += kPointerSize;
1788  }
1789 
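 // Objects approved by ShouldBePromoted are evacuated to old space; if the
 // promotion allocation fails, the object is copied within new space below.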
1790  Heap* heap = map->GetHeap();
1791  if (heap->ShouldBePromoted(object->address(), object_size)) {
1792  MaybeObject* maybe_result;
1793 
1794  if ((size_restriction != SMALL) &&
1795  (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
1796  maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
1797  NOT_EXECUTABLE);
1798  } else {
1799  if (object_contents == DATA_OBJECT) {
1800  maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
1801  } else {
1802  maybe_result =
1803  heap->old_pointer_space()->AllocateRaw(allocation_size);
1804  }
1805  }
1806 
1807  Object* result = NULL; // Initialization to please compiler.
1808  if (maybe_result->ToObject(&result)) {
1809  HeapObject* target = HeapObject::cast(result);
1810 
1811  if (alignment != kObjectAlignment) {
1812  target = EnsureDoubleAligned(heap, target, allocation_size);
1813  }
1814 
1815  // Order is important: slot might be inside of the target if target
1816  // was allocated over a dead object and slot comes from the store
1817  // buffer.
1818  *slot = target;
1819  MigrateObject(heap, object, target, object_size);
1820 
1821  if (object_contents == POINTER_OBJECT) {
1822  if (map->instance_type() == JS_FUNCTION_TYPE) {
1823  heap->promotion_queue()->insert(
1824  target, JSFunction::kNonWeakFieldsEndOffset);
1825  } else {
1826  heap->promotion_queue()->insert(target, object_size);
1827  }
1828  }
1829 
1830  heap->tracer()->increment_promoted_objects_size(object_size);
1831  return;
1832  }
1833  }
1834  MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
1835  heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1836  Object* result = allocation->ToObjectUnchecked();
1837  HeapObject* target = HeapObject::cast(result);
1838 
1839  if (alignment != kObjectAlignment) {
1840  target = EnsureDoubleAligned(heap, target, allocation_size);
1841  }
1842 
1843  // Order is important: slot might be inside of the target if target
1844  // was allocated over a dead object and slot comes from the store
1845  // buffer.
1846  *slot = target;
1847  MigrateObject(heap, object, target, object_size);
1848  return;
1849  }
1850 
1851 
1852  static inline void EvacuateJSFunction(Map* map,
1853  HeapObject** slot,
1854  HeapObject* object) {
1855  ObjectEvacuationStrategy<POINTER_OBJECT>::
1856  template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1857 
1858  HeapObject* target = *slot;
1859  MarkBit mark_bit = Marking::MarkBitFrom(target);
1860  if (Marking::IsBlack(mark_bit)) {
1861  // This object is black and it might not be rescanned by marker.
1862  // We should explicitly record code entry slot for compaction because
1863  // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1864  // miss it as it is not HeapObject-tagged.
1865  Address code_entry_slot =
1866  target->address() + JSFunction::kCodeEntryOffset;
1867  Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1868  map->GetHeap()->mark_compact_collector()->
1869  RecordCodeEntrySlot(code_entry_slot, code);
1870  }
1871  }
1872 
1873 
1874  static inline void EvacuateFixedArray(Map* map,
1875  HeapObject** slot,
1876  HeapObject* object) {
1877  int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1878  EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
1879  slot,
1880  object,
1881  object_size);
1882  }
1883 
1884 
1885  static inline void EvacuateFixedDoubleArray(Map* map,
1886  HeapObject** slot,
1887  HeapObject* object) {
1888  int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
1889  int object_size = FixedDoubleArray::SizeFor(length);
1890  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
1891  map,
1892  slot,
1893  object,
1894  object_size);
1895  }
1896 
1897 
1898  static inline void EvacuateByteArray(Map* map,
1899  HeapObject** slot,
1900  HeapObject* object) {
1901  int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1902  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1903  map, slot, object, object_size);
1904  }
1905 
1906 
1907  static inline void EvacuateSeqAsciiString(Map* map,
1908  HeapObject** slot,
1909  HeapObject* object) {
1910  int object_size = SeqAsciiString::cast(object)->
1911  SeqAsciiStringSize(map->instance_type());
1912  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1913  map, slot, object, object_size);
1914  }
1915 
1916 
1917  static inline void EvacuateSeqTwoByteString(Map* map,
1918  HeapObject** slot,
1919  HeapObject* object) {
1920  int object_size = SeqTwoByteString::cast(object)->
1921  SeqTwoByteStringSize(map->instance_type());
1922  EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
1923  map, slot, object, object_size);
1924  }
1925 
1926 
1927  static inline bool IsShortcutCandidate(int type) {
1928  return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1929  }
1930 
1931  static inline void EvacuateShortcutCandidate(Map* map,
1932  HeapObject** slot,
1933  HeapObject* object) {
1934  ASSERT(IsShortcutCandidate(map->instance_type()));
1935 
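 // A cons string whose second component is the empty string can be
 // short-circuited: the slot is redirected to the first component instead
 // of evacuating the cons wrapper itself.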
1936  Heap* heap = map->GetHeap();
1937 
1938  if (marks_handling == IGNORE_MARKS &&
1939  ConsString::cast(object)->unchecked_second() ==
1940  heap->empty_string()) {
1941  HeapObject* first =
1942  HeapObject::cast(ConsString::cast(object)->unchecked_first());
1943 
1944  *slot = first;
1945 
1946  if (!heap->InNewSpace(first)) {
1947  object->set_map_word(MapWord::FromForwardingAddress(first));
1948  return;
1949  }
1950 
1951  MapWord first_word = first->map_word();
1952  if (first_word.IsForwardingAddress()) {
1953  HeapObject* target = first_word.ToForwardingAddress();
1954 
1955  *slot = target;
1956  object->set_map_word(MapWord::FromForwardingAddress(target));
1957  return;
1958  }
1959 
1960  heap->DoScavengeObject(first->map(), slot, first);
1961  object->set_map_word(MapWord::FromForwardingAddress(*slot));
1962  return;
1963  }
1964 
1965  int object_size = ConsString::kSize;
1966  EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
1967  map, slot, object, object_size);
1968  }
1969 
1970  template<ObjectContents object_contents>
1971  class ObjectEvacuationStrategy {
1972  public:
1973  template<int object_size>
1974  static inline void VisitSpecialized(Map* map,
1975  HeapObject** slot,
1976  HeapObject* object) {
1977  EvacuateObject<object_contents, SMALL, kObjectAlignment>(
1978  map, slot, object, object_size);
1979  }
1980 
1981  static inline void Visit(Map* map,
1982  HeapObject** slot,
1983  HeapObject* object) {
1984  int object_size = map->instance_size();
1985  EvacuateObject<object_contents, SMALL, kObjectAlignment>(
1986  map, slot, object, object_size);
1987  }
1988  };
1989 
1990  static VisitorDispatchTable<ScavengingCallback> table_;
1991 };
1992 
1993 
1994 template<MarksHandling marks_handling,
1995  LoggingAndProfiling logging_and_profiling_mode>
1996 VisitorDispatchTable<ScavengingCallback>
1997  ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
1998 
1999 
2000 static void InitializeScavengingVisitorsTables() {
2001  ScavengingVisitor<TRANSFER_MARKS,
2002  LOGGING_AND_PROFILING_DISABLED>::Initialize();
2003  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2004  ScavengingVisitor<TRANSFER_MARKS,
2005  LOGGING_AND_PROFILING_ENABLED>::Initialize();
2006  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2007 }
2008 
2009 
2010 void Heap::SelectScavengingVisitorsTable() {
2011  bool logging_and_profiling =
2012  isolate()->logger()->is_logging() ||
2013  CpuProfiler::is_profiling(isolate()) ||
2014  (isolate()->heap_profiler() != NULL &&
2015  isolate()->heap_profiler()->is_profiling());
2016 
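 // One of four precomputed tables is selected, covering the combinations of
 // {IGNORE_MARKS, TRANSFER_MARKS} and logging/profiling on or off.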
2017  if (!incremental_marking()->IsMarking()) {
2018  if (!logging_and_profiling) {
2019  scavenging_visitors_table_.CopyFrom(
2020  ScavengingVisitor<IGNORE_MARKS,
2021  LOGGING_AND_PROFILING_DISABLED>::GetTable());
2022  } else {
2023  scavenging_visitors_table_.CopyFrom(
2024  ScavengingVisitor<IGNORE_MARKS,
2025  LOGGING_AND_PROFILING_ENABLED>::GetTable());
2026  }
2027  } else {
2028  if (!logging_and_profiling) {
2029  scavenging_visitors_table_.CopyFrom(
2030  ScavengingVisitor<TRANSFER_MARKS,
2031  LOGGING_AND_PROFILING_DISABLED>::GetTable());
2032  } else {
2033  scavenging_visitors_table_.CopyFrom(
2034  ScavengingVisitor<TRANSFER_MARKS,
2035  LOGGING_AND_PROFILING_ENABLED>::GetTable());
2036  }
2037 
2038  if (incremental_marking()->IsCompacting()) {
2039  // When compacting forbid short-circuiting of cons-strings.
2040  // Scavenging code relies on the fact that a new space object
2041  // cannot be evacuated into an evacuation candidate, but
2042  // short-circuiting violates this assumption.
2043  scavenging_visitors_table_.Register(
2044  StaticVisitorBase::kVisitShortcutCandidate,
2045  scavenging_visitors_table_.GetVisitorById(
2046  StaticVisitorBase::kVisitConsString));
2047  }
2048  }
2049 }
2050 
2051 
2052 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2053  SLOW_ASSERT(HEAP->InFromSpace(object));
2054  MapWord first_word = object->map_word();
2055  SLOW_ASSERT(!first_word.IsForwardingAddress());
2056  Map* map = first_word.ToMap();
2057  map->GetHeap()->DoScavengeObject(map, p, object);
2058 }
2059 
2060 
2061 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2062  int instance_size) {
2063  Object* result;
2064  MaybeObject* maybe_result = AllocateRawMap();
2065  if (!maybe_result->ToObject(&result)) return maybe_result;
2066 
2067  // Map::cast cannot be used due to uninitialized map field.
2068  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2069  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2070  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2071  reinterpret_cast<Map*>(result)->set_visitor_id(
2072  StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2073  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2074  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2075  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2076  reinterpret_cast<Map*>(result)->set_bit_field(0);
2077  reinterpret_cast<Map*>(result)->set_bit_field2(0);
2078  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2079  Map::OwnsDescriptors::encode(true);
2080  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2081  return result;
2082 }
2083 
2084 
2085 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2086  int instance_size,
2087  ElementsKind elements_kind) {
2088  Object* result;
2089  MaybeObject* maybe_result = AllocateRawMap();
2090  if (!maybe_result->To(&result)) return maybe_result;
2091 
2092  Map* map = reinterpret_cast<Map*>(result);
2093  map->set_map_no_write_barrier(meta_map());
2094  map->set_instance_type(instance_type);
2095  map->set_visitor_id(
2096  StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2097  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2098  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2099  map->set_instance_size(instance_size);
2100  map->set_inobject_properties(0);
2102  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2103  map->init_back_pointer(undefined_value());
2105  map->set_instance_descriptors(empty_descriptor_array());
2106  map->set_bit_field(0);
2107  map->set_bit_field2(1 << Map::kIsExtensible);
2108  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2109  Map::OwnsDescriptors::encode(true);
2110  map->set_bit_field3(bit_field3);
2111  map->set_elements_kind(elements_kind);
2112 
2113  return map;
2114 }
2115 
2116 
2117 MaybeObject* Heap::AllocateCodeCache() {
2118  CodeCache* code_cache;
2119  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2120  if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2121  }
2122  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2123  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2124  return code_cache;
2125 }
2126 
2127 
2128 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2129  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2130 }
2131 
2132 
2133 MaybeObject* Heap::AllocateAccessorPair() {
2134  AccessorPair* accessors;
2135  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2136  if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2137  }
2138  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2139  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2140  return accessors;
2141 }
2142 
2143 
2144 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2145  TypeFeedbackInfo* info;
2146  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2147  if (!maybe_info->To(&info)) return maybe_info;
2148  }
2149  info->initialize_storage();
2150  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2151  SKIP_WRITE_BARRIER);
2152  return info;
2153 }
2154 
2155 
2156 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2157  AliasedArgumentsEntry* entry;
2158  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2159  if (!maybe_entry->To(&entry)) return maybe_entry;
2160  }
2161  entry->set_aliased_context_slot(aliased_context_slot);
2162  return entry;
2163 }
2164 
2165 
2166 const Heap::StringTypeTable Heap::string_type_table[] = {
2167 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2168  {type, size, k##camel_name##MapRootIndex},
2169  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2170 #undef STRING_TYPE_ELEMENT
2171 };
2172 
2173 
2174 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
2175 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
2176  {contents, k##name##RootIndex},
2177  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
2178 #undef CONSTANT_SYMBOL_ELEMENT
2179 };
2180 
2181 
2182 const Heap::StructTable Heap::struct_table[] = {
2183 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2184  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2185  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2186 #undef STRUCT_TABLE_ELEMENT
2187 };
2188 
2189 
2190 bool Heap::CreateInitialMaps() {
2191  Object* obj;
2192  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2193  if (!maybe_obj->ToObject(&obj)) return false;
2194  }
2195  // Map::cast cannot be used due to uninitialized map field.
2196  Map* new_meta_map = reinterpret_cast<Map*>(obj);
2197  set_meta_map(new_meta_map);
2198  new_meta_map->set_map(new_meta_map);
2199 
2200  { MaybeObject* maybe_obj =
2201  AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2202  if (!maybe_obj->ToObject(&obj)) return false;
2203  }
2204  set_fixed_array_map(Map::cast(obj));
2205 
2206  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2207  if (!maybe_obj->ToObject(&obj)) return false;
2208  }
2209  set_oddball_map(Map::cast(obj));
2210 
2211  // Allocate the empty array.
2212  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2213  if (!maybe_obj->ToObject(&obj)) return false;
2214  }
2215  set_empty_fixed_array(FixedArray::cast(obj));
2216 
2217  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2218  if (!maybe_obj->ToObject(&obj)) return false;
2219  }
2220  set_null_value(Oddball::cast(obj));
2221  Oddball::cast(obj)->set_kind(Oddball::kNull);
2222 
2223  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2224  if (!maybe_obj->ToObject(&obj)) return false;
2225  }
2226  set_undefined_value(Oddball::cast(obj));
2227  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2228  ASSERT(!InNewSpace(undefined_value()));
2229 
2230  // Allocate the empty descriptor array.
2231  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2232  if (!maybe_obj->ToObject(&obj)) return false;
2233  }
2234  set_empty_descriptor_array(DescriptorArray::cast(obj));
2235 
2236  // Fix the instance_descriptors for the existing maps.
2237  meta_map()->set_code_cache(empty_fixed_array());
2238  meta_map()->init_back_pointer(undefined_value());
2239  meta_map()->set_instance_descriptors(empty_descriptor_array());
2240 
2241  fixed_array_map()->set_code_cache(empty_fixed_array());
2242  fixed_array_map()->init_back_pointer(undefined_value());
2243  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2244 
2245  oddball_map()->set_code_cache(empty_fixed_array());
2246  oddball_map()->init_back_pointer(undefined_value());
2247  oddball_map()->set_instance_descriptors(empty_descriptor_array());
2248 
2249  // Fix prototype object for existing maps.
2250  meta_map()->set_prototype(null_value());
2251  meta_map()->set_constructor(null_value());
2252 
2253  fixed_array_map()->set_prototype(null_value());
2254  fixed_array_map()->set_constructor(null_value());
2255 
2256  oddball_map()->set_prototype(null_value());
2257  oddball_map()->set_constructor(null_value());
2258 
2259  { MaybeObject* maybe_obj =
2260  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2261  if (!maybe_obj->ToObject(&obj)) return false;
2262  }
2263  set_fixed_cow_array_map(Map::cast(obj));
2264  ASSERT(fixed_array_map() != fixed_cow_array_map());
2265 
2266  { MaybeObject* maybe_obj =
2267  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2268  if (!maybe_obj->ToObject(&obj)) return false;
2269  }
2270  set_scope_info_map(Map::cast(obj));
2271 
2272  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2273  if (!maybe_obj->ToObject(&obj)) return false;
2274  }
2275  set_heap_number_map(Map::cast(obj));
2276 
2277  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2278  if (!maybe_obj->ToObject(&obj)) return false;
2279  }
2280  set_foreign_map(Map::cast(obj));
2281 
2282  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2283  const StringTypeTable& entry = string_type_table[i];
2284  { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2285  if (!maybe_obj->ToObject(&obj)) return false;
2286  }
2287  roots_[entry.index] = Map::cast(obj);
2288  }
2289 
2290  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2291  if (!maybe_obj->ToObject(&obj)) return false;
2292  }
2293  set_undetectable_string_map(Map::cast(obj));
2294  Map::cast(obj)->set_is_undetectable();
2295 
2296  { MaybeObject* maybe_obj =
2297  AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2298  if (!maybe_obj->ToObject(&obj)) return false;
2299  }
2300  set_undetectable_ascii_string_map(Map::cast(obj));
2301  Map::cast(obj)->set_is_undetectable();
2302 
2303  { MaybeObject* maybe_obj =
2304  AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2305  if (!maybe_obj->ToObject(&obj)) return false;
2306  }
2307  set_fixed_double_array_map(Map::cast(obj));
2308 
2309  { MaybeObject* maybe_obj =
2310  AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2311  if (!maybe_obj->ToObject(&obj)) return false;
2312  }
2313  set_byte_array_map(Map::cast(obj));
2314 
2315  { MaybeObject* maybe_obj =
2316  AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2317  if (!maybe_obj->ToObject(&obj)) return false;
2318  }
2319  set_free_space_map(Map::cast(obj));
2320 
2321  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2322  if (!maybe_obj->ToObject(&obj)) return false;
2323  }
2324  set_empty_byte_array(ByteArray::cast(obj));
2325 
2326  { MaybeObject* maybe_obj =
2327  AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2328  if (!maybe_obj->ToObject(&obj)) return false;
2329  }
2330  set_external_pixel_array_map(Map::cast(obj));
2331 
2332  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2333  ExternalArray::kAlignedSize);
2334  if (!maybe_obj->ToObject(&obj)) return false;
2335  }
2336  set_external_byte_array_map(Map::cast(obj));
2337 
2338  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2339  ExternalArray::kAlignedSize);
2340  if (!maybe_obj->ToObject(&obj)) return false;
2341  }
2342  set_external_unsigned_byte_array_map(Map::cast(obj));
2343 
2344  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2345  ExternalArray::kAlignedSize);
2346  if (!maybe_obj->ToObject(&obj)) return false;
2347  }
2348  set_external_short_array_map(Map::cast(obj));
2349 
2350  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2351  ExternalArray::kAlignedSize);
2352  if (!maybe_obj->ToObject(&obj)) return false;
2353  }
2354  set_external_unsigned_short_array_map(Map::cast(obj));
2355 
2356  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2357  ExternalArray::kAlignedSize);
2358  if (!maybe_obj->ToObject(&obj)) return false;
2359  }
2360  set_external_int_array_map(Map::cast(obj));
2361 
2362  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2363  ExternalArray::kAlignedSize);
2364  if (!maybe_obj->ToObject(&obj)) return false;
2365  }
2366  set_external_unsigned_int_array_map(Map::cast(obj));
2367 
2368  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2369  ExternalArray::kAlignedSize);
2370  if (!maybe_obj->ToObject(&obj)) return false;
2371  }
2372  set_external_float_array_map(Map::cast(obj));
2373 
2374  { MaybeObject* maybe_obj =
2375  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2376  if (!maybe_obj->ToObject(&obj)) return false;
2377  }
2378  set_non_strict_arguments_elements_map(Map::cast(obj));
2379 
2380  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2381  ExternalArray::kAlignedSize);
2382  if (!maybe_obj->ToObject(&obj)) return false;
2383  }
2384  set_external_double_array_map(Map::cast(obj));
2385 
2386  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2387  if (!maybe_obj->ToObject(&obj)) return false;
2388  }
2389  set_code_map(Map::cast(obj));
2390 
2391  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
2392  JSGlobalPropertyCell::kSize);
2393  if (!maybe_obj->ToObject(&obj)) return false;
2394  }
2395  set_global_property_cell_map(Map::cast(obj));
2396 
2397  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2398  if (!maybe_obj->ToObject(&obj)) return false;
2399  }
2400  set_one_pointer_filler_map(Map::cast(obj));
2401 
2402  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2403  if (!maybe_obj->ToObject(&obj)) return false;
2404  }
2405  set_two_pointer_filler_map(Map::cast(obj));
2406 
2407  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2408  const StructTable& entry = struct_table[i];
2409  { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2410  if (!maybe_obj->ToObject(&obj)) return false;
2411  }
2412  roots_[entry.index] = Map::cast(obj);
2413  }
2414 
2415  { MaybeObject* maybe_obj =
2416  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2417  if (!maybe_obj->ToObject(&obj)) return false;
2418  }
2419  set_hash_table_map(Map::cast(obj));
2420 
2421  { MaybeObject* maybe_obj =
2422  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2423  if (!maybe_obj->ToObject(&obj)) return false;
2424  }
2425  set_function_context_map(Map::cast(obj));
2426 
2427  { MaybeObject* maybe_obj =
2428  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2429  if (!maybe_obj->ToObject(&obj)) return false;
2430  }
2431  set_catch_context_map(Map::cast(obj));
2432 
2433  { MaybeObject* maybe_obj =
2434  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2435  if (!maybe_obj->ToObject(&obj)) return false;
2436  }
2437  set_with_context_map(Map::cast(obj));
2438 
2439  { MaybeObject* maybe_obj =
2440  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2441  if (!maybe_obj->ToObject(&obj)) return false;
2442  }
2443  set_block_context_map(Map::cast(obj));
2444 
2445  { MaybeObject* maybe_obj =
2446  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2447  if (!maybe_obj->ToObject(&obj)) return false;
2448  }
2449  set_module_context_map(Map::cast(obj));
2450 
2451  { MaybeObject* maybe_obj =
2452  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2453  if (!maybe_obj->ToObject(&obj)) return false;
2454  }
2455  set_global_context_map(Map::cast(obj));
2456 
2457  { MaybeObject* maybe_obj =
2458  AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2459  if (!maybe_obj->ToObject(&obj)) return false;
2460  }
2461  Map* native_context_map = Map::cast(obj);
2462  native_context_map->set_dictionary_map(true);
2463  native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2464  set_native_context_map(native_context_map);
2465 
2466  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2467  SharedFunctionInfo::kAlignedSize);
2468  if (!maybe_obj->ToObject(&obj)) return false;
2469  }
2470  set_shared_function_info_map(Map::cast(obj));
2471 
2472  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2473  JSMessageObject::kSize);
2474  if (!maybe_obj->ToObject(&obj)) return false;
2475  }
2476  set_message_object_map(Map::cast(obj));
2477 
2478  ASSERT(!InNewSpace(empty_fixed_array()));
2479  return true;
2480 }
2481 
2482 
2483 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2484  // Statically ensure that it is safe to allocate heap numbers in paged
2485  // spaces.
2487  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2488 
2489  Object* result;
2490  { MaybeObject* maybe_result =
2491  AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2492  if (!maybe_result->ToObject(&result)) return maybe_result;
2493  }
2494 
2495  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2496  HeapNumber::cast(result)->set_value(value);
2497  return result;
2498 }
2499 
2500 
2501 MaybeObject* Heap::AllocateHeapNumber(double value) {
2502  // Use general version, if we're forced to always allocate.
2503  if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2504 
2505  // This version of AllocateHeapNumber is optimized for
2506  // allocation in new space.
2508  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2509  Object* result;
2510  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2511  if (!maybe_result->ToObject(&result)) return maybe_result;
2512  }
2513  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2514  HeapNumber::cast(result)->set_value(value);
2515  return result;
2516 }
2517 
2518 
2519 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
2520  Object* result;
2521  { MaybeObject* maybe_result = AllocateRawCell();
2522  if (!maybe_result->ToObject(&result)) return maybe_result;
2523  }
2524  HeapObject::cast(result)->set_map_no_write_barrier(
2525  global_property_cell_map());
2526  JSGlobalPropertyCell::cast(result)->set_value(value);
2527  return result;
2528 }
2529 
2530 
2531 MaybeObject* Heap::CreateOddball(const char* to_string,
2532  Object* to_number,
2533  byte kind) {
2534  Object* result;
2535  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2536  if (!maybe_result->ToObject(&result)) return maybe_result;
2537  }
2538  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2539 }
2540 
2541 
2542 bool Heap::CreateApiObjects() {
2543  Object* obj;
2544 
2545  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2546  if (!maybe_obj->ToObject(&obj)) return false;
2547  }
2548  // Don't use Smi-only elements optimizations for objects with the neander
2549  // map. There are too many cases where element values are set directly with a
2550  // bottleneck to trap the Smi-only -> fast elements transition, and there
2551  // appears to be no benefit in optimizing this case.
2552  Map* new_neander_map = Map::cast(obj);
2553  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2554  set_neander_map(new_neander_map);
2555 
2556  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2557  if (!maybe_obj->ToObject(&obj)) return false;
2558  }
2559  Object* elements;
2560  { MaybeObject* maybe_elements = AllocateFixedArray(2);
2561  if (!maybe_elements->ToObject(&elements)) return false;
2562  }
2563  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2564  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2565  set_message_listeners(JSObject::cast(obj));
2566 
2567  return true;
2568 }
2569 
2570 
2571 void Heap::CreateJSEntryStub() {
2572  JSEntryStub stub;
2573  set_js_entry_code(*stub.GetCode());
2574 }
2575 
2576 
2577 void Heap::CreateJSConstructEntryStub() {
2578  JSConstructEntryStub stub;
2579  set_js_construct_entry_code(*stub.GetCode());
2580 }
2581 
2582 
2583 void Heap::CreateFixedStubs() {
2584  // Here we create roots for fixed stubs. They are needed at GC
2585  // for cooking and uncooking (check out frames.cc).
2586  // This eliminates the need for doing dictionary lookups in the
2587  // stub cache for these stubs.
2588  HandleScope scope;
2589  // gcc-4.4 has problem generating correct code of following snippet:
2590  // { JSEntryStub stub;
2591  // js_entry_code_ = *stub.GetCode();
2592  // }
2593  // { JSConstructEntryStub stub;
2594  // js_construct_entry_code_ = *stub.GetCode();
2595  // }
2596  // To work around the problem, make separate functions without inlining.
2597  Heap::CreateJSEntryStub();
2598  Heap::CreateJSConstructEntryStub();
2599 
2600  // Create stubs that should be there, so we don't unexpectedly have to
2601  // create them if we need them during the creation of another stub.
2602  // Stub creation mixes raw pointers and handles in an unsafe manner so
2603  // we cannot create stubs while we are creating stubs.
2604  CodeStub::GenerateStubsAheadOfTime();
2605 }
2606 
2607 
2608 bool Heap::CreateInitialObjects() {
2609  Object* obj;
2610 
2611  // The -0 value must be set before NumberFromDouble works.
2612  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2613  if (!maybe_obj->ToObject(&obj)) return false;
2614  }
2615  set_minus_zero_value(HeapNumber::cast(obj));
2616  ASSERT(signbit(minus_zero_value()->Number()) != 0);
2617 
2618  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2619  if (!maybe_obj->ToObject(&obj)) return false;
2620  }
2621  set_nan_value(HeapNumber::cast(obj));
2622 
2623  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2624  if (!maybe_obj->ToObject(&obj)) return false;
2625  }
2626  set_infinity_value(HeapNumber::cast(obj));
2627 
2628  // The hole has not been created yet, but we want to put something
2629  // predictable in the gaps in the symbol table, so let's make that Smi zero.
2630  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2631 
2632  // Allocate initial symbol table.
2633  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2634  if (!maybe_obj->ToObject(&obj)) return false;
2635  }
2636  // Don't use set_symbol_table() due to asserts.
2637  roots_[kSymbolTableRootIndex] = obj;
2638 
2639  // Finish initializing oddballs after creating the symbol table.
2640  { MaybeObject* maybe_obj =
2641  undefined_value()->Initialize("undefined",
2642  nan_value(),
2643  Oddball::kUndefined);
2644  if (!maybe_obj->ToObject(&obj)) return false;
2645  }
2646 
2647  // Initialize the null_value.
2648  { MaybeObject* maybe_obj =
2649  null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
2650  if (!maybe_obj->ToObject(&obj)) return false;
2651  }
2652 
2653  { MaybeObject* maybe_obj = CreateOddball("true",
2654  Smi::FromInt(1),
2655  Oddball::kTrue);
2656  if (!maybe_obj->ToObject(&obj)) return false;
2657  }
2658  set_true_value(Oddball::cast(obj));
2659 
2660  { MaybeObject* maybe_obj = CreateOddball("false",
2661  Smi::FromInt(0),
2662  Oddball::kFalse);
2663  if (!maybe_obj->ToObject(&obj)) return false;
2664  }
2665  set_false_value(Oddball::cast(obj));
2666 
2667  { MaybeObject* maybe_obj = CreateOddball("hole",
2668  Smi::FromInt(-1),
2669  Oddball::kTheHole);
2670  if (!maybe_obj->ToObject(&obj)) return false;
2671  }
2672  set_the_hole_value(Oddball::cast(obj));
2673 
2674  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2675  Smi::FromInt(-4),
2676  Oddball::kArgumentMarker);
2677  if (!maybe_obj->ToObject(&obj)) return false;
2678  }
2679  set_arguments_marker(Oddball::cast(obj));
2680 
2681  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2682  Smi::FromInt(-2),
2683  Oddball::kOther);
2684  if (!maybe_obj->ToObject(&obj)) return false;
2685  }
2686  set_no_interceptor_result_sentinel(obj);
2687 
2688  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2689  Smi::FromInt(-3),
2690  Oddball::kOther);
2691  if (!maybe_obj->ToObject(&obj)) return false;
2692  }
2693  set_termination_exception(obj);
2694 
2695  // Allocate the empty string.
2696  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2697  if (!maybe_obj->ToObject(&obj)) return false;
2698  }
2699  set_empty_string(String::cast(obj));
2700 
2701  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2702  { MaybeObject* maybe_obj =
2703  LookupAsciiSymbol(constant_symbol_table[i].contents);
2704  if (!maybe_obj->ToObject(&obj)) return false;
2705  }
2706  roots_[constant_symbol_table[i].index] = String::cast(obj);
2707  }
2708 
2709  // Allocate the hidden symbol which is used to identify the hidden properties
2710  // in JSObjects. The hash code has a special value so that it will not match
2711  // the empty string when searching for the property. It cannot be part of the
2712  // loop above because it needs to be allocated manually with the special
2713  // hash code in place. The hash code for the hidden_symbol is zero to ensure
2714  // that it will always be at the first entry in property descriptors.
2715  { MaybeObject* maybe_obj =
2716  AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2717  if (!maybe_obj->ToObject(&obj)) return false;
2718  }
2719  hidden_symbol_ = String::cast(obj);
2720 
2721  // Allocate the foreign for __proto__.
2722  { MaybeObject* maybe_obj =
2723  AllocateForeign((Address) &Accessors::ObjectPrototype);
2724  if (!maybe_obj->ToObject(&obj)) return false;
2725  }
2726  set_prototype_accessors(Foreign::cast(obj));
2727 
2728  // Allocate the code_stubs dictionary. The initial size is set to avoid
2729  // expanding the dictionary during bootstrapping.
2730  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
2731  if (!maybe_obj->ToObject(&obj)) return false;
2732  }
2733  set_code_stubs(UnseededNumberDictionary::cast(obj));
2734 
2735 
2736  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2737  // is set to avoid expanding the dictionary during bootstrapping.
2738  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
2739  if (!maybe_obj->ToObject(&obj)) return false;
2740  }
2741  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
2742 
2743  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
2744  if (!maybe_obj->ToObject(&obj)) return false;
2745  }
2746  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
2747 
2748  set_instanceof_cache_function(Smi::FromInt(0));
2749  set_instanceof_cache_map(Smi::FromInt(0));
2750  set_instanceof_cache_answer(Smi::FromInt(0));
2751 
2752  CreateFixedStubs();
2753 
2754  // Allocate the dictionary of intrinsic function names.
2755  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2756  if (!maybe_obj->ToObject(&obj)) return false;
2757  }
2758  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2759  obj);
2760  if (!maybe_obj->ToObject(&obj)) return false;
2761  }
2762  set_intrinsic_function_names(StringDictionary::cast(obj));
2763 
2764  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
2765  if (!maybe_obj->ToObject(&obj)) return false;
2766  }
2767  set_number_string_cache(FixedArray::cast(obj));
2768 
2769  // Allocate cache for single character ASCII strings.
2770  { MaybeObject* maybe_obj =
2771  AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2772  if (!maybe_obj->ToObject(&obj)) return false;
2773  }
2774  set_single_character_string_cache(FixedArray::cast(obj));
2775 
2776  // Allocate cache for string split.
2777  { MaybeObject* maybe_obj = AllocateFixedArray(
2778  RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2779  if (!maybe_obj->ToObject(&obj)) return false;
2780  }
2781  set_string_split_cache(FixedArray::cast(obj));
2782 
2783  { MaybeObject* maybe_obj = AllocateFixedArray(
2784  RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
2785  if (!maybe_obj->ToObject(&obj)) return false;
2786  }
2787  set_regexp_multiple_cache(FixedArray::cast(obj));
2788 
2789  // Allocate cache for external strings pointing to native source code.
2790  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2791  if (!maybe_obj->ToObject(&obj)) return false;
2792  }
2793  set_natives_source_cache(FixedArray::cast(obj));
2794 
2795  // Handling of script id generation is in FACTORY->NewScript.
2796  set_last_script_id(undefined_value());
2797 
2798  // Initialize keyed lookup cache.
2799  isolate_->keyed_lookup_cache()->Clear();
2800 
2801  // Initialize context slot cache.
2802  isolate_->context_slot_cache()->Clear();
2803 
2804  // Initialize descriptor cache.
2805  isolate_->descriptor_lookup_cache()->Clear();
2806 
2807  // Initialize compilation cache.
2808  isolate_->compilation_cache()->Clear();
2809 
2810  return true;
2811 }
2812 
2813 
2814 Object* RegExpResultsCache::Lookup(Heap* heap,
2815  String* key_string,
2816  Object* key_pattern,
2817  ResultsCacheType type) {
2818  FixedArray* cache;
2819  if (!key_string->IsSymbol()) return Smi::FromInt(0);
2820  if (type == STRING_SPLIT_SUBSTRINGS) {
2821  ASSERT(key_pattern->IsString());
2822  if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
2823  cache = heap->string_split_cache();
2824  } else {
2826  ASSERT(key_pattern->IsFixedArray());
2827  cache = heap->regexp_multiple_cache();
2828  }
2829 
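 // Each cache entry occupies kArrayEntriesPerCacheEntry consecutive slots
 // (string, pattern, result array); a primary and a secondary entry derived
 // from the string hash are probed.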
2830  uint32_t hash = key_string->Hash();
2831  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2832  ~(kArrayEntriesPerCacheEntry - 1));
2833  if (cache->get(index + kStringOffset) == key_string &&
2834  cache->get(index + kPatternOffset) == key_pattern) {
2835  return cache->get(index + kArrayOffset);
2836  }
2837  index =
2838  ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2839  if (cache->get(index + kStringOffset) == key_string &&
2840  cache->get(index + kPatternOffset) == key_pattern) {
2841  return cache->get(index + kArrayOffset);
2842  }
2843  return Smi::FromInt(0);
2844 }
2845 
2846 
2847 void RegExpResultsCache::Enter(Heap* heap,
2848  String* key_string,
2849  Object* key_pattern,
2850  FixedArray* value_array,
2851  ResultsCacheType type) {
2852  FixedArray* cache;
2853  if (!key_string->IsSymbol()) return;
2854  if (type == STRING_SPLIT_SUBSTRINGS) {
2855  ASSERT(key_pattern->IsString());
2856  if (!key_pattern->IsSymbol()) return;
2857  cache = heap->string_split_cache();
2858  } else {
2860  ASSERT(key_pattern->IsFixedArray());
2861  cache = heap->regexp_multiple_cache();
2862  }
2863 
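 // Insertion mirrors Lookup: fill the primary entry if it is free, otherwise
 // the secondary; if both are taken, the secondary entry is cleared and the
 // primary one is overwritten.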
2864  uint32_t hash = key_string->Hash();
2865  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2866  ~(kArrayEntriesPerCacheEntry - 1));
2867  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2868  cache->set(index + kStringOffset, key_string);
2869  cache->set(index + kPatternOffset, key_pattern);
2870  cache->set(index + kArrayOffset, value_array);
2871  } else {
2872  uint32_t index2 =
2873  ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2874  if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
2875  cache->set(index2 + kStringOffset, key_string);
2876  cache->set(index2 + kPatternOffset, key_pattern);
2877  cache->set(index2 + kArrayOffset, value_array);
2878  } else {
2879  cache->set(index2 + kStringOffset, Smi::FromInt(0));
2880  cache->set(index2 + kPatternOffset, Smi::FromInt(0));
2881  cache->set(index2 + kArrayOffset, Smi::FromInt(0));
2882  cache->set(index + kStringOffset, key_string);
2883  cache->set(index + kPatternOffset, key_pattern);
2884  cache->set(index + kArrayOffset, value_array);
2885  }
2886  }
2887  // If the array is a reasonably short list of substrings, convert it into a
2888  // list of symbols.
2889  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
2890  for (int i = 0; i < value_array->length(); i++) {
2891  String* str = String::cast(value_array->get(i));
2892  Object* symbol;
2893  MaybeObject* maybe_symbol = heap->LookupSymbol(str);
2894  if (maybe_symbol->ToObject(&symbol)) {
2895  value_array->set(i, symbol);
2896  }
2897  }
2898  }
2899  // Convert backing store to a copy-on-write array.
2900  value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
2901 }
2902 
2903 
2904 void RegExpResultsCache::Clear(FixedArray* cache) {
2905  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
2906  cache->set(i, Smi::FromInt(0));
2907  }
2908 }
2909 
2910 
2911 MaybeObject* Heap::AllocateInitialNumberStringCache() {
2912  MaybeObject* maybe_obj =
2913  AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
2914  return maybe_obj;
2915 }
2916 
2917 
2918 int Heap::FullSizeNumberStringCacheLength() {
2919  // Compute the size of the number string cache based on the max newspace size.
2920  // The number string cache has a minimum size based on twice the initial cache
2921  // size to ensure that it is bigger after being made 'full size'.
2922  int number_string_cache_size = max_semispace_size_ / 512;
2923  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
2924  Min(0x4000, number_string_cache_size));
2925  // There is a string and a number per entry so the length is twice the number
2926  // of entries.
2927  return number_string_cache_size * 2;
2928 }
2929 
2930 
2931 void Heap::AllocateFullSizeNumberStringCache() {
2932  // The idea is to have a small number string cache in the snapshot to keep
2933  // boot-time memory usage down. If we expand the number string cache already
2934  // while creating the snapshot then that didn't work out.
2935  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
2936  MaybeObject* maybe_obj =
2937  AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
2938  Object* new_cache;
2939  if (maybe_obj->ToObject(&new_cache)) {
2940  // We don't bother to repopulate the cache with entries from the old cache.
2941  // It will be repopulated soon enough with new strings.
2942  set_number_string_cache(FixedArray::cast(new_cache));
2943  }
2944  // If allocation fails then we just return without doing anything. It is only
2945  // a cache, so best effort is OK here.
2946 }
2947 
2948 
2949 void Heap::FlushNumberStringCache() {
2950  // Flush the number to string cache.
2951  int len = number_string_cache()->length();
2952  for (int i = 0; i < len; i++) {
2953  number_string_cache()->set_undefined(this, i);
2954  }
2955 }
2956 
2957 
2958 static inline int double_get_hash(double d) {
2959  DoubleRepresentation rep(d);
2960  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
2961 }
2962 
2963 
2964 static inline int smi_get_hash(Smi* smi) {
2965  return smi->value();
2966 }
2967 
2968 
2969 Object* Heap::GetNumberStringCache(Object* number) {
2970  int hash;
2971  int mask = (number_string_cache()->length() >> 1) - 1;
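 // The cache stores (number, string) pairs: the key at index 2 * hash and
 // the cached string at 2 * hash + 1.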
2972  if (number->IsSmi()) {
2973  hash = smi_get_hash(Smi::cast(number)) & mask;
2974  } else {
2975  hash = double_get_hash(number->Number()) & mask;
2976  }
2977  Object* key = number_string_cache()->get(hash * 2);
2978  if (key == number) {
2979  return String::cast(number_string_cache()->get(hash * 2 + 1));
2980  } else if (key->IsHeapNumber() &&
2981  number->IsHeapNumber() &&
2982  key->Number() == number->Number()) {
2983  return String::cast(number_string_cache()->get(hash * 2 + 1));
2984  }
2985  return undefined_value();
2986 }
2987 
2988 
2989 void Heap::SetNumberStringCache(Object* number, String* string) {
2990  int hash;
2991  int mask = (number_string_cache()->length() >> 1) - 1;
2992  if (number->IsSmi()) {
2993  hash = smi_get_hash(Smi::cast(number)) & mask;
2994  } else {
2995  hash = double_get_hash(number->Number()) & mask;
2996  }
2997  if (number_string_cache()->get(hash * 2) != undefined_value() &&
2998  number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
2999  // The first time we have a hash collision, we move to the full sized
3000  // number string cache.
3001  AllocateFullSizeNumberStringCache();
3002  return;
3003  }
3004  number_string_cache()->set(hash * 2, number);
3005  number_string_cache()->set(hash * 2 + 1, string);
3006 }
3007 
3008 
3009 MaybeObject* Heap::NumberToString(Object* number,
3010  bool check_number_string_cache) {
3011  isolate_->counters()->number_to_string_runtime()->Increment();
3012  if (check_number_string_cache) {
3013  Object* cached = GetNumberStringCache(number);
3014  if (cached != undefined_value()) {
3015  return cached;
3016  }
3017  }
3018 
3019  char arr[100];
3020  Vector<char> buffer(arr, ARRAY_SIZE(arr));
3021  const char* str;
3022  if (number->IsSmi()) {
3023  int num = Smi::cast(number)->value();
3024  str = IntToCString(num, buffer);
3025  } else {
3026  double num = HeapNumber::cast(number)->value();
3027  str = DoubleToCString(num, buffer);
3028  }
3029 
3030  Object* js_string;
3031  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
3032  if (maybe_js_string->ToObject(&js_string)) {
3033  SetNumberStringCache(number, String::cast(js_string));
3034  }
3035  return maybe_js_string;
3036 }
3037 
3038 
3039 MaybeObject* Heap::Uint32ToString(uint32_t value,
3040  bool check_number_string_cache) {
3041  Object* number;
3042  MaybeObject* maybe = NumberFromUint32(value);
3043  if (!maybe->To<Object>(&number)) return maybe;
3044  return NumberToString(number, check_number_string_cache);
3045 }
3046 
3047 
3048 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3049  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3050 }
3051 
3052 
3053 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3054  ExternalArrayType array_type) {
3055  switch (array_type) {
3056  case kExternalByteArray:
3057  return kExternalByteArrayMapRootIndex;
3058  case kExternalUnsignedByteArray:
3059  return kExternalUnsignedByteArrayMapRootIndex;
3060  case kExternalShortArray:
3061  return kExternalShortArrayMapRootIndex;
3062  case kExternalUnsignedShortArray:
3063  return kExternalUnsignedShortArrayMapRootIndex;
3064  case kExternalIntArray:
3065  return kExternalIntArrayMapRootIndex;
3066  case kExternalUnsignedIntArray:
3067  return kExternalUnsignedIntArrayMapRootIndex;
3068  case kExternalFloatArray:
3069  return kExternalFloatArrayMapRootIndex;
3070  case kExternalDoubleArray:
3071  return kExternalDoubleArrayMapRootIndex;
3072  case kExternalPixelArray:
3073  return kExternalPixelArrayMapRootIndex;
3074  default:
3075  UNREACHABLE();
3076  return kUndefinedValueRootIndex;
3077  }
3078 }
3079 
3080 
3081 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3082  // We need to distinguish the minus zero value and this cannot be
3083  // done after conversion to int. Doing this by comparing bit
3084  // patterns is faster than using fpclassify() et al.
3085  static const DoubleRepresentation minus_zero(-0.0);
3086 
3087  DoubleRepresentation rep(value);
3088  if (rep.bits == minus_zero.bits) {
3089  return AllocateHeapNumber(-0.0, pretenure);
3090  }
3091 
3092  int int_value = FastD2I(value);
3093  if (value == int_value && Smi::IsValid(int_value)) {
3094  return Smi::FromInt(int_value);
3095  }
3096 
3097  // Materialize the value in the heap.
3098  return AllocateHeapNumber(value, pretenure);
3099 }
3100 
3101 
3102 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3103  // Statically ensure that it is safe to allocate foreigns in paged spaces.
3105  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3106  Foreign* result;
3107  MaybeObject* maybe_result = Allocate(foreign_map(), space);
3108  if (!maybe_result->To(&result)) return maybe_result;
3109  result->set_foreign_address(address);
3110  return result;
3111 }
3112 
3113 
3114 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3115  SharedFunctionInfo* share;
3116  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3117  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3118 
3119  // Set pointer fields.
3120  share->set_name(name);
3121  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3122  share->set_code(illegal);
3123  share->ClearOptimizedCodeMap();
3124  share->set_scope_info(ScopeInfo::Empty());
3125  Code* construct_stub =
3126  isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3127  share->set_construct_stub(construct_stub);
3128  share->set_instance_class_name(Object_symbol());
3129  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3130  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3131  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3132  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3133  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3134  share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
3135  share->set_ast_node_count(0);
3136  share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3137  share->set_counters(0);
3138 
3139  // Set integer fields (smi or int, depending on the architecture).
3140  share->set_length(0);
3141  share->set_formal_parameter_count(0);
3142  share->set_expected_nof_properties(0);
3143  share->set_num_literals(0);
3144  share->set_start_position_and_type(0);
3145  share->set_end_position(0);
3146  share->set_function_token_position(0);
3147  // All compiler hints default to false or 0.
3148  share->set_compiler_hints(0);
3150  share->set_opt_count(0);
3151 
3152  return share;
3153 }
3154 
3155 
3156 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3157  JSArray* arguments,
3158  int start_position,
3159  int end_position,
3160  Object* script,
3161  Object* stack_trace,
3162  Object* stack_frames) {
3163  Object* result;
3164  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3165  if (!maybe_result->ToObject(&result)) return maybe_result;
3166  }
3167  JSMessageObject* message = JSMessageObject::cast(result);
3168  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3169  message->initialize_elements();
3170  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3171  message->set_type(type);
3172  message->set_arguments(arguments);
3173  message->set_start_position(start_position);
3174  message->set_end_position(end_position);
3175  message->set_script(script);
3176  message->set_stack_trace(stack_trace);
3177  message->set_stack_frames(stack_frames);
3178  return result;
3179 }
3180 
3181 
3182 
3183 // Returns true for a character in a range. Both limits are inclusive.
3184 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3185  // This makes use of the unsigned wraparound.
3186  return character - from <= to - from;
3187 }
3188 
3189 
3190 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3191  Heap* heap,
3192  uint32_t c1,
3193  uint32_t c2) {
3194  String* symbol;
3195  // Numeric strings have a different hash algorithm not known by
3196  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
3197  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3198  heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
3199  return symbol;
3200  // Now we know the length is 2, we might as well make use of that fact
3201  // when building the new string.
3202  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
3203  ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
3204  Object* result;
3205  { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
3206  if (!maybe_result->ToObject(&result)) return maybe_result;
3207  }
3208  char* dest = SeqAsciiString::cast(result)->GetChars();
3209  dest[0] = c1;
3210  dest[1] = c2;
3211  return result;
3212  } else {
3213  Object* result;
3214  { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3215  if (!maybe_result->ToObject(&result)) return maybe_result;
3216  }
3217  uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3218  dest[0] = c1;
3219  dest[1] = c2;
3220  return result;
3221  }
3222 }
3223 
3224 
3225 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3226  int first_length = first->length();
3227  if (first_length == 0) {
3228  return second;
3229  }
3230 
3231  int second_length = second->length();
3232  if (second_length == 0) {
3233  return first;
3234  }
3235 
3236  int length = first_length + second_length;
3237 
3238  // Optimization for 2-byte strings often used as keys in a decompression
3239  // dictionary. Check whether we already have the string in the symbol
3240  // table to prevent creation of many unnecessary strings.
3241  if (length == 2) {
3242  unsigned c1 = first->Get(0);
3243  unsigned c2 = second->Get(0);
3244  return MakeOrFindTwoCharacterString(this, c1, c2);
3245  }
3246 
3247  bool first_is_ascii = first->IsAsciiRepresentation();
3248  bool second_is_ascii = second->IsAsciiRepresentation();
3249  bool is_ascii = first_is_ascii && second_is_ascii;
3250 
3251  // Make sure that an out of memory exception is thrown if the length
3252  // of the new cons string is too large.
3253  if (length > String::kMaxLength || length < 0) {
3254  isolate()->context()->mark_out_of_memory();
3255  return Failure::OutOfMemoryException();
3256  }
3257 
3258  bool is_ascii_data_in_two_byte_string = false;
3259  if (!is_ascii) {
3260  // At least one of the strings uses two-byte representation so we
3261  // can't use the fast case code for short ASCII strings below, but
3262  // we can try to save memory if all chars actually fit in ASCII.
3263  is_ascii_data_in_two_byte_string =
3264  first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
3265  if (is_ascii_data_in_two_byte_string) {
3266  isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3267  }
3268  }
3269 
3270  // If the resulting string is small make a flat string.
3271  if (length < ConsString::kMinLength) {
3272  // Note that neither of the two inputs can be a slice because:
3273  STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3274  ASSERT(first->IsFlat());
3275  ASSERT(second->IsFlat());
3276  if (is_ascii) {
3277  Object* result;
3278  { MaybeObject* maybe_result = AllocateRawAsciiString(length);
3279  if (!maybe_result->ToObject(&result)) return maybe_result;
3280  }
3281  // Copy the characters into the new object.
3282  char* dest = SeqAsciiString::cast(result)->GetChars();
3283  // Copy first part.
3284  const char* src;
3285  if (first->IsExternalString()) {
3286  src = ExternalAsciiString::cast(first)->GetChars();
3287  } else {
3288  src = SeqAsciiString::cast(first)->GetChars();
3289  }
3290  for (int i = 0; i < first_length; i++) *dest++ = src[i];
3291  // Copy second part.
3292  if (second->IsExternalString()) {
3293  src = ExternalAsciiString::cast(second)->GetChars();
3294  } else {
3295  src = SeqAsciiString::cast(second)->GetChars();
3296  }
3297  for (int i = 0; i < second_length; i++) *dest++ = src[i];
3298  return result;
3299  } else {
3300  if (is_ascii_data_in_two_byte_string) {
3301  Object* result;
3302  { MaybeObject* maybe_result = AllocateRawAsciiString(length);
3303  if (!maybe_result->ToObject(&result)) return maybe_result;
3304  }
3305  // Copy the characters into the new object.
3306  char* dest = SeqAsciiString::cast(result)->GetChars();
3307  String::WriteToFlat(first, dest, 0, first_length);
3308  String::WriteToFlat(second, dest + first_length, 0, second_length);
3309  isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3310  return result;
3311  }
3312 
3313  Object* result;
3314  { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3315  if (!maybe_result->ToObject(&result)) return maybe_result;
3316  }
3317  // Copy the characters into the new object.
3318  uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3319  String::WriteToFlat(first, dest, 0, first_length);
3320  String::WriteToFlat(second, dest + first_length, 0, second_length);
3321  return result;
3322  }
3323  }
3324 
3325  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
3326  cons_ascii_string_map() : cons_string_map();
3327 
3328  Object* result;
3329  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3330  if (!maybe_result->ToObject(&result)) return maybe_result;
3331  }
3332 
3333  AssertNoAllocation no_gc;
3334  ConsString* cons_string = ConsString::cast(result);
3335  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3336  cons_string->set_length(length);
3337  cons_string->set_hash_field(String::kEmptyHashField);
3338  cons_string->set_first(first, mode);
3339  cons_string->set_second(second, mode);
3340  return result;
3341 }
3342 
3343 
3344 MaybeObject* Heap::AllocateSubString(String* buffer,
3345  int start,
3346  int end,
3347  PretenureFlag pretenure) {
3348  int length = end - start;
3349  if (length <= 0) {
3350  return empty_string();
3351  } else if (length == 1) {
3352  return LookupSingleCharacterStringFromCode(buffer->Get(start));
3353  } else if (length == 2) {
3354  // Optimization for 2-byte strings often used as keys in a decompression
3355  // dictionary. Check whether we already have the string in the symbol
3356  // table to prevent creation of many unnecessary strings.
3357  unsigned c1 = buffer->Get(start);
3358  unsigned c2 = buffer->Get(start + 1);
3359  return MakeOrFindTwoCharacterString(this, c1, c2);
3360  }
3361 
3362  // Make an attempt to flatten the buffer to reduce access time.
3363  buffer = buffer->TryFlattenGetString();
3364 
3365  if (!FLAG_string_slices ||
3366  !buffer->IsFlat() ||
3367  length < SlicedString::kMinLength ||
3368  pretenure == TENURED) {
3369  Object* result;
3370  // WriteToFlat takes care of the case when an indirect string has a
3371  // different encoding from its underlying string. These encodings may
3372  // differ because of externalization.
3373  bool is_ascii = buffer->IsAsciiRepresentation();
3374  { MaybeObject* maybe_result = is_ascii
3375  ? AllocateRawAsciiString(length, pretenure)
3376  : AllocateRawTwoByteString(length, pretenure);
3377  if (!maybe_result->ToObject(&result)) return maybe_result;
3378  }
3379  String* string_result = String::cast(result);
3380  // Copy the characters into the new object.
3381  if (is_ascii) {
3382  ASSERT(string_result->IsAsciiRepresentation());
3383  char* dest = SeqAsciiString::cast(string_result)->GetChars();
3384  String::WriteToFlat(buffer, dest, start, end);
3385  } else {
3386  ASSERT(string_result->IsTwoByteRepresentation());
3387  uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3388  String::WriteToFlat(buffer, dest, start, end);
3389  }
3390  return result;
3391  }
3392 
3393  ASSERT(buffer->IsFlat());
3394 #ifdef VERIFY_HEAP
3395  if (FLAG_verify_heap) {
3396  buffer->StringVerify();
3397  }
3398 #endif
3399 
3400  Object* result;
3401  // When slicing an indirect string we use its encoding for a newly created
3402  // slice and don't check the encoding of the underlying string. This is safe
3403  // even if the encodings are different because of externalization. If an
3404  // indirect ASCII string is pointing to a two-byte string, the two-byte char
3405  // codes of the underlying string must still fit into ASCII (because
3406  // externalization must not change char codes).
3407  { Map* map = buffer->IsAsciiRepresentation()
3408  ? sliced_ascii_string_map()
3409  : sliced_string_map();
3410  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3411  if (!maybe_result->ToObject(&result)) return maybe_result;
3412  }
3413 
3414  AssertNoAllocation no_gc;
3415  SlicedString* sliced_string = SlicedString::cast(result);
3416  sliced_string->set_length(length);
3417  sliced_string->set_hash_field(String::kEmptyHashField);
3418  if (buffer->IsConsString()) {
3419  ConsString* cons = ConsString::cast(buffer);
3420  ASSERT(cons->second()->length() == 0);
3421  sliced_string->set_parent(cons->first());
3422  sliced_string->set_offset(start);
3423  } else if (buffer->IsSlicedString()) {
3424  // Prevent nesting sliced strings.
3425  SlicedString* parent_slice = SlicedString::cast(buffer);
3426  sliced_string->set_parent(parent_slice->parent());
3427  sliced_string->set_offset(start + parent_slice->offset());
3428  } else {
3429  sliced_string->set_parent(buffer);
3430  sliced_string->set_offset(start);
3431  }
3432  ASSERT(sliced_string->parent()->IsSeqString() ||
3433  sliced_string->parent()->IsExternalString());
3434  return result;
3435 }
3436 
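// Illustrative sketch, not part of the original heap.cc: AllocateSubString
// returns a SlicedString only when string slices are enabled, the flattened
// buffer is long enough (SlicedString::kMinLength) and no pretenuring is
// requested; otherwise the range is copied into a fresh sequential string.
// A hypothetical caller (the helper name is an assumption for the example):
static MaybeObject* MiddleThirdForIllustration(Heap* heap, String* str) {
  int len = str->length();
  Object* result;
  { MaybeObject* maybe_result =
        heap->AllocateSubString(str, len / 3, 2 * len / 3, NOT_TENURED);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return result;
}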
3437 
3438 MaybeObject* Heap::AllocateExternalStringFromAscii(
3439  const ExternalAsciiString::Resource* resource) {
3440  size_t length = resource->length();
3441  if (length > static_cast<size_t>(String::kMaxLength)) {
3442  isolate()->context()->mark_out_of_memory();
3443  return Failure::OutOfMemoryException();
3444  }
3445 
3446  ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
3447 
3448  Map* map = external_ascii_string_map();
3449  Object* result;
3450  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3451  if (!maybe_result->ToObject(&result)) return maybe_result;
3452  }
3453 
3454  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3455  external_string->set_length(static_cast<int>(length));
3456  external_string->set_hash_field(String::kEmptyHashField);
3457  external_string->set_resource(resource);
3458 
3459  return result;
3460 }
3461 
3462 
3463 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3464  const ExternalTwoByteString::Resource* resource) {
3465  size_t length = resource->length();
3466  if (length > static_cast<size_t>(String::kMaxLength)) {
3467  isolate()->context()->mark_out_of_memory();
3468  return Failure::OutOfMemoryException();
3469  }
3470 
3471  // For small strings we check whether the resource contains only
3472  // ASCII characters. If yes, we use a different string map.
3473  static const size_t kAsciiCheckLengthLimit = 32;
3474  bool is_ascii = length <= kAsciiCheckLengthLimit &&
3475  String::IsAscii(resource->data(), static_cast<int>(length));
3476  Map* map = is_ascii ?
3477  external_string_with_ascii_data_map() : external_string_map();
3478  Object* result;
3479  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3480  if (!maybe_result->ToObject(&result)) return maybe_result;
3481  }
3482 
3483  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3484  external_string->set_length(static_cast<int>(length));
3485  external_string->set_hash_field(String::kEmptyHashField);
3486  external_string->set_resource(resource);
3487 
3488  return result;
3489 }
3490 
3491 
3492 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3493  if (code <= String::kMaxAsciiCharCode) {
3494  Object* value = single_character_string_cache()->get(code);
3495  if (value != undefined_value()) return value;
3496 
3497  char buffer[1];
3498  buffer[0] = static_cast<char>(code);
3499  Object* result;
3500  MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
3501 
3502  if (!maybe_result->ToObject(&result)) return maybe_result;
3503  single_character_string_cache()->set(code, result);
3504  return result;
3505  }
3506 
3507  Object* result;
3508  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3509  if (!maybe_result->ToObject(&result)) return maybe_result;
3510  }
3511  String* answer = String::cast(result);
3512  answer->Set(0, code);
3513  return answer;
3514 }
3515 
3516 
3517 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3518  if (length < 0 || length > ByteArray::kMaxLength) {
3519  return Failure::OutOfMemoryException();
3520  }
3521  if (pretenure == NOT_TENURED) {
3522  return AllocateByteArray(length);
3523  }
3524  int size = ByteArray::SizeFor(length);
3525  Object* result;
3526  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3527  ? old_data_space_->AllocateRaw(size)
3528  : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3529  if (!maybe_result->ToObject(&result)) return maybe_result;
3530  }
3531 
3532  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3533  byte_array_map());
3534  reinterpret_cast<ByteArray*>(result)->set_length(length);
3535  return result;
3536 }
3537 
3538 
3539 MaybeObject* Heap::AllocateByteArray(int length) {
3540  if (length < 0 || length > ByteArray::kMaxLength) {
3541  return Failure::OutOfMemoryException();
3542  }
3543  int size = ByteArray::SizeFor(length);
3544  AllocationSpace space =
3545  (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3546  Object* result;
3547  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3548  if (!maybe_result->ToObject(&result)) return maybe_result;
3549  }
3550 
3551  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3552  byte_array_map());
3553  reinterpret_cast<ByteArray*>(result)->set_length(length);
3554  return result;
3555 }
3556 
3557 
3558 void Heap::CreateFillerObjectAt(Address addr, int size) {
3559  if (size == 0) return;
3560  HeapObject* filler = HeapObject::FromAddress(addr);
3561  if (size == kPointerSize) {
3562  filler->set_map_no_write_barrier(one_pointer_filler_map());
3563  } else if (size == 2 * kPointerSize) {
3564  filler->set_map_no_write_barrier(two_pointer_filler_map());
3565  } else {
3566  filler->set_map_no_write_barrier(free_space_map());
3567  FreeSpace::cast(filler)->set_size(size);
3568  }
3569 }
3570 
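// Illustrative sketch, not part of the original heap.cc: filler maps keep a
// partially used memory range walkable by the heap iterator and the sweeper.
// A hypothetical helper that pads the unused tail of an allocation (the name
// and parameters are assumptions for the example):
static void FillUnusedTailForIllustration(Heap* heap,
                                          HeapObject* object,
                                          int used_size,
                                          int allocated_size) {
  if (allocated_size > used_size) {
    heap->CreateFillerObjectAt(object->address() + used_size,
                               allocated_size - used_size);
  }
}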
3571 
3572 MaybeObject* Heap::AllocateExternalArray(int length,
3573  ExternalArrayType array_type,
3574  void* external_pointer,
3575  PretenureFlag pretenure) {
3576  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3577  Object* result;
3578  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
3579  space,
3580  OLD_DATA_SPACE);
3581  if (!maybe_result->ToObject(&result)) return maybe_result;
3582  }
3583 
3584  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3585  MapForExternalArrayType(array_type));
3586  reinterpret_cast<ExternalArray*>(result)->set_length(length);
3587  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3588  external_pointer);
3589 
3590  return result;
3591 }
3592 
3593 
3594 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3595  Code::Flags flags,
3596  Handle<Object> self_reference,
3597  bool immovable) {
3598  // Allocate ByteArray before the Code object, so that we do not risk
3599  // leaving uninitialized Code object (and breaking the heap).
3600  ByteArray* reloc_info;
3601  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3602  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3603 
3604  // Compute size.
3605  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3606  int obj_size = Code::SizeFor(body_size);
3607  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3608  MaybeObject* maybe_result;
3609  // Large code objects and code objects which should stay at a fixed address
3610  // are allocated in large object space.
3611  HeapObject* result;
3612  bool force_lo_space = obj_size > code_space()->AreaSize();
3613  if (force_lo_space) {
3614  maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3615  } else {
3616  maybe_result = code_space_->AllocateRaw(obj_size);
3617  }
3618  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3619 
3620  if (immovable && !force_lo_space &&
3621  // Objects on the first page of each space are never moved.
3622  !code_space_->FirstPage()->Contains(result->address())) {
3623  // Discard the first code allocation, which was on a page where it could be
3624  // moved.
3625  CreateFillerObjectAt(result->address(), obj_size);
3626  maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3627  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3628  }
3629 
3630  // Initialize the object
3631  result->set_map_no_write_barrier(code_map());
3632  Code* code = Code::cast(result);
3633  ASSERT(!isolate_->code_range()->exists() ||
3634  isolate_->code_range()->contains(code->address()));
3635  code->set_instruction_size(desc.instr_size);
3636  code->set_relocation_info(reloc_info);
3637  code->set_flags(flags);
3638  if (code->is_call_stub() || code->is_keyed_call_stub()) {
3639  code->set_check_type(RECEIVER_MAP_CHECK);
3640  }
3641  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3642  code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
3643  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3644  code->set_gc_metadata(Smi::FromInt(0));
3645  code->set_ic_age(global_ic_age_);
3646  // Allow self references to created code object by patching the handle to
3647  // point to the newly allocated Code object.
3648  if (!self_reference.is_null()) {
3649  *(self_reference.location()) = code;
3650  }
3651  // Migrate generated code.
3652  // The generated code can contain Object** values (typically from handles)
3653  // that are dereferenced during the copy to point directly to the actual heap
3654  // objects. These pointers can include references to the code object itself,
3655  // through the self_reference parameter.
3656  code->CopyFrom(desc);
3657 
3658 #ifdef VERIFY_HEAP
3659  if (FLAG_verify_heap) {
3660  code->Verify();
3661  }
3662 #endif
3663  return code;
3664 }
3665 
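// Illustrative sketch, not part of the original heap.cc: CreateCode above
// falls back to the large object space when the code object does not fit on a
// regular code-space page (and also when an immovable object would otherwise
// land on a movable page). A hypothetical predicate mirroring the size half
// of that decision (the function name is an assumption for the example):
static bool NeedsLargeCodeSpaceForIllustration(Heap* heap, int instr_size) {
  int body_size = RoundUp(instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  return obj_size > heap->code_space()->AreaSize();
}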
3666 
3667 MaybeObject* Heap::CopyCode(Code* code) {
3668  // Allocate an object the same size as the code object.
3669  int obj_size = code->Size();
3670  MaybeObject* maybe_result;
3671  if (obj_size > code_space()->AreaSize()) {
3672  maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3673  } else {
3674  maybe_result = code_space_->AllocateRaw(obj_size);
3675  }
3676 
3677  Object* result;
3678  if (!maybe_result->ToObject(&result)) return maybe_result;
3679 
3680  // Copy code object.
3681  Address old_addr = code->address();
3682  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3683  CopyBlock(new_addr, old_addr, obj_size);
3684  // Relocate the copy.
3685  Code* new_code = Code::cast(result);
3686  ASSERT(!isolate_->code_range()->exists() ||
3687  isolate_->code_range()->contains(code->address()));
3688  new_code->Relocate(new_addr - old_addr);
3689  return new_code;
3690 }
3691 
3692 
3693 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3694  // Allocate ByteArray before the Code object, so that we do not risk
3695  // leaving uninitialized Code object (and breaking the heap).
3696  Object* reloc_info_array;
3697  { MaybeObject* maybe_reloc_info_array =
3698  AllocateByteArray(reloc_info.length(), TENURED);
3699  if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
3700  return maybe_reloc_info_array;
3701  }
3702  }
3703 
3704  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3705 
3706  int new_obj_size = Code::SizeFor(new_body_size);
3707 
3708  Address old_addr = code->address();
3709 
3710  size_t relocation_offset =
3711  static_cast<size_t>(code->instruction_end() - old_addr);
3712 
3713  MaybeObject* maybe_result;
3714  if (new_obj_size > code_space()->AreaSize()) {
3715  maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3716  } else {
3717  maybe_result = code_space_->AllocateRaw(new_obj_size);
3718  }
3719 
3720  Object* result;
3721  if (!maybe_result->ToObject(&result)) return maybe_result;
3722 
3723  // Copy code object.
3724  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3725 
3726  // Copy header and instructions.
3727  memcpy(new_addr, old_addr, relocation_offset);
3728 
3729  Code* new_code = Code::cast(result);
3730  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
3731 
3732  // Copy patched rinfo.
3733  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
3734 
3735  // Relocate the copy.
3736  ASSERT(!isolate_->code_range()->exists() ||
3737  isolate_->code_range()->contains(code->address()));
3738  new_code->Relocate(new_addr - old_addr);
3739 
3740 #ifdef VERIFY_HEAP
3741  if (FLAG_verify_heap) {
3742  code->Verify();
3743  }
3744 #endif
3745  return new_code;
3746 }
3747 
3748 
3749 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
3750  ASSERT(gc_state_ == NOT_IN_GC);
3751  ASSERT(map->instance_type() != MAP_TYPE);
3752  // If allocation failures are disallowed, we may allocate in a different
3753  // space when new space is full and the object is not a large object.
3754  AllocationSpace retry_space =
3755  (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3756  Object* result;
3757  { MaybeObject* maybe_result =
3758  AllocateRaw(map->instance_size(), space, retry_space);
3759  if (!maybe_result->ToObject(&result)) return maybe_result;
3760  }
3761  // No need for write barrier since object is white and map is in old space.
3762  HeapObject::cast(result)->set_map_no_write_barrier(map);
3763  return result;
3764 }
3765 
3766 
3767 void Heap::InitializeFunction(JSFunction* function,
3768  SharedFunctionInfo* shared,
3769  Object* prototype) {
3770  ASSERT(!prototype->IsMap());
3771  function->initialize_properties();
3772  function->initialize_elements();
3773  function->set_shared(shared);
3774  function->set_code(shared->code());
3775  function->set_prototype_or_initial_map(prototype);
3776  function->set_context(undefined_value());
3777  function->set_literals_or_bindings(empty_fixed_array());
3778  function->set_next_function_link(undefined_value());
3779 }
3780 
3781 
3782 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3783  // Allocate the prototype. Make sure to use the object function
3784  // from the function's context, since the function can be from a
3785  // different context.
3786  JSFunction* object_function =
3787  function->context()->native_context()->object_function();
3788 
3789  // Each function prototype gets a copy of the object function map.
3790  // This avoids unwanted sharing of maps between prototypes of different
3791  // constructors.
3792  Map* new_map;
3793  ASSERT(object_function->has_initial_map());
3794  MaybeObject* maybe_map = object_function->initial_map()->Copy();
3795  if (!maybe_map->To(&new_map)) return maybe_map;
3796 
3797  Object* prototype;
3798  MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3799  if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3800 
3801  // When creating the prototype for the function we must set its
3802  // constructor to the function.
3803  MaybeObject* maybe_failure =
3804  JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3805  constructor_symbol(), function, DONT_ENUM);
3806  if (maybe_failure->IsFailure()) return maybe_failure;
3807 
3808  return prototype;
3809 }
3810 
3811 
3812 MaybeObject* Heap::AllocateFunction(Map* function_map,
3813  SharedFunctionInfo* shared,
3814  Object* prototype,
3815  PretenureFlag pretenure) {
3816  AllocationSpace space =
3817  (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3818  Object* result;
3819  { MaybeObject* maybe_result = Allocate(function_map, space);
3820  if (!maybe_result->ToObject(&result)) return maybe_result;
3821  }
3822  InitializeFunction(JSFunction::cast(result), shared, prototype);
3823  return result;
3824 }
3825 
3826 
3827 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3828  // To get fast allocation and map sharing for arguments objects we
3829  // allocate them based on an arguments boilerplate.
3830 
3831  JSObject* boilerplate;
3832  int arguments_object_size;
3833  bool strict_mode_callee = callee->IsJSFunction() &&
3834  !JSFunction::cast(callee)->shared()->is_classic_mode();
3835  if (strict_mode_callee) {
3836  boilerplate =
3837  isolate()->context()->native_context()->
3838  strict_mode_arguments_boilerplate();
3839  arguments_object_size = kArgumentsObjectSizeStrict;
3840  } else {
3841  boilerplate =
3842  isolate()->context()->native_context()->arguments_boilerplate();
3843  arguments_object_size = kArgumentsObjectSize;
3844  }
3845 
3846  // This calls Copy directly rather than using Heap::AllocateRaw so we
3847  // duplicate the check here.
3848  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3849 
3850  // Check that the size of the boilerplate matches our
3851  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3852  // on the size being a known constant.
3853  ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3854 
3855  // Do the allocation.
3856  Object* result;
3857  { MaybeObject* maybe_result =
3858  AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3859  if (!maybe_result->ToObject(&result)) return maybe_result;
3860  }
3861 
3862  // Copy the content. The arguments boilerplate doesn't have any
3863  // fields that point to new space so it's safe to skip the write
3864  // barrier here.
3865  CopyBlock(HeapObject::cast(result)->address(),
3866  boilerplate->address(),
3867  JSObject::kHeaderSize);
3868 
3869  // Set the length property.
3870  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3871  Smi::FromInt(length),
3872  SKIP_WRITE_BARRIER);
3873  // Set the callee property for non-strict mode arguments object only.
3874  if (!strict_mode_callee) {
3875  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3876  callee);
3877  }
3878 
3879  // Check the state of the object
3880  ASSERT(JSObject::cast(result)->HasFastProperties());
3881  ASSERT(JSObject::cast(result)->HasFastObjectElements());
3882 
3883  return result;
3884 }
3885 
3886 
3887 static bool HasDuplicates(DescriptorArray* descriptors) {
3888  int count = descriptors->number_of_descriptors();
3889  if (count > 1) {
3890  String* prev_key = descriptors->GetKey(0);
3891  for (int i = 1; i != count; i++) {
3892  String* current_key = descriptors->GetKey(i);
3893  if (prev_key == current_key) return true;
3894  prev_key = current_key;
3895  }
3896  }
3897  return false;
3898 }
3899 
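// Illustrative sketch, not part of the original heap.cc: the duplicate check
// above relies on the descriptors being sorted, so equal keys are adjacent and
// a single linear scan suffices. The same idea on a plain sorted array of
// symbol pointers (pointer identity is what HasDuplicates compares; the
// function name is an assumption for the example):
static bool HasAdjacentDuplicatesForIllustration(String** sorted_keys,
                                                 int count) {
  for (int i = 1; i < count; i++) {
    if (sorted_keys[i - 1] == sorted_keys[i]) return true;
  }
  return false;
}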
3900 
3901 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3902  ASSERT(!fun->has_initial_map());
3903 
3904  // First create a new map with the size and number of in-object properties
3905  // suggested by the function.
3906  int instance_size = fun->shared()->CalculateInstanceSize();
3907  int in_object_properties = fun->shared()->CalculateInObjectProperties();
3908  Map* map;
3909  MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
3910  if (!maybe_map->To(&map)) return maybe_map;
3911 
3912  // Fetch or allocate prototype.
3913  Object* prototype;
3914  if (fun->has_instance_prototype()) {
3915  prototype = fun->instance_prototype();
3916  } else {
3917  MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3918  if (!maybe_prototype->To(&prototype)) return maybe_prototype;
3919  }
3920  map->set_inobject_properties(in_object_properties);
3921  map->set_unused_property_fields(in_object_properties);
3922  map->set_prototype(prototype);
3924 
3925  // If the function has only simple this property assignments add
3926  // field descriptors for these to the initial map as the object
3927  // cannot be constructed without having these properties. Guard by
3928  // the inline_new flag so we only change the map if we generate a
3929  // specialized construct stub.
3930  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3931  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3932  int count = fun->shared()->this_property_assignments_count();
3933  if (count > in_object_properties) {
3934  // Inline constructor can only handle inobject properties.
3935  fun->shared()->ForbidInlineConstructor();
3936  } else {
3937  DescriptorArray* descriptors;
3938  MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
3939  if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
3940 
3941  DescriptorArray::WhitenessWitness witness(descriptors);
3942  for (int i = 0; i < count; i++) {
3943  String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3944  ASSERT(name->IsSymbol());
3945  FieldDescriptor field(name, i, NONE, i + 1);
3946  descriptors->Set(i, &field, witness);
3947  }
3948  descriptors->Sort();
3949 
3950  // The descriptors may contain duplicates because the compiler does not
3951  // guarantee the uniqueness of property names (it would have required
3952  // quadratic time). Once the descriptors are sorted we can check for
3953  // duplicates in linear time.
3954  if (HasDuplicates(descriptors)) {
3955  fun->shared()->ForbidInlineConstructor();
3956  } else {
3957  map->InitializeDescriptors(descriptors);
3958  map->set_pre_allocated_property_fields(count);
3959  map->set_unused_property_fields(in_object_properties - count);
3960  }
3961  }
3962  }
3963 
3964  fun->shared()->StartInobjectSlackTracking(map);
3965 
3966  return map;
3967 }
3968 
3969 
3970 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3971  FixedArray* properties,
3972  Map* map) {
3973  obj->set_properties(properties);
3974  obj->initialize_elements();
3975  // TODO(1240798): Initialize the object's body using valid initial values
3976  // according to the object's initial map. For example, if the map's
3977  // instance type is JS_ARRAY_TYPE, the length field should be initialized
3978  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3979  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3980  // verification code has to cope with (temporarily) invalid objects. See
3981  // for example, JSArray::JSArrayVerify.
3982  Object* filler;
3983  // We cannot always fill with one_pointer_filler_map because objects
3984  // created from API functions expect their internal fields to be initialized
3985  // with undefined_value.
3986  // Pre-allocated fields need to be initialized with undefined_value as well
3987  // so that object accesses before the constructor completes (e.g. in the
3988  // debugger) will not cause a crash.
3989  if (map->constructor()->IsJSFunction() &&
3990  JSFunction::cast(map->constructor())->shared()->
3991  IsInobjectSlackTrackingInProgress()) {
3992  // We might want to shrink the object later.
3993  ASSERT(obj->GetInternalFieldCount() == 0);
3994  filler = Heap::one_pointer_filler_map();
3995  } else {
3996  filler = Heap::undefined_value();
3997  }
3998  obj->InitializeBody(map, Heap::undefined_value(), filler);
3999 }
4000 
4001 
4002 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4003  // JSFunctions should be allocated using AllocateFunction to be
4004  // properly initialized.
4005  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4006 
4007  // Both types of global objects should be allocated using
4008  // AllocateGlobalObject to be properly initialized.
4009  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4010  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4011 
4012  // Allocate the backing storage for the properties.
4013  int prop_size =
4014  map->pre_allocated_property_fields() +
4015  map->unused_property_fields() -
4016  map->inobject_properties();
4017  ASSERT(prop_size >= 0);
4018  Object* properties;
4019  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4020  if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4021  }
4022 
4023  // Allocate the JSObject.
4024  AllocationSpace space =
4025  (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4026  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4027  Object* obj;
4028  { MaybeObject* maybe_obj = Allocate(map, space);
4029  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4030  }
4031 
4032  // Initialize the JSObject.
4033  InitializeJSObjectFromMap(JSObject::cast(obj),
4034  FixedArray::cast(properties),
4035  map);
4036  ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
4037  return obj;
4038 }
4039 
4040 
4041 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4042  PretenureFlag pretenure) {
4043  // Allocate the initial map if absent.
4044  if (!constructor->has_initial_map()) {
4045  Object* initial_map;
4046  { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4047  if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4048  }
4049  constructor->set_initial_map(Map::cast(initial_map));
4050  Map::cast(initial_map)->set_constructor(constructor);
4051  }
4052  // Allocate the object based on the constructor's initial map.
4053  MaybeObject* result = AllocateJSObjectFromMap(
4054  constructor->initial_map(), pretenure);
4055 #ifdef DEBUG
4056  // Make sure result is NOT a global object if valid.
4057  Object* non_failure;
4058  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4059 #endif
4060  return result;
4061 }
4062 
4063 
4064 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4065  // Allocate a fresh map. Modules do not have a prototype.
4066  Map* map;
4067  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4068  if (!maybe_map->To(&map)) return maybe_map;
4069  // Allocate the object based on the map.
4070  JSModule* module;
4071  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4072  if (!maybe_module->To(&module)) return maybe_module;
4073  module->set_context(context);
4074  module->set_scope_info(scope_info);
4075  return module;
4076 }
4077 
4078 
4079 MaybeObject* Heap::AllocateJSArrayAndStorage(
4080  ElementsKind elements_kind,
4081  int length,
4082  int capacity,
4083  ArrayStorageAllocationMode mode,
4084  PretenureFlag pretenure) {
4085  ASSERT(capacity >= length);
4086  if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
4087  elements_kind = GetHoleyElementsKind(elements_kind);
4088  }
4089  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4090  JSArray* array;
4091  if (!maybe_array->To(&array)) return maybe_array;
4092 
4093  if (capacity == 0) {
4094  array->set_length(Smi::FromInt(0));
4095  array->set_elements(empty_fixed_array());
4096  return array;
4097  }
4098 
4099  FixedArrayBase* elms;
4100  MaybeObject* maybe_elms = NULL;
4101  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
4102  if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4103  maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4104  } else {
4105  ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4106  maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4107  }
4108  } else {
4109  ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4110  if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4111  maybe_elms = AllocateUninitializedFixedArray(capacity);
4112  } else {
4113  ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4114  maybe_elms = AllocateFixedArrayWithHoles(capacity);
4115  }
4116  }
4117  if (!maybe_elms->To(&elms)) return maybe_elms;
4118 
4119  array->set_elements(elms);
4120  array->set_length(Smi::FromInt(length));
4121  return array;
4122 }
4123 
4124 
4125 MaybeObject* Heap::AllocateJSArrayWithElements(
4126  FixedArrayBase* elements,
4127  ElementsKind elements_kind,
4128  PretenureFlag pretenure) {
4129  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4130  JSArray* array;
4131  if (!maybe_array->To(&array)) return maybe_array;
4132 
4133  array->set_elements(elements);
4134  array->set_length(Smi::FromInt(elements->length()));
4135  array->ValidateElements();
4136  return array;
4137 }
4138 
4139 
4140 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4141  // Allocate map.
4142  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4143  // maps. Will probably depend on the identity of the handler object, too.
4144  Map* map;
4145  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4146  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4147  map->set_prototype(prototype);
4148 
4149  // Allocate the proxy object.
4150  JSProxy* result;
4151  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4152  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4153  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4154  result->set_handler(handler);
4155  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4156  return result;
4157 }
4158 
4159 
4160 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4161  Object* call_trap,
4162  Object* construct_trap,
4163  Object* prototype) {
4164  // Allocate map.
4165  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4166  // maps. Will probably depend on the identity of the handler object, too.
4167  Map* map;
4168  MaybeObject* maybe_map_obj =
4169  AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4170  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4171  map->set_prototype(prototype);
4172 
4173  // Allocate the proxy object.
4174  JSFunctionProxy* result;
4175  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4176  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4177  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4178  result->set_handler(handler);
4179  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4180  result->set_call_trap(call_trap);
4181  result->set_construct_trap(construct_trap);
4182  return result;
4183 }
4184 
4185 
4186 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4187  ASSERT(constructor->has_initial_map());
4188  Map* map = constructor->initial_map();
4189  ASSERT(map->is_dictionary_map());
4190 
4191  // Make sure no field properties are described in the initial map.
4192  // This guarantees us that normalizing the properties does not
4193  // require us to change property values to JSGlobalPropertyCells.
4194  ASSERT(map->NextFreePropertyIndex() == 0);
4195 
4196  // Make sure we don't have a ton of pre-allocated slots in the
4197  // global objects. They will be unused once we normalize the object.
4198  ASSERT(map->unused_property_fields() == 0);
4199  ASSERT(map->inobject_properties() == 0);
4200 
4201  // Initial size of the backing store to avoid resize of the storage during
4202  // bootstrapping. The size differs between the JS global object and the
4203  // builtins object.
4204  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4205 
4206  // Allocate a dictionary object for backing storage.
4207  StringDictionary* dictionary;
4208  MaybeObject* maybe_dictionary =
4209  StringDictionary::Allocate(
4210  map->NumberOfOwnDescriptors() * 2 + initial_size);
4211  if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4212 
4213  // The global object might be created from an object template with accessors.
4214  // Fill these accessors into the dictionary.
4215  DescriptorArray* descs = map->instance_descriptors();
4216  for (int i = 0; i < descs->number_of_descriptors(); i++) {
4217  PropertyDetails details = descs->GetDetails(i);
4218  ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
4219  PropertyDetails d = PropertyDetails(details.attributes(),
4220  CALLBACKS,
4221  details.descriptor_index());
4222  Object* value = descs->GetCallbacksObject(i);
4223  MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
4224  if (!maybe_value->ToObject(&value)) return maybe_value;
4225 
4226  MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4227  if (!maybe_added->To(&dictionary)) return maybe_added;
4228  }
4229 
4230  // Allocate the global object and initialize it with the backing store.
4231  JSObject* global;
4232  MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4233  if (!maybe_global->To(&global)) return maybe_global;
4234 
4235  InitializeJSObjectFromMap(global, dictionary, map);
4236 
4237  // Create a new map for the global object.
4238  Map* new_map;
4239  MaybeObject* maybe_map = map->CopyDropDescriptors();
4240  if (!maybe_map->To(&new_map)) return maybe_map;
4241  new_map->set_dictionary_map(true);
4242 
4243  // Set up the global object as a normalized object.
4244  global->set_map(new_map);
4245  global->set_properties(dictionary);
4246 
4247  // Make sure result is a global object with properties in dictionary.
4248  ASSERT(global->IsGlobalObject());
4249  ASSERT(!global->HasFastProperties());
4250  return global;
4251 }
4252 
4253 
4254 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4255  // Never used to copy functions. If functions need to be copied we
4256  // have to be careful to clear the literals array.
4257  SLOW_ASSERT(!source->IsJSFunction());
4258 
4259  // Make the clone.
4260  Map* map = source->map();
4261  int object_size = map->instance_size();
4262  Object* clone;
4263 
4264  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4265 
4266  // If we're forced to always allocate, we use the general allocation
4267  // functions which may leave us with an object in old space.
4268  if (always_allocate()) {
4269  { MaybeObject* maybe_clone =
4270  AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4271  if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4272  }
4273  Address clone_address = HeapObject::cast(clone)->address();
4274  CopyBlock(clone_address,
4275  source->address(),
4276  object_size);
4277  // Update write barrier for all fields that lie beyond the header.
4278  RecordWrites(clone_address,
4279  JSObject::kHeaderSize,
4280  (object_size - JSObject::kHeaderSize) / kPointerSize);
4281  } else {
4282  wb_mode = SKIP_WRITE_BARRIER;
4283  { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4284  if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4285  }
4286  SLOW_ASSERT(InNewSpace(clone));
4287  // Since we know the clone is allocated in new space, we can copy
4288  // the contents without worrying about updating the write barrier.
4289  CopyBlock(HeapObject::cast(clone)->address(),
4290  source->address(),
4291  object_size);
4292  }
4293 
4294  SLOW_ASSERT(
4295  JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4296  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4297  FixedArray* properties = FixedArray::cast(source->properties());
4298  // Update elements if necessary.
4299  if (elements->length() > 0) {
4300  Object* elem;
4301  { MaybeObject* maybe_elem;
4302  if (elements->map() == fixed_cow_array_map()) {
4303  maybe_elem = FixedArray::cast(elements);
4304  } else if (source->HasFastDoubleElements()) {
4305  maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4306  } else {
4307  maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4308  }
4309  if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4310  }
4311  JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4312  }
4313  // Update properties if necessary.
4314  if (properties->length() > 0) {
4315  Object* prop;
4316  { MaybeObject* maybe_prop = CopyFixedArray(properties);
4317  if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4318  }
4319  JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4320  }
4321  // Return the new clone.
4322  return clone;
4323 }
4324 
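// Illustrative sketch, not part of the original heap.cc: CopyJSObject clones
// the instance in one block copy and then deep-copies only the elements and
// the out-of-object property backing stores, so the clone shares its map with
// the source. A hypothetical wrapper restating the precondition (the name is
// an assumption for the example):
static MaybeObject* CloneObjectForIllustration(Heap* heap, JSObject* source) {
  ASSERT(!source->IsJSFunction());  // functions are never cloned this way
  return heap->CopyJSObject(source);
}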
4325 
4326 MaybeObject* Heap::ReinitializeJSReceiver(
4327  JSReceiver* object, InstanceType type, int size) {
4328  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4329 
4330  // Allocate fresh map.
4331  // TODO(rossberg): Once we optimize proxies, cache these maps.
4332  Map* map;
4333  MaybeObject* maybe = AllocateMap(type, size);
4334  if (!maybe->To<Map>(&map)) return maybe;
4335 
4336  // Check that the receiver has at least the size of the fresh object.
4337  int size_difference = object->map()->instance_size() - map->instance_size();
4338  ASSERT(size_difference >= 0);
4339 
4340  map->set_prototype(object->map()->prototype());
4341 
4342  // Allocate the backing storage for the properties.
4343  int prop_size = map->unused_property_fields() - map->inobject_properties();
4344  Object* properties;
4345  maybe = AllocateFixedArray(prop_size, TENURED);
4346  if (!maybe->ToObject(&properties)) return maybe;
4347 
4348  // Functions require some allocation, which might fail here.
4349  SharedFunctionInfo* shared = NULL;
4350  if (type == JS_FUNCTION_TYPE) {
4351  String* name;
4352  maybe = LookupAsciiSymbol("<freezing call trap>");
4353  if (!maybe->To<String>(&name)) return maybe;
4354  maybe = AllocateSharedFunctionInfo(name);
4355  if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4356  }
4357 
4358  // Because of possible retries of this function after failure,
4359  // we must NOT fail after this point, where we have changed the type!
4360 
4361  // Reset the map for the object.
4362  object->set_map(map);
4363  JSObject* jsobj = JSObject::cast(object);
4364 
4365  // Reinitialize the object from the constructor map.
4366  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4367 
4368  // Functions require some minimal initialization.
4369  if (type == JS_FUNCTION_TYPE) {
4370  map->set_function_with_prototype(true);
4371  InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4372  JSFunction::cast(object)->set_context(
4373  isolate()->context()->native_context());
4374  }
4375 
4376  // Put in filler if the new object is smaller than the old.
4377  if (size_difference > 0) {
4378  CreateFillerObjectAt(
4379  object->address() + map->instance_size(), size_difference);
4380  }
4381 
4382  return object;
4383 }
4384 
4385 
4386 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4387  JSGlobalProxy* object) {
4388  ASSERT(constructor->has_initial_map());
4389  Map* map = constructor->initial_map();
4390 
4391  // Check that the already allocated object has the same size and type as
4392  // objects allocated using the constructor.
4393  ASSERT(map->instance_size() == object->map()->instance_size());
4394  ASSERT(map->instance_type() == object->map()->instance_type());
4395 
4396  // Allocate the backing storage for the properties.
4397  int prop_size = map->unused_property_fields() - map->inobject_properties();
4398  Object* properties;
4399  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4400  if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4401  }
4402 
4403  // Reset the map for the object.
4404  object->set_map(constructor->initial_map());
4405 
4406  // Reinitialize the object from the constructor map.
4407  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4408  return object;
4409 }
4410 
4411 
4412 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
4413  PretenureFlag pretenure) {
4414  int length = string.length();
4415  if (length == 1) {
4416  return LookupSingleCharacterStringFromCode(string[0]);
4417  }
4418  Object* result;
4419  { MaybeObject* maybe_result =
4420  AllocateRawAsciiString(string.length(), pretenure);
4421  if (!maybe_result->ToObject(&result)) return maybe_result;
4422  }
4423 
4424  // Copy the characters into the new object.
4425  CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
4426  return result;
4427 }
4428 
4429 
4430 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4431  int non_ascii_start,
4432  PretenureFlag pretenure) {
4433  // Continue counting the number of characters in the UTF-8 string, starting
4434  // from the first non-ascii character or word.
4435  int chars = non_ascii_start;
4436  Access<UnicodeCache::Utf8Decoder>
4437  decoder(isolate_->unicode_cache()->utf8_decoder());
4438  decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
4439  while (decoder->has_more()) {
4440  uint32_t r = decoder->GetNext();
4441  if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
4442  chars++;
4443  } else {
4444  chars += 2;
4445  }
4446  }
4447 
4448  Object* result;
4449  { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4450  if (!maybe_result->ToObject(&result)) return maybe_result;
4451  }
4452 
4453  // Convert and copy the characters into the new object.
4454  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4455  decoder->Reset(string.start(), string.length());
4456  int i = 0;
4457  while (i < chars) {
4458  uint32_t r = decoder->GetNext();
4459  if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4460  twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
4461  twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
4462  } else {
4463  twobyte->SeqTwoByteStringSet(i++, r);
4464  }
4465  }
4466  return result;
4467 }
4468 
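// Illustrative sketch, not part of the original heap.cc: code points above the
// basic multilingual plane occupy two UTF-16 code units, which is why the
// counting loop above adds 2 for such characters and the copy loop emits a
// lead/trail surrogate pair. The same rule for a single code point (the
// function name is an assumption for the example):
static int Utf16LengthForIllustration(uint32_t code_point) {
  return code_point <= unibrow::Utf16::kMaxNonSurrogateCharCode ? 1 : 2;
}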
4469 
4470 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4471  PretenureFlag pretenure) {
4472  // Check if the string is an ASCII string.
4473  Object* result;
4474  int length = string.length();
4475  const uc16* start = string.start();
4476 
4477  if (String::IsAscii(start, length)) {
4478  MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
4479  if (!maybe_result->ToObject(&result)) return maybe_result;
4480  CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
4481  } else { // It's not an ASCII string.
4482  MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4483  if (!maybe_result->ToObject(&result)) return maybe_result;
4484  CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4485  }
4486  return result;
4487 }
4488 
4489 
4490 Map* Heap::SymbolMapForString(String* string) {
4491  // If the string is in new space it cannot be used as a symbol.
4492  if (InNewSpace(string)) return NULL;
4493 
4494  // Find the corresponding symbol map for strings.
4495  switch (string->map()->instance_type()) {
4496  case STRING_TYPE: return symbol_map();
4497  case ASCII_STRING_TYPE: return ascii_symbol_map();
4498  case CONS_STRING_TYPE: return cons_symbol_map();
4499  case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
4500  case EXTERNAL_STRING_TYPE: return external_symbol_map();
4501  case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
4502  case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4503  return external_symbol_with_ascii_data_map();
4504  case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
4505  case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4506  return short_external_ascii_symbol_map();
4507  case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
4508  return short_external_symbol_with_ascii_data_map();
4509  default: return NULL; // No match found.
4510  }
4511 }
4512 
4513 
4514 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
4515  int chars,
4516  uint32_t hash_field) {
4517  ASSERT(chars >= 0);
4518  // Ensure the chars matches the number of characters in the buffer.
4519  ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
4520  // Determine whether the string is ASCII.
4521  bool is_ascii = true;
4522  while (buffer->has_more()) {
4523  if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
4524  is_ascii = false;
4525  break;
4526  }
4527  }
4528  buffer->Rewind();
4529 
4530  // Compute map and object size.
4531  int size;
4532  Map* map;
4533 
4534  if (is_ascii) {
4535  if (chars > SeqAsciiString::kMaxLength) {
4536  return Failure::OutOfMemoryException();
4537  }
4538  map = ascii_symbol_map();
4539  size = SeqAsciiString::SizeFor(chars);
4540  } else {
4541  if (chars > SeqTwoByteString::kMaxLength) {
4542  return Failure::OutOfMemoryException();
4543  }
4544  map = symbol_map();
4545  size = SeqTwoByteString::SizeFor(chars);
4546  }
4547 
4548  // Allocate string.
4549  Object* result;
4550  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
4551  ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
4552  : old_data_space_->AllocateRaw(size);
4553  if (!maybe_result->ToObject(&result)) return maybe_result;
4554  }
4555 
4556  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4557  // Set length and hash fields of the allocated string.
4558  String* answer = String::cast(result);
4559  answer->set_length(chars);
4560  answer->set_hash_field(hash_field);
4561 
4562  ASSERT_EQ(size, answer->Size());
4563 
4564  // Fill in the characters.
4565  int i = 0;
4566  while (i < chars) {
4567  uint32_t character = buffer->GetNext();
4568  if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4569  answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
4570  answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
4571  } else {
4572  answer->Set(i++, character);
4573  }
4574  }
4575  return answer;
4576 }
4577 
4578 
4579 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
4580  if (length < 0 || length > SeqAsciiString::kMaxLength) {
4581  return Failure::OutOfMemoryException();
4582  }
4583 
4584  int size = SeqAsciiString::SizeFor(length);
4585  ASSERT(size <= SeqAsciiString::kMaxSize);
4586 
4587  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4588  AllocationSpace retry_space = OLD_DATA_SPACE;
4589 
4590  if (space == NEW_SPACE) {
4591  if (size > kMaxObjectSizeInNewSpace) {
4592  // Allocate in large object space, retry space will be ignored.
4593  space = LO_SPACE;
4594  } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4595  // Allocate in new space, retry in large object space.
4596  retry_space = LO_SPACE;
4597  }
4598  } else if (space == OLD_DATA_SPACE &&
4599  size > Page::kMaxNonCodeHeapObjectSize) {
4600  space = LO_SPACE;
4601  }
4602  Object* result;
4603  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4604  if (!maybe_result->ToObject(&result)) return maybe_result;
4605  }
4606 
4607  // Partially initialize the object.
4608  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4609  String::cast(result)->set_length(length);
4610  String::cast(result)->set_hash_field(String::kEmptyHashField);
4611  ASSERT_EQ(size, HeapObject::cast(result)->Size());
4612 
4613 #ifdef VERIFY_HEAP
4614  if (FLAG_verify_heap) {
4615  // Initialize string's content to ensure ASCII-ness (character range 0-127)
4616  // as required when verifying the heap.
4617  char* dest = SeqAsciiString::cast(result)->GetChars();
4618  memset(dest, 0x0F, length * kCharSize);
4619  }
4620 #endif
4621 
4622  return result;
4623 }
4624 
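// Illustrative sketch, not part of the original heap.cc: callers of the raw
// string allocators fill in the character payload themselves, as
// AllocateStringFromAscii above does. A hypothetical helper that builds a
// string of one repeated ASCII character (name and parameters are assumptions
// for the example; c must stay in the 0-127 range expected of ASCII strings):
static MaybeObject* RepeatCharForIllustration(Heap* heap, char c, int count) {
  Object* result;
  { MaybeObject* maybe_result = heap->AllocateRawAsciiString(count);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  char* dest = SeqAsciiString::cast(result)->GetChars();
  memset(dest, c, count * kCharSize);
  return result;
}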
4625 
4626 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4627  PretenureFlag pretenure) {
4628  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4629  return Failure::OutOfMemoryException();
4630  }
4631  int size = SeqTwoByteString::SizeFor(length);
4632  ASSERT(size <= SeqTwoByteString::kMaxSize);
4633  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4634  AllocationSpace retry_space = OLD_DATA_SPACE;
4635 
4636  if (space == NEW_SPACE) {
4637  if (size > kMaxObjectSizeInNewSpace) {
4638  // Allocate in large object space, retry space will be ignored.
4639  space = LO_SPACE;
4640  } else if (size > Page::kMaxNonCodeHeapObjectSize) {
4641  // Allocate in new space, retry in large object space.
4642  retry_space = LO_SPACE;
4643  }
4644  } else if (space == OLD_DATA_SPACE &&
4645  size > Page::kMaxNonCodeHeapObjectSize) {
4646  space = LO_SPACE;
4647  }
4648  Object* result;
4649  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4650  if (!maybe_result->ToObject(&result)) return maybe_result;
4651  }
4652 
4653  // Partially initialize the object.
4654  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4655  String::cast(result)->set_length(length);
4656  String::cast(result)->set_hash_field(String::kEmptyHashField);
4657  ASSERT_EQ(size, HeapObject::cast(result)->Size());
4658  return result;
4659 }
4660 
4661 
4662 MaybeObject* Heap::AllocateJSArray(
4663  ElementsKind elements_kind,
4664  PretenureFlag pretenure) {
4665  Context* native_context = isolate()->context()->native_context();
4666  JSFunction* array_function = native_context->array_function();
4667  Map* map = array_function->initial_map();
4668  Object* maybe_map_array = native_context->js_array_maps();
4669  if (!maybe_map_array->IsUndefined()) {
4670  Object* maybe_transitioned_map =
4671  FixedArray::cast(maybe_map_array)->get(elements_kind);
4672  if (!maybe_transitioned_map->IsUndefined()) {
4673  map = Map::cast(maybe_transitioned_map);
4674  }
4675  }
4676 
4677  return AllocateJSObjectFromMap(map, pretenure);
4678 }
4679 
4680 
4681 MaybeObject* Heap::AllocateEmptyFixedArray() {
4682  int size = FixedArray::SizeFor(0);
4683  Object* result;
4684  { MaybeObject* maybe_result =
4685  AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4686  if (!maybe_result->ToObject(&result)) return maybe_result;
4687  }
4688  // Initialize the object.
4689  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
4690  fixed_array_map());
4691  reinterpret_cast<FixedArray*>(result)->set_length(0);
4692  return result;
4693 }
4694 
4695 
4696 MaybeObject* Heap::AllocateRawFixedArray(int length) {
4697  if (length < 0 || length > FixedArray::kMaxLength) {
4698  return Failure::OutOfMemoryException();
4699  }
4700  ASSERT(length > 0);
4701  // Use the general function if we're forced to always allocate.
4702  if (always_allocate()) return AllocateFixedArray(length, TENURED);
4703  // Allocate the raw data for a fixed array.
4704  int size = FixedArray::SizeFor(length);
4705  return size <= kMaxObjectSizeInNewSpace
4706  ? new_space_.AllocateRaw(size)
4707  : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4708 }
4709 
4710 
4711 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4712  int len = src->length();
4713  Object* obj;
4714  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
4715  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4716  }
4717  if (InNewSpace(obj)) {
4718  HeapObject* dst = HeapObject::cast(obj);
4719  dst->set_map_no_write_barrier(map);
4720  CopyBlock(dst->address() + kPointerSize,
4721  src->address() + kPointerSize,
4722  FixedArray::SizeFor(len) - kPointerSize);
4723  return obj;
4724  }
4725  HeapObject::cast(obj)->set_map_no_write_barrier(map);
4726  FixedArray* result = FixedArray::cast(obj);
4727  result->set_length(len);
4728 
4729  // Copy the content
4730  AssertNoAllocation no_gc;
4731  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4732  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4733  return result;
4734 }
4735 
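// Illustrative sketch, not part of the original heap.cc: CopyFixedArrayWithMap
// above takes two paths -- a plain block copy when the clone lands in new
// space, and an element-wise copy with the proper write barrier mode
// otherwise. A hypothetical wrapper that simply preserves the source map (the
// name is an assumption for the example):
static MaybeObject* CloneFixedArrayForIllustration(Heap* heap,
                                                   FixedArray* src) {
  return heap->CopyFixedArrayWithMap(src, src->map());
}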
4736 
4737 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4738  Map* map) {
4739  int len = src->length();
4740  Object* obj;
4741  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4742  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4743  }
4744  HeapObject* dst = HeapObject::cast(obj);
4745  dst->set_map_no_write_barrier(map);
4746  CopyBlock(
4747  dst->address() + FixedDoubleArray::kLengthOffset,
4748  src->address() + FixedDoubleArray::kLengthOffset,
4749  FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4750  return obj;
4751 }
4752 
4753 
4754 MaybeObject* Heap::AllocateFixedArray(int length) {
4755  ASSERT(length >= 0);
4756  if (length == 0) return empty_fixed_array();
4757  Object* result;
4758  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
4759  if (!maybe_result->ToObject(&result)) return maybe_result;
4760  }
4761  // Initialize header.
4762  FixedArray* array = reinterpret_cast<FixedArray*>(result);
4763  array->set_map_no_write_barrier(fixed_array_map());
4764  array->set_length(length);
4765  // Initialize body.
4766  ASSERT(!InNewSpace(undefined_value()));
4767  MemsetPointer(array->data_start(), undefined_value(), length);
4768  return result;
4769 }
4770 
4771 
4772 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
4773  if (length < 0 || length > FixedArray::kMaxLength) {
4774  return Failure::OutOfMemoryException();
4775  }
4776 
4777  AllocationSpace space =
4778  (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4779  int size = FixedArray::SizeFor(length);
4780  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4781  // Too big for new space.
4782  space = LO_SPACE;
4783  } else if (space == OLD_POINTER_SPACE &&
4784  size > Page::kMaxNonCodeHeapObjectSize) {
4785  // Too big for old pointer space.
4786  space = LO_SPACE;
4787  }
4788 
4789  AllocationSpace retry_space =
4790  (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
4791 
4792  return AllocateRaw(size, space, retry_space);
4793 }
4794 
4795 
4796 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
4797  Heap* heap,
4798  int length,
4799  PretenureFlag pretenure,
4800  Object* filler) {
4801  ASSERT(length >= 0);
4802  ASSERT(heap->empty_fixed_array()->IsFixedArray());
4803  if (length == 0) return heap->empty_fixed_array();
4804 
4805  ASSERT(!heap->InNewSpace(filler));
4806  Object* result;
4807  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
4808  if (!maybe_result->ToObject(&result)) return maybe_result;
4809  }
4810 
4811  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
4812  FixedArray* array = FixedArray::cast(result);
4813  array->set_length(length);
4814  MemsetPointer(array->data_start(), filler, length);
4815  return array;
4816 }
4817 
4818 
4819 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4820  return AllocateFixedArrayWithFiller(this,
4821  length,
4822  pretenure,
4823  undefined_value());
4824 }
4825 
4826 
4827 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
4828  PretenureFlag pretenure) {
4829  return AllocateFixedArrayWithFiller(this,
4830  length,
4831  pretenure,
4832  the_hole_value());
4833 }
4834 
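// Illustrative sketch, not part of the original heap.cc: the two wrappers
// above differ only in the filler value -- undefined_value() for ordinary
// property or context storage, the_hole_value() for array backing stores where
// the presence of an element must remain distinguishable from undefined. A
// hypothetical helper choosing between them (the name and the boolean
// parameter are assumptions for the example):
static MaybeObject* AllocateBackingStoreForIllustration(Heap* heap,
                                                        int length,
                                                        bool holey) {
  return holey ? heap->AllocateFixedArrayWithHoles(length, NOT_TENURED)
               : heap->AllocateFixedArray(length, NOT_TENURED);
}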
4835 
4836 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
4837  if (length == 0) return empty_fixed_array();
4838 
4839  Object* obj;
4840  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
4841  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
4842  }
4843 
4844  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
4845  fixed_array_map());
4846  FixedArray::cast(obj)->set_length(length);
4847  return obj;
4848 }
4849 
4850 
4851 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
4852  int size = FixedDoubleArray::SizeFor(0);
4853  Object* result;
4854  { MaybeObject* maybe_result =
4855  AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4856  if (!maybe_result->ToObject(&result)) return maybe_result;
4857  }
4858  // Initialize the object.
4859  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
4860  fixed_double_array_map());
4861  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
4862  return result;
4863 }
4864 
4865 
4866 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
4867  int length,
4868  PretenureFlag pretenure) {
4869  if (length == 0) return empty_fixed_array();
4870 
4871  Object* elements_object;
4872  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4873  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4874  FixedDoubleArray* elements =
4875  reinterpret_cast<FixedDoubleArray*>(elements_object);
4876 
4877  elements->set_map_no_write_barrier(fixed_double_array_map());
4878  elements->set_length(length);
4879  return elements;
4880 }
4881 
4882 
4883 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
4884  int length,
4885  PretenureFlag pretenure) {
4886  if (length == 0) return empty_fixed_array();
4887 
4888  Object* elements_object;
4889  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
4890  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
4891  FixedDoubleArray* elements =
4892  reinterpret_cast<FixedDoubleArray*>(elements_object);
4893 
4894  for (int i = 0; i < length; ++i) {
4895  elements->set_the_hole(i);
4896  }
4897 
4898  elements->set_map_no_write_barrier(fixed_double_array_map());
4899  elements->set_length(length);
4900  return elements;
4901 }
4902 
4903 
4904 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
4905  PretenureFlag pretenure) {
4906  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4907  return Failure::OutOfMemoryException();
4908  }
4909 
4910  AllocationSpace space =
4911  (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4912  int size = FixedDoubleArray::SizeFor(length);
4913 
4914 #ifndef V8_HOST_ARCH_64_BIT
4915  size += kPointerSize;
4916 #endif
4917 
4918  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
4919  // Too big for new space.
4920  space = LO_SPACE;
4921  } else if (space == OLD_DATA_SPACE &&
4922  size > Page::kMaxNonCodeHeapObjectSize) {
4923  // Too big for old data space.
4924  space = LO_SPACE;
4925  }
4926 
4927  AllocationSpace retry_space =
4928  (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
4929 
4930  HeapObject* object;
4931  { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
4932  if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
4933  }
4934 
4935  return EnsureDoubleAligned(this, object, size);
4936 }
4937 
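// Illustrative sketch, not part of the original heap.cc: on 32-bit targets the
// raw request above is padded by one pointer so that EnsureDoubleAligned can
// shift the object onto an 8-byte boundary and turn the spare word into a
// filler. A hypothetical size computation mirroring that padding (the function
// name is an assumption for the example):
static int PaddedDoubleArraySizeForIllustration(int length) {
  int size = FixedDoubleArray::SizeFor(length);
#ifndef V8_HOST_ARCH_64_BIT
  size += kPointerSize;  // room to realign the payload to 8 bytes
#endif
  return size;
}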
4938 
4939 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
4940  Object* result;
4941  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
4942  if (!maybe_result->ToObject(&result)) return maybe_result;
4943  }
4944  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
4945  hash_table_map());
4946  ASSERT(result->IsHashTable());
4947  return result;
4948 }
4949 
4950 
4951 MaybeObject* Heap::AllocateNativeContext() {
4952  Object* result;
4953  { MaybeObject* maybe_result =
4954  AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
4955  if (!maybe_result->ToObject(&result)) return maybe_result;
4956  }
4957  Context* context = reinterpret_cast<Context*>(result);
4958  context->set_map_no_write_barrier(native_context_map());
4959  context->set_js_array_maps(undefined_value());
4960  ASSERT(context->IsNativeContext());
4961  ASSERT(result->IsContext());
4962  return result;
4963 }
4964 
4965 
4966 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
4967  ScopeInfo* scope_info) {
4968  Object* result;
4969  { MaybeObject* maybe_result =
4970  AllocateFixedArray(scope_info->ContextLength(), TENURED);
4971  if (!maybe_result->ToObject(&result)) return maybe_result;
4972  }
4973  Context* context = reinterpret_cast<Context*>(result);
4974  context->set_map_no_write_barrier(global_context_map());
4975  context->set_closure(function);
4976  context->set_previous(function->context());
4977  context->set_extension(scope_info);
4978  context->set_global_object(function->context()->global_object());
4979  ASSERT(context->IsGlobalContext());
4980  ASSERT(result->IsContext());
4981  return context;
4982 }
4983 
4984 
4985 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
4986  Object* result;
4987  { MaybeObject* maybe_result =
4988  AllocateFixedArray(scope_info->ContextLength(), TENURED);
4989  if (!maybe_result->ToObject(&result)) return maybe_result;
4990  }
4991  Context* context = reinterpret_cast<Context*>(result);
4992  context->set_map_no_write_barrier(module_context_map());
4993  // Context links will be set later.
4994  context->set_extension(Smi::FromInt(0));
4995  return context;
4996 }
4997 
4998 
4999 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5000  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5001  Object* result;
5002  { MaybeObject* maybe_result = AllocateFixedArray(length);
5003  if (!maybe_result->ToObject(&result)) return maybe_result;
5004  }
5005  Context* context = reinterpret_cast<Context*>(result);
5006  context->set_map_no_write_barrier(function_context_map());
5007  context->set_closure(function);
5008  context->set_previous(function->context());
5009  context->set_extension(Smi::FromInt(0));
5010  context->set_global_object(function->context()->global_object());
5011  return context;
5012 }
5013 
5014 
5015 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5016  Context* previous,
5017  String* name,
5018  Object* thrown_object) {
5019  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5020  Object* result;
5021  { MaybeObject* maybe_result =
5022  AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5023  if (!maybe_result->ToObject(&result)) return maybe_result;
5024  }
5025  Context* context = reinterpret_cast<Context*>(result);
5026  context->set_map_no_write_barrier(catch_context_map());
5027  context->set_closure(function);
5028  context->set_previous(previous);
5029  context->set_extension(name);
5030  context->set_global_object(previous->global_object());
5031  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5032  return context;
5033 }
5034 
5035 
5036 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5037  Context* previous,
5038  JSObject* extension) {
5039  Object* result;
5040  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5041  if (!maybe_result->ToObject(&result)) return maybe_result;
5042  }
5043  Context* context = reinterpret_cast<Context*>(result);
5044  context->set_map_no_write_barrier(with_context_map());
5045  context->set_closure(function);
5046  context->set_previous(previous);
5047  context->set_extension(extension);
5048  context->set_global_object(previous->global_object());
5049  return context;
5050 }
5051 
5052 
5053 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5054  Context* previous,
5055  ScopeInfo* scope_info) {
5056  Object* result;
5057  { MaybeObject* maybe_result =
5058  AllocateFixedArrayWithHoles(scope_info->ContextLength());
5059  if (!maybe_result->ToObject(&result)) return maybe_result;
5060  }
5061  Context* context = reinterpret_cast<Context*>(result);
5062  context->set_map_no_write_barrier(block_context_map());
5063  context->set_closure(function);
5064  context->set_previous(previous);
5065  context->set_extension(scope_info);
5066  context->set_global_object(previous->global_object());
5067  return context;
5068 }
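// Illustrative sketch (not part of the original heap.cc): every context
// allocator above follows the same pattern -- allocate a FixedArray of slots,
// stamp the matching context map without a write barrier (the object is brand
// new), then fill the standard header slots. A hypothetical caller propagates
// allocation failures the same way the allocators themselves do:
static MaybeObject* SketchAllocateWithContext(Heap* heap,
                                              JSFunction* function,
                                              Context* previous,
                                              JSObject* extension) {
  Object* ctx;
  { MaybeObject* maybe = heap->AllocateWithContext(function, previous,
                                                   extension);
    // A failure here is either an out-of-memory or a retry-after-GC request.
    if (!maybe->ToObject(&ctx)) return maybe;
  }
  return Context::cast(ctx);
}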
5069 
5070 
5071 MaybeObject* Heap::AllocateScopeInfo(int length) {
5072  FixedArray* scope_info;
5073  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5074  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5075  scope_info->set_map_no_write_barrier(scope_info_map());
5076  return scope_info;
5077 }
5078 
5079 
5080 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5081  Map* map;
5082  switch (type) {
5083 #define MAKE_CASE(NAME, Name, name) \
5084  case NAME##_TYPE: map = name##_map(); break;
5085  STRUCT_LIST(MAKE_CASE)
5086 #undef MAKE_CASE
5087  default:
5088  UNREACHABLE();
5089  return Failure::InternalError();
5090  }
5091  int size = map->instance_size();
5092  AllocationSpace space =
5093  (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5094  Object* result;
5095  { MaybeObject* maybe_result = Allocate(map, space);
5096  if (!maybe_result->ToObject(&result)) return maybe_result;
5097  }
5098  Struct::cast(result)->InitializeBody(size);
5099  return result;
5100 }
5101 
5102 
5103 bool Heap::IsHeapIterable() {
5104  return (!old_pointer_space()->was_swept_conservatively() &&
5105  !old_data_space()->was_swept_conservatively());
5106 }
5107 
5108 
5109 void Heap::EnsureHeapIsIterable() {
5110  ASSERT(IsAllocationAllowed());
5111  if (!IsHeapIterable()) {
5112  CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5113  }
5114  ASSERT(IsHeapIterable());
5115 }
5116 
5117 
5118 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5119  incremental_marking()->Step(step_size,
5120  IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5121 
5122  if (incremental_marking()->IsComplete()) {
5123  bool uncommit = false;
5124  if (gc_count_at_last_idle_gc_ == gc_count_) {
5125  // No GC since the last full GC, the mutator is probably not active.
5126  isolate_->compilation_cache()->Clear();
5127  uncommit = true;
5128  }
5129  CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5130  gc_count_at_last_idle_gc_ = gc_count_;
5131  if (uncommit) {
5132  new_space_.Shrink();
5133  UncommitFromSpace();
5134  }
5135  }
5136 }
5137 
5138 
5139 bool Heap::IdleNotification(int hint) {
5140  // Hints greater than this value indicate that
5141  // the embedder is requesting a lot of GC work.
5142  const int kMaxHint = 1000;
5144  // Minimal hint that allows a full GC to be performed.
5144  const int kMinHintForFullGC = 100;
5145  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5146  // The size factor is in range [5..250]. The numbers here are chosen from
5147  // experiments. If you change them, make sure to test with
5148  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5149  intptr_t step_size =
5150  size_factor * IncrementalMarking::kAllocatedThreshold;
5151 
5152  if (contexts_disposed_ > 0) {
5153  if (hint >= kMaxHint) {
5154  // The embedder is requesting a lot of GC work after context disposal,
5155  // we age inline caches so that they don't keep objects from
5156  // the old context alive.
5157  AgeInlineCaches();
5158  }
5159  int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5160  if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5161  incremental_marking()->IsStopped()) {
5162  HistogramTimerScope scope(isolate_->counters()->gc_context());
5163  CollectAllGarbage(kReduceMemoryFootprintMask,
5164  "idle notification: contexts disposed");
5165  } else {
5166  AdvanceIdleIncrementalMarking(step_size);
5167  contexts_disposed_ = 0;
5168  }
5169  // Make sure that we have no pending context disposals.
5170  // Take into account that we might have decided to delay full collection
5171  // because incremental marking is in progress.
5172  ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
5173  // After context disposal there is likely a lot of garbage remaining, reset
5174  // the idle notification counters in order to trigger more incremental GCs
5175  // on subsequent idle notifications.
5176  StartIdleRound();
5177  return false;
5178  }
5179 
5180  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5181  return IdleGlobalGC();
5182  }
5183 
5184  // By doing small chunks of GC work in each IdleNotification we perform
5185  // a round of incremental GCs and afterwards wait until the mutator
5186  // creates enough garbage to justify a new round.
5187  // An incremental GC progresses as follows:
5188  // 1. many incremental marking steps,
5189  // 2. one old space mark-sweep-compact,
5190  // 3. many lazy sweep steps.
5191  // Use mark-sweep-compact events to count incremental GCs in a round.
5192 
5193 
5194  if (incremental_marking()->IsStopped()) {
5195  if (!IsSweepingComplete() &&
5196  !AdvanceSweepers(static_cast<int>(step_size))) {
5197  return false;
5198  }
5199  }
5200 
5201  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5202  if (EnoughGarbageSinceLastIdleRound()) {
5203  StartIdleRound();
5204  } else {
5205  return true;
5206  }
5207  }
5208 
5209  int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
5210  mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
5211  ms_count_at_last_idle_notification_ = ms_count_;
5212 
5213  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5214  mark_sweeps_since_idle_round_started_;
5215 
5216  if (remaining_mark_sweeps <= 0) {
5217  FinishIdleRound();
5218  return true;
5219  }
5220 
5221  if (incremental_marking()->IsStopped()) {
5222  // If there are no more than two GCs left in this idle round and we are
5223  // allowed to do a full GC, then make those GCs full in order to compact
5224  // the code space.
5225  // TODO(ulan): Once we enable code compaction for incremental marking,
5226  // we can get rid of this special case and always start incremental marking.
5227  if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5228  CollectAllGarbage(kReduceMemoryFootprintMask,
5229  "idle notification: finalize idle round");
5230  } else {
5231  incremental_marking()->Start();
5232  }
5233  }
5234  if (!incremental_marking()->IsStopped()) {
5235  AdvanceIdleIncrementalMarking(step_size);
5236  }
5237  return false;
5238 }
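// Illustrative arithmetic (not part of the original heap.cc): the maximum
// hint of 1000 clamps to size_factor = Min(Max(1000, 20), 1000) / 4 = 250,
// while a tiny hint clamps to Min(Max(1, 20), 1000) / 4 = 5 -- hence the
// documented [5..250] range. The idle marking step therefore scales linearly
// with the embedder's hint.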
5239 
5240 
5241 bool Heap::IdleGlobalGC() {
5242  static const int kIdlesBeforeScavenge = 4;
5243  static const int kIdlesBeforeMarkSweep = 7;
5244  static const int kIdlesBeforeMarkCompact = 8;
5245  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5246  static const unsigned int kGCsBetweenCleanup = 4;
5247 
5248  if (!last_idle_notification_gc_count_init_) {
5249  last_idle_notification_gc_count_ = gc_count_;
5250  last_idle_notification_gc_count_init_ = true;
5251  }
5252 
5253  bool uncommit = true;
5254  bool finished = false;
5255 
5256  // Reset the number of idle notifications received when a number of
5257  // GCs have taken place. This allows another round of cleanup based
5258  // on idle notifications if enough work has been carried out to
5259  // provoke a number of garbage collections.
5260  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5261  number_idle_notifications_ =
5262  Min(number_idle_notifications_ + 1, kMaxIdleCount);
5263  } else {
5264  number_idle_notifications_ = 0;
5265  last_idle_notification_gc_count_ = gc_count_;
5266  }
5267 
5268  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5269  CollectGarbage(NEW_SPACE, "idle notification");
5270  new_space_.Shrink();
5271  last_idle_notification_gc_count_ = gc_count_;
5272  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5273  // Before doing the mark-sweep collections we clear the
5274  // compilation cache to avoid hanging on to source code and
5275  // generated code for cached functions.
5276  isolate_->compilation_cache()->Clear();
5277 
5278  CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5279  new_space_.Shrink();
5280  last_idle_notification_gc_count_ = gc_count_;
5281 
5282  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5283  CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5284  new_space_.Shrink();
5285  last_idle_notification_gc_count_ = gc_count_;
5286  number_idle_notifications_ = 0;
5287  finished = true;
5288  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5289  // If we have received more than kIdlesBeforeMarkCompact idle
5290  // notifications we do not perform any cleanup because we don't
5291  // expect to gain much by doing so.
5292  finished = true;
5293  }
5294 
5295  if (uncommit) UncommitFromSpace();
5296 
5297  return finished;
5298 }
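// Illustrative summary (not part of the original heap.cc): with the constants
// above, an uninterrupted run of idle notifications escalates as follows --
// the 4th notification triggers a new-space scavenge, the 7th a full GC with
// the compilation cache cleared, and the 8th a final full GC after which the
// round reports that it is finished.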
5299 
5300 
5301 #ifdef DEBUG
5302 
5303 void Heap::Print() {
5304  if (!HasBeenSetUp()) return;
5305  isolate()->PrintStack();
5306  AllSpaces spaces;
5307  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5308  space->Print();
5309 }
5310 
5311 
5312 void Heap::ReportCodeStatistics(const char* title) {
5313  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5314  PagedSpace::ResetCodeStatistics();
5315  // We do not look for code in new space, map space, or old space. If code
5316  // somehow ends up in those spaces, we would miss it here.
5317  code_space_->CollectCodeStatistics();
5318  lo_space_->CollectCodeStatistics();
5319  PagedSpace::ReportCodeStatistics();
5320 }
5321 
5322 
5323 // This function expects that NewSpace's allocated objects histogram is
5324 // populated (via a call to CollectStatistics or else as a side effect of a
5325 // just-completed scavenge collection).
5326 void Heap::ReportHeapStatistics(const char* title) {
5327  USE(title);
5328  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5329  title, gc_count_);
5330  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
5331  old_gen_promotion_limit_);
5332  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5333  old_gen_allocation_limit_);
5334  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
5335 
5336  PrintF("\n");
5337  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
5338  isolate_->global_handles()->PrintStats();
5339  PrintF("\n");
5340 
5341  PrintF("Heap statistics : ");
5342  isolate_->memory_allocator()->ReportStatistics();
5343  PrintF("To space : ");
5344  new_space_.ReportStatistics();
5345  PrintF("Old pointer space : ");
5346  old_pointer_space_->ReportStatistics();
5347  PrintF("Old data space : ");
5348  old_data_space_->ReportStatistics();
5349  PrintF("Code space : ");
5350  code_space_->ReportStatistics();
5351  PrintF("Map space : ");
5352  map_space_->ReportStatistics();
5353  PrintF("Cell space : ");
5354  cell_space_->ReportStatistics();
5355  PrintF("Large object space : ");
5356  lo_space_->ReportStatistics();
5357  PrintF(">>>>>> ========================================= >>>>>>\n");
5358 }
5359 
5360 #endif // DEBUG
5361 
5362 bool Heap::Contains(HeapObject* value) {
5363  return Contains(value->address());
5364 }
5365 
5366 
5367 bool Heap::Contains(Address addr) {
5368  if (OS::IsOutsideAllocatedSpace(addr)) return false;
5369  return HasBeenSetUp() &&
5370  (new_space_.ToSpaceContains(addr) ||
5371  old_pointer_space_->Contains(addr) ||
5372  old_data_space_->Contains(addr) ||
5373  code_space_->Contains(addr) ||
5374  map_space_->Contains(addr) ||
5375  cell_space_->Contains(addr) ||
5376  lo_space_->SlowContains(addr));
5377 }
5378 
5379 
5380 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5381  return InSpace(value->address(), space);
5382 }
5383 
5384 
5385 bool Heap::InSpace(Address addr, AllocationSpace space) {
5386  if (OS::IsOutsideAllocatedSpace(addr)) return false;
5387  if (!HasBeenSetUp()) return false;
5388 
5389  switch (space) {
5390  case NEW_SPACE:
5391  return new_space_.ToSpaceContains(addr);
5392  case OLD_POINTER_SPACE:
5393  return old_pointer_space_->Contains(addr);
5394  case OLD_DATA_SPACE:
5395  return old_data_space_->Contains(addr);
5396  case CODE_SPACE:
5397  return code_space_->Contains(addr);
5398  case MAP_SPACE:
5399  return map_space_->Contains(addr);
5400  case CELL_SPACE:
5401  return cell_space_->Contains(addr);
5402  case LO_SPACE:
5403  return lo_space_->SlowContains(addr);
5404  }
5405 
5406  return false;
5407 }
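// Illustrative sketch (not part of the original heap.cc): typical debug-only
// use of the containment predicates above, e.g. checking where a promoted
// object ended up. The helper below is hypothetical.
static void SketchAssertInOldGeneration(Heap* heap, HeapObject* object) {
  ASSERT(heap->Contains(object));
  ASSERT(heap->InSpace(object, OLD_POINTER_SPACE) ||
         heap->InSpace(object, OLD_DATA_SPACE) ||
         heap->InSpace(object, LO_SPACE));
}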
5408 
5409 
5410 #ifdef VERIFY_HEAP
5411 void Heap::Verify() {
5412  CHECK(HasBeenSetUp());
5413 
5414  store_buffer()->Verify();
5415 
5416  VerifyPointersVisitor visitor;
5417  IterateRoots(&visitor, VISIT_ONLY_STRONG);
5418 
5419  new_space_.Verify();
5420 
5421  old_pointer_space_->Verify(&visitor);
5422  map_space_->Verify(&visitor);
5423 
5424  VerifyPointersVisitor no_dirty_regions_visitor;
5425  old_data_space_->Verify(&no_dirty_regions_visitor);
5426  code_space_->Verify(&no_dirty_regions_visitor);
5427  cell_space_->Verify(&no_dirty_regions_visitor);
5428 
5429  lo_space_->Verify();
5430 }
5431 #endif
5432 
5433 
5434 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
5435  Object* symbol = NULL;
5436  Object* new_table;
5437  { MaybeObject* maybe_new_table =
5438  symbol_table()->LookupSymbol(string, &symbol);
5439  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5440  }
5441  // Can't use set_symbol_table because SymbolTable::cast knows that
5442  // SymbolTable is a singleton and checks for identity.
5443  roots_[kSymbolTableRootIndex] = new_table;
5444  ASSERT(symbol != NULL);
5445  return symbol;
5446 }
5447 
5448 
5449 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
5450  Object* symbol = NULL;
5451  Object* new_table;
5452  { MaybeObject* maybe_new_table =
5453  symbol_table()->LookupAsciiSymbol(string, &symbol);
5454  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5455  }
5456  // Can't use set_symbol_table because SymbolTable::cast knows that
5457  // SymbolTable is a singleton and checks for identity.
5458  roots_[kSymbolTableRootIndex] = new_table;
5459  ASSERT(symbol != NULL);
5460  return symbol;
5461 }
5462 
5463 
5464 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
5465  int from,
5466  int length) {
5467  Object* symbol = NULL;
5468  Object* new_table;
5469  { MaybeObject* maybe_new_table =
5470  symbol_table()->LookupSubStringAsciiSymbol(string,
5471  from,
5472  length,
5473  &symbol);
5474  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5475  }
5476  // Can't use set_symbol_table because SymbolTable::cast knows that
5477  // SymbolTable is a singleton and checks for identity.
5478  roots_[kSymbolTableRootIndex] = new_table;
5479  ASSERT(symbol != NULL);
5480  return symbol;
5481 }
5482 
5483 
5484 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
5485  Object* symbol = NULL;
5486  Object* new_table;
5487  { MaybeObject* maybe_new_table =
5488  symbol_table()->LookupTwoByteSymbol(string, &symbol);
5489  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5490  }
5491  // Can't use set_symbol_table because SymbolTable::cast knows that
5492  // SymbolTable is a singleton and checks for identity.
5493  roots_[kSymbolTableRootIndex] = new_table;
5494  ASSERT(symbol != NULL);
5495  return symbol;
5496 }
5497 
5498 
5499 MaybeObject* Heap::LookupSymbol(String* string) {
5500  if (string->IsSymbol()) return string;
5501  Object* symbol = NULL;
5502  Object* new_table;
5503  { MaybeObject* maybe_new_table =
5504  symbol_table()->LookupString(string, &symbol);
5505  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5506  }
5507  // Can't use set_symbol_table because SymbolTable::cast knows that
5508  // SymbolTable is a singleton and checks for identity.
5509  roots_[kSymbolTableRootIndex] = new_table;
5510  ASSERT(symbol != NULL);
5511  return symbol;
5512 }
5513 
5514 
5515 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
5516  if (string->IsSymbol()) {
5517  *symbol = string;
5518  return true;
5519  }
5520  return symbol_table()->LookupSymbolIfExists(string, symbol);
5521 }
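// Illustrative sketch (not part of the original heap.cc): interning a C
// string goes through the Vector<const char> overload of LookupSymbol; the
// root update inside it keeps the (possibly grown) symbol table alive. The
// helper below is hypothetical.
static MaybeObject* SketchInternCString(Heap* heap, const char* str) {
  return heap->LookupSymbol(CStrVector(str));
}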
5522 
5523 void Heap::ZapFromSpace() {
5524  NewSpacePageIterator it(new_space_.FromSpaceStart(),
5525  new_space_.FromSpaceEnd());
5526  while (it.has_next()) {
5527  NewSpacePage* page = it.next();
5528  for (Address cursor = page->area_start(), limit = page->area_end();
5529  cursor < limit;
5530  cursor += kPointerSize) {
5531  Memory::Address_at(cursor) = kFromSpaceZapValue;
5532  }
5533  }
5534 }
5535 
5536 
5537 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5538  Address end,
5539  ObjectSlotCallback callback) {
5540  Address slot_address = start;
5541 
5542  // We are not collecting slots on new space objects during mutation
5543  // thus we have to scan for pointers to evacuation candidates when we
5544  // promote objects. But we should not record any slots in non-black
5545  // objects. Grey objects' slots would be rescanned anyway.
5546  // A white object might not survive until the end of the collection, so
5547  // recording its slots would violate the invariant.
5548  bool record_slots = false;
5549  if (incremental_marking()->IsCompacting()) {
5550  MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5551  record_slots = Marking::IsBlack(mark_bit);
5552  }
5553 
5554  while (slot_address < end) {
5555  Object** slot = reinterpret_cast<Object**>(slot_address);
5556  Object* object = *slot;
5557  // If the store buffer becomes overfull we mark pages as being exempt from
5558  // the store buffer. These pages are scanned to find pointers that point
5559  // to the new space. In that case we may hit newly promoted objects and
5560  // fix the pointers before the promotion queue gets to them. Thus the 'if'.
5561  if (object->IsHeapObject()) {
5562  if (Heap::InFromSpace(object)) {
5563  callback(reinterpret_cast<HeapObject**>(slot),
5564  HeapObject::cast(object));
5565  Object* new_object = *slot;
5566  if (InNewSpace(new_object)) {
5567  SLOW_ASSERT(Heap::InToSpace(new_object));
5568  SLOW_ASSERT(new_object->IsHeapObject());
5569  store_buffer_.EnterDirectlyIntoStoreBuffer(
5570  reinterpret_cast<Address>(slot));
5571  }
5572  SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5573  } else if (record_slots &&
5574  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5575  mark_compact_collector()->RecordSlot(slot, slot, object);
5576  }
5577  }
5578  slot_address += kPointerSize;
5579  }
5580 }
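// Illustrative sketch (not part of the original heap.cc): ObjectSlotCallback
// is a plain function pointer taking the slot and the object it currently
// points to; a real scavenge callback copies the object out of from-space and
// writes the forwarding address back through the slot. A do-nothing stand-in:
static void SketchKeepSlotUnchanged(HeapObject** slot, HeapObject* object) {
  *slot = object;  // a real callback would store the object's new location
}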
5581 
5582 
5583 #ifdef DEBUG
5584 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5585 
5586 
5587 bool IsAMapPointerAddress(Object** addr) {
5588  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5589  int mod = a % Map::kSize;
5590  return mod >= Map::kPointerFieldsBeginOffset &&
5591  mod < Map::kPointerFieldsEndOffset;
5592 }
5593 
5594 
5595 bool EverythingsAPointer(Object** addr) {
5596  return true;
5597 }
5598 
5599 
5600 static void CheckStoreBuffer(Heap* heap,
5601  Object** current,
5602  Object** limit,
5603  Object**** store_buffer_position,
5604  Object*** store_buffer_top,
5605  CheckStoreBufferFilter filter,
5606  Address special_garbage_start,
5607  Address special_garbage_end) {
5608  Map* free_space_map = heap->free_space_map();
5609  for ( ; current < limit; current++) {
5610  Object* o = *current;
5611  Address current_address = reinterpret_cast<Address>(current);
5612  // Skip free space.
5613  if (o == free_space_map) {
5614  Address current_address = reinterpret_cast<Address>(current);
5615  FreeSpace* free_space =
5616  FreeSpace::cast(HeapObject::FromAddress(current_address));
5617  int skip = free_space->Size();
5618  ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5619  ASSERT(skip > 0);
5620  current_address += skip - kPointerSize;
5621  current = reinterpret_cast<Object**>(current_address);
5622  continue;
5623  }
5624  // Skip the current linear allocation space between top and limit which is
5625  // unmarked with the free space map, but can contain junk.
5626  if (current_address == special_garbage_start &&
5627  special_garbage_end != special_garbage_start) {
5628  current_address = special_garbage_end - kPointerSize;
5629  current = reinterpret_cast<Object**>(current_address);
5630  continue;
5631  }
5632  if (!(*filter)(current)) continue;
5633  ASSERT(current_address < special_garbage_start ||
5634  current_address >= special_garbage_end);
5635  ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5636  // We have to check that the pointer does not point into new space
5637  // without trying to cast it to a heap object since the hash field of
5638  // a string can contain values like 1 and 3 which are tagged null
5639  // pointers.
5640  if (!heap->InNewSpace(o)) continue;
5641  while (**store_buffer_position < current &&
5642  *store_buffer_position < store_buffer_top) {
5643  (*store_buffer_position)++;
5644  }
5645  if (**store_buffer_position != current ||
5646  *store_buffer_position == store_buffer_top) {
5647  Object** obj_start = current;
5648  while (!(*obj_start)->IsMap()) obj_start--;
5649  UNREACHABLE();
5650  }
5651  }
5652 }
5653 
5654 
5655 // Check that the store buffer contains all intergenerational pointers by
5656 // scanning a page and ensuring that all pointers to young space are in the
5657 // store buffer.
5658 void Heap::OldPointerSpaceCheckStoreBuffer() {
5659  OldSpace* space = old_pointer_space();
5660  PageIterator pages(space);
5661 
5662  store_buffer()->SortUniq();
5663 
5664  while (pages.has_next()) {
5665  Page* page = pages.next();
5666  Object** current = reinterpret_cast<Object**>(page->area_start());
5667 
5668  Address end = page->area_end();
5669 
5670  Object*** store_buffer_position = store_buffer()->Start();
5671  Object*** store_buffer_top = store_buffer()->Top();
5672 
5673  Object** limit = reinterpret_cast<Object**>(end);
5674  CheckStoreBuffer(this,
5675  current,
5676  limit,
5677  &store_buffer_position,
5678  store_buffer_top,
5679  &EverythingsAPointer,
5680  space->top(),
5681  space->limit());
5682  }
5683 }
5684 
5685 
5686 void Heap::MapSpaceCheckStoreBuffer() {
5687  MapSpace* space = map_space();
5688  PageIterator pages(space);
5689 
5690  store_buffer()->SortUniq();
5691 
5692  while (pages.has_next()) {
5693  Page* page = pages.next();
5694  Object** current = reinterpret_cast<Object**>(page->area_start());
5695 
5696  Address end = page->area_end();
5697 
5698  Object*** store_buffer_position = store_buffer()->Start();
5699  Object*** store_buffer_top = store_buffer()->Top();
5700 
5701  Object** limit = reinterpret_cast<Object**>(end);
5702  CheckStoreBuffer(this,
5703  current,
5704  limit,
5705  &store_buffer_position,
5706  store_buffer_top,
5707  &IsAMapPointerAddress,
5708  space->top(),
5709  space->limit());
5710  }
5711 }
5712 
5713 
5714 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5715  LargeObjectIterator it(lo_space());
5716  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5717  // We only have code, sequential strings, or fixed arrays in large
5718  // object space, and only fixed arrays can possibly contain pointers to
5719  // the young generation.
5720  if (object->IsFixedArray()) {
5721  Object*** store_buffer_position = store_buffer()->Start();
5722  Object*** store_buffer_top = store_buffer()->Top();
5723  Object** current = reinterpret_cast<Object**>(object->address());
5724  Object** limit =
5725  reinterpret_cast<Object**>(object->address() + object->Size());
5726  CheckStoreBuffer(this,
5727  current,
5728  limit,
5729  &store_buffer_position,
5730  store_buffer_top,
5731  &EverythingsAPointer,
5732  NULL,
5733  NULL);
5734  }
5735  }
5736 }
5737 #endif
5738 
5739 
5740 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5741  IterateStrongRoots(v, mode);
5742  IterateWeakRoots(v, mode);
5743 }
5744 
5745 
5746 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5747  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
5748  v->Synchronize(VisitorSynchronization::kSymbolTable);
5749  if (mode != VISIT_ALL_IN_SCAVENGE &&
5750  mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5751  // Scavenge collections have special processing for this.
5752  external_string_table_.Iterate(v);
5753  }
5754  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5755 }
5756 
5757 
5758 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5759  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5760  v->Synchronize(VisitorSynchronization::kStrongRootList);
5761 
5762  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
5763  v->Synchronize(VisitorSynchronization::kSymbol);
5764 
5765  isolate_->bootstrapper()->Iterate(v);
5766  v->Synchronize(VisitorSynchronization::kBootstrapper);
5767  isolate_->Iterate(v);
5768  v->Synchronize(VisitorSynchronization::kTop);
5769  Relocatable::Iterate(v);
5770  v->Synchronize(VisitorSynchronization::kRelocatable);
5771 
5772 #ifdef ENABLE_DEBUGGER_SUPPORT
5773  isolate_->debug()->Iterate(v);
5774  if (isolate_->deoptimizer_data() != NULL) {
5775  isolate_->deoptimizer_data()->Iterate(v);
5776  }
5777 #endif
5778  v->Synchronize(VisitorSynchronization::kDebug);
5779  isolate_->compilation_cache()->Iterate(v);
5780  v->Synchronize(VisitorSynchronization::kCompilationCache);
5781 
5782  // Iterate over local handles in handle scopes.
5783  isolate_->handle_scope_implementer()->Iterate(v);
5784  isolate_->IterateDeferredHandles(v);
5785  v->Synchronize(VisitorSynchronization::kHandleScope);
5786 
5787  // Iterate over the builtin code objects and code stubs in the
5788  // heap. Note that it is not necessary to iterate over code objects
5789  // on scavenge collections.
5790  if (mode != VISIT_ALL_IN_SCAVENGE) {
5791  isolate_->builtins()->IterateBuiltins(v);
5792  }
5793  v->Synchronize(VisitorSynchronization::kBuiltins);
5794 
5795  // Iterate over global handles.
5796  switch (mode) {
5797  case VISIT_ONLY_STRONG:
5798  isolate_->global_handles()->IterateStrongRoots(v);
5799  break;
5800  case VISIT_ALL_IN_SCAVENGE:
5801  isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5802  break;
5803  case VISIT_ALL_IN_SWEEP_NEWSPACE:
5804  case VISIT_ALL:
5805  isolate_->global_handles()->IterateAllRoots(v);
5806  break;
5807  }
5808  v->Synchronize(VisitorSynchronization::kGlobalHandles);
5809 
5810  // Iterate over pointers being held by inactive threads.
5811  isolate_->thread_manager()->Iterate(v);
5812  v->Synchronize(VisitorSynchronization::kThreadManager);
5813 
5814  // Iterate over the pointers the Serialization/Deserialization code is
5815  // holding.
5816  // During garbage collection this keeps the partial snapshot cache alive.
5817  // During deserialization of the startup snapshot this creates the partial
5818  // snapshot cache and deserializes the objects it refers to. During
5819  // serialization this does nothing, since the partial snapshot cache is
5820  // empty. However the next thing we do is create the partial snapshot,
5821  // filling up the partial snapshot cache with objects it needs as we go.
5822  SerializerDeserializer::Iterate(v);
5823  // We don't do a v->Synchronize call here, because in debug mode that will
5824  // output a flag to the snapshot. However at this point the serializer and
5825  // deserializer are deliberately a little unsynchronized (see above) so the
5826  // checking of the sync flag in the snapshot would fail.
5827 }
5828 
5829 
5830 // TODO(1236194): Since the heap size is configurable on the command line
5831 // and through the API, we should gracefully handle the case that the heap
5832 // size is not big enough to fit all the initial objects.
5833 bool Heap::ConfigureHeap(int max_semispace_size,
5834  intptr_t max_old_gen_size,
5835  intptr_t max_executable_size) {
5836  if (HasBeenSetUp()) return false;
5837 
5838  if (FLAG_stress_compaction) {
5839  // This will cause more frequent GCs when stressing.
5840  max_semispace_size_ = Page::kPageSize;
5841  }
5842 
5843  if (max_semispace_size > 0) {
5844  if (max_semispace_size < Page::kPageSize) {
5845  max_semispace_size = Page::kPageSize;
5846  if (FLAG_trace_gc) {
5847  PrintPID("Max semispace size cannot be less than %dkbytes\n",
5848  Page::kPageSize >> 10);
5849  }
5850  }
5851  max_semispace_size_ = max_semispace_size;
5852  }
5853 
5854  if (Snapshot::IsEnabled()) {
5855  // If we are using a snapshot we always reserve the default amount
5856  // of memory for each semispace because code in the snapshot has
5857  // write-barrier code that relies on the size and alignment of new
5858  // space. We therefore cannot use a larger max semispace size
5859  // than the default reserved semispace size.
5860  if (max_semispace_size_ > reserved_semispace_size_) {
5861  max_semispace_size_ = reserved_semispace_size_;
5862  if (FLAG_trace_gc) {
5863  PrintPID("Max semispace size cannot be more than %dkbytes\n",
5864  reserved_semispace_size_ >> 10);
5865  }
5866  }
5867  } else {
5868  // If we are not using snapshots we reserve space for the actual
5869  // max semispace size.
5870  reserved_semispace_size_ = max_semispace_size_;
5871  }
5872 
5873  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5874  if (max_executable_size > 0) {
5875  max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5876  }
5877 
5878  // The max executable size must be less than or equal to the max old
5879  // generation size.
5880  if (max_executable_size_ > max_old_generation_size_) {
5881  max_executable_size_ = max_old_generation_size_;
5882  }
5883 
5884  // The new space size must be a power of two to support single-bit testing
5885  // for containment.
5886  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5887  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5888  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5889  external_allocation_limit_ = 16 * max_semispace_size_;
5890 
5891  // The old generation is paged and needs at least one page for each space.
5892  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5893  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5894  Page::kPageSize),
5895  RoundUp(max_old_generation_size_,
5896  Page::kPageSize));
5897 
5898  configured_ = true;
5899  return true;
5900 }
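// Illustrative sketch (not part of the original heap.cc): all three limits
// are byte counts; the semispace size is rounded up to a power of two and the
// old-generation/executable limits to whole pages, as implemented above. The
// helper and the values below are hypothetical, and ConfigureHeap only takes
// effect before SetUp() has run.
static bool SketchConfigure(Heap* heap) {
  return heap->ConfigureHeap(8 * MB,     // max semispace size
                             700 * MB,   // max old generation size
                             256 * MB);  // max executable size
}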
5901 
5902 
5903 bool Heap::ConfigureHeapDefault() {
5904  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5905  static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5906  static_cast<intptr_t>(FLAG_max_executable_size) * MB);
5907 }
5908 
5909 
5910 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5911  *stats->start_marker = HeapStats::kStartMarker;
5912  *stats->end_marker = HeapStats::kEndMarker;
5913  *stats->new_space_size = new_space_.SizeAsInt();
5914  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5915  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5916  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5917  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5918  *stats->old_data_space_capacity = old_data_space_->Capacity();
5919  *stats->code_space_size = code_space_->SizeOfObjects();
5920  *stats->code_space_capacity = code_space_->Capacity();
5921  *stats->map_space_size = map_space_->SizeOfObjects();
5922  *stats->map_space_capacity = map_space_->Capacity();
5923  *stats->cell_space_size = cell_space_->SizeOfObjects();
5924  *stats->cell_space_capacity = cell_space_->Capacity();
5925  *stats->lo_space_size = lo_space_->Size();
5926  isolate_->global_handles()->RecordStats(stats);
5927  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5928  *stats->memory_allocator_capacity =
5929  isolate()->memory_allocator()->Size() +
5930  isolate()->memory_allocator()->Available();
5931  *stats->os_error = OS::GetLastError();
5933  if (take_snapshot) {
5934  HeapIterator iterator;
5935  for (HeapObject* obj = iterator.next();
5936  obj != NULL;
5937  obj = iterator.next()) {
5938  InstanceType type = obj->map()->instance_type();
5939  ASSERT(0 <= type && type <= LAST_TYPE);
5940  stats->objects_per_type[type]++;
5941  stats->size_per_type[type] += obj->Size();
5942  }
5943  }
5944 }
5945 
5946 
5947 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5948  return old_pointer_space_->SizeOfObjects()
5949  + old_data_space_->SizeOfObjects()
5950  + code_space_->SizeOfObjects()
5951  + map_space_->SizeOfObjects()
5952  + cell_space_->SizeOfObjects()
5953  + lo_space_->SizeOfObjects();
5954 }
5955 
5956 
5957 intptr_t Heap::PromotedExternalMemorySize() {
5958  if (amount_of_external_allocated_memory_
5959  <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5960  return amount_of_external_allocated_memory_
5961  - amount_of_external_allocated_memory_at_last_global_gc_;
5962 }
5963 
5964 #ifdef DEBUG
5965 
5966 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
5967 static const int kMarkTag = 2;
5968 
5969 
5970 class HeapDebugUtils {
5971  public:
5972  explicit HeapDebugUtils(Heap* heap)
5973  : search_for_any_global_(false),
5974  search_target_(NULL),
5975  found_target_(false),
5976  object_stack_(20),
5977  heap_(heap) {
5978  }
5979 
5980  class MarkObjectVisitor : public ObjectVisitor {
5981  public:
5982  explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
5983 
5984  void VisitPointers(Object** start, Object** end) {
5985  // Copy all HeapObject pointers in [start, end)
5986  for (Object** p = start; p < end; p++) {
5987  if ((*p)->IsHeapObject())
5988  utils_->MarkObjectRecursively(p);
5989  }
5990  }
5991 
5992  HeapDebugUtils* utils_;
5993  };
5994 
5995  void MarkObjectRecursively(Object** p) {
5996  if (!(*p)->IsHeapObject()) return;
5997 
5998  HeapObject* obj = HeapObject::cast(*p);
5999 
6000  Object* map = obj->map();
6001 
6002  if (!map->IsHeapObject()) return; // visited before
6003 
6004  if (found_target_) return; // stop if target found
6005  object_stack_.Add(obj);
6006  if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
6007  (!search_for_any_global_ && (obj == search_target_))) {
6008  found_target_ = true;
6009  return;
6010  }
6011 
6012  // not visited yet
6013  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6014 
6015  Address map_addr = map_p->address();
6016 
6017  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6018 
6019  MarkObjectRecursively(&map);
6020 
6021  MarkObjectVisitor mark_visitor(this);
6022 
6023  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
6024  &mark_visitor);
6025 
6026  if (!found_target_) // don't pop if found the target
6027  object_stack_.RemoveLast();
6028  }
6029 
6030 
6031  class UnmarkObjectVisitor : public ObjectVisitor {
6032  public:
6033  explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
6034 
6035  void VisitPointers(Object** start, Object** end) {
6036  // Copy all HeapObject pointers in [start, end)
6037  for (Object** p = start; p < end; p++) {
6038  if ((*p)->IsHeapObject())
6039  utils_->UnmarkObjectRecursively(p);
6040  }
6041  }
6042 
6043  HeapDebugUtils* utils_;
6044  };
6045 
6046 
6047  void UnmarkObjectRecursively(Object** p) {
6048  if (!(*p)->IsHeapObject()) return;
6049 
6050  HeapObject* obj = HeapObject::cast(*p);
6051 
6052  Object* map = obj->map();
6053 
6054  if (map->IsHeapObject()) return; // unmarked already
6055 
6056  Address map_addr = reinterpret_cast<Address>(map);
6057 
6058  map_addr -= kMarkTag;
6059 
6060  ASSERT_TAG_ALIGNED(map_addr);
6061 
6062  HeapObject* map_p = HeapObject::FromAddress(map_addr);
6063 
6064  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6065 
6066  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
6067 
6068  UnmarkObjectVisitor unmark_visitor(this);
6069 
6070  obj->IterateBody(Map::cast(map_p)->instance_type(),
6071  obj->SizeFromMap(Map::cast(map_p)),
6072  &unmark_visitor);
6073  }
6074 
6075 
6076  void MarkRootObjectRecursively(Object** root) {
6077  if (search_for_any_global_) {
6078  ASSERT(search_target_ == NULL);
6079  } else {
6080  ASSERT(search_target_->IsHeapObject());
6081  }
6082  found_target_ = false;
6083  object_stack_.Clear();
6084 
6085  MarkObjectRecursively(root);
6086  UnmarkObjectRecursively(root);
6087 
6088  if (found_target_) {
6089  PrintF("=====================================\n");
6090  PrintF("==== Path to object ====\n");
6091  PrintF("=====================================\n\n");
6092 
6093  ASSERT(!object_stack_.is_empty());
6094  for (int i = 0; i < object_stack_.length(); i++) {
6095  if (i > 0) PrintF("\n |\n |\n V\n\n");
6096  Object* obj = object_stack_[i];
6097  obj->Print();
6098  }
6099  PrintF("=====================================\n");
6100  }
6101  }
6102 
6103  // Helper class for visiting HeapObjects recursively.
6104  class MarkRootVisitor: public ObjectVisitor {
6105  public:
6106  explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
6107 
6108  void VisitPointers(Object** start, Object** end) {
6109  // Visit all HeapObject pointers in [start, end)
6110  for (Object** p = start; p < end; p++) {
6111  if ((*p)->IsHeapObject())
6112  utils_->MarkRootObjectRecursively(p);
6113  }
6114  }
6115 
6116  HeapDebugUtils* utils_;
6117  };
6118 
6119  bool search_for_any_global_;
6120  Object* search_target_;
6121  bool found_target_;
6122  List<Object*> object_stack_;
6123  Heap* heap_;
6124 
6125  friend class Heap;
6126 };
6127 
6128 #endif
6129 
6130 
6131 V8_DECLARE_ONCE(initialize_gc_once);
6132 
6133 static void InitializeGCOnce() {
6134  InitializeScavengingVisitorsTables();
6135  NewSpaceScavenger::Initialize();
6136  MarkCompactCollector::Initialize();
6137 }
6138 
6139 bool Heap::SetUp(bool create_heap_objects) {
6140 #ifdef DEBUG
6141  allocation_timeout_ = FLAG_gc_interval;
6142  debug_utils_ = new HeapDebugUtils(this);
6143 #endif
6144 
6145  // Initialize heap spaces and initial maps and objects. Whenever something
6146  // goes wrong, just return false. The caller should check the results and
6147  // call Heap::TearDown() to release allocated memory.
6148  //
6149  // If the heap is not yet configured (e.g. through the API), configure it.
6150  // Configuration is based on the flags new-space-size (really the semispace
6151  // size) and old-space-size if set or the initial values of semispace_size_
6152  // and old_generation_size_ otherwise.
6153  if (!configured_) {
6154  if (!ConfigureHeapDefault()) return false;
6155  }
6156 
6157  CallOnce(&initialize_gc_once, &InitializeGCOnce);
6158 
6159  MarkMapPointersAsEncoded(false);
6160 
6161  // Set up memory allocator.
6162  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6163  return false;
6164 
6165  // Set up new space.
6166  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6167  return false;
6168  }
6169 
6170  // Initialize old pointer space.
6171  old_pointer_space_ =
6172  new OldSpace(this,
6173  max_old_generation_size_,
6174  OLD_POINTER_SPACE,
6175  NOT_EXECUTABLE);
6176  if (old_pointer_space_ == NULL) return false;
6177  if (!old_pointer_space_->SetUp()) return false;
6178 
6179  // Initialize old data space.
6180  old_data_space_ =
6181  new OldSpace(this,
6182  max_old_generation_size_,
6183  OLD_DATA_SPACE,
6184  NOT_EXECUTABLE);
6185  if (old_data_space_ == NULL) return false;
6186  if (!old_data_space_->SetUp()) return false;
6187 
6188  // Initialize the code space, set its maximum capacity to the old
6189  // generation size. It needs executable memory.
6190  // On 64-bit platform(s), we put all code objects in a 2 GB range of
6191  // virtual address space, so that they can call each other with near calls.
6192  if (code_range_size_ > 0) {
6193  if (!isolate_->code_range()->SetUp(code_range_size_)) {
6194  return false;
6195  }
6196  }
6197 
6198  code_space_ =
6199  new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6200  if (code_space_ == NULL) return false;
6201  if (!code_space_->SetUp()) return false;
6202 
6203  // Initialize map space.
6204  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6205  if (map_space_ == NULL) return false;
6206  if (!map_space_->SetUp()) return false;
6207 
6208  // Initialize global property cell space.
6209  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6210  if (cell_space_ == NULL) return false;
6211  if (!cell_space_->SetUp()) return false;
6212 
6213  // The large object code space may contain code or data. We set the memory
6214  // to be non-executable here for safety, but this means we need to enable it
6215  // explicitly when allocating large code objects.
6216  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6217  if (lo_space_ == NULL) return false;
6218  if (!lo_space_->SetUp()) return false;
6219 
6220  // Set up the seed that is used to randomize the string hash function.
6221  ASSERT(hash_seed() == 0);
6222  if (FLAG_randomize_hashes) {
6223  if (FLAG_hash_seed == 0) {
6224  set_hash_seed(
6225  Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6226  } else {
6227  set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6228  }
6229  }
6230 
6231  if (create_heap_objects) {
6232  // Create initial maps.
6233  if (!CreateInitialMaps()) return false;
6234  if (!CreateApiObjects()) return false;
6235 
6236  // Create initial objects
6237  if (!CreateInitialObjects()) return false;
6238 
6239  native_contexts_list_ = undefined_value();
6240  }
6241 
6242  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6243  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6244 
6245  store_buffer()->SetUp();
6246 
6247  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6248 
6249  return true;
6250 }
6251 
6252 
6253 void Heap::SetStackLimits() {
6254  ASSERT(isolate_ != NULL);
6255  ASSERT(isolate_ == isolate());
6256  // On 64 bit machines, pointers are generally out of range of Smis. We write
6257  // something that looks like an out of range Smi to the GC.
6258 
6259  // Set up the special root array entries containing the stack limits.
6260  // These are actually addresses, but the tag makes the GC ignore it.
6261  roots_[kStackLimitRootIndex] =
6262  reinterpret_cast<Object*>(
6263  (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6264  roots_[kRealStackLimitRootIndex] =
6265  reinterpret_cast<Object*>(
6266  (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6267 }
6268 
6269 
6270 void Heap::TearDown() {
6271 #ifdef VERIFY_HEAP
6272  if (FLAG_verify_heap) {
6273  Verify();
6274  }
6275 #endif
6276 
6277  if (FLAG_print_cumulative_gc_stat) {
6278  PrintF("\n\n");
6279  PrintF("gc_count=%d ", gc_count_);
6280  PrintF("mark_sweep_count=%d ", ms_count_);
6281  PrintF("max_gc_pause=%d ", get_max_gc_pause());
6282  PrintF("total_gc_time=%d ", total_gc_time_ms_);
6283  PrintF("min_in_mutator=%d ", get_min_in_mutator());
6284  PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6285  get_max_alive_after_gc());
6286  PrintF("\n\n");
6287  }
6288 
6289  isolate_->global_handles()->TearDown();
6290 
6291  external_string_table_.TearDown();
6292 
6293  new_space_.TearDown();
6294 
6295  if (old_pointer_space_ != NULL) {
6296  old_pointer_space_->TearDown();
6297  delete old_pointer_space_;
6298  old_pointer_space_ = NULL;
6299  }
6300 
6301  if (old_data_space_ != NULL) {
6302  old_data_space_->TearDown();
6303  delete old_data_space_;
6304  old_data_space_ = NULL;
6305  }
6306 
6307  if (code_space_ != NULL) {
6308  code_space_->TearDown();
6309  delete code_space_;
6310  code_space_ = NULL;
6311  }
6312 
6313  if (map_space_ != NULL) {
6314  map_space_->TearDown();
6315  delete map_space_;
6316  map_space_ = NULL;
6317  }
6318 
6319  if (cell_space_ != NULL) {
6320  cell_space_->TearDown();
6321  delete cell_space_;
6322  cell_space_ = NULL;
6323  }
6324 
6325  if (lo_space_ != NULL) {
6326  lo_space_->TearDown();
6327  delete lo_space_;
6328  lo_space_ = NULL;
6329  }
6330 
6331  store_buffer()->TearDown();
6332  incremental_marking()->TearDown();
6333 
6334  isolate_->memory_allocator()->TearDown();
6335 
6336  delete relocation_mutex_;
6337 
6338 #ifdef DEBUG
6339  delete debug_utils_;
6340  debug_utils_ = NULL;
6341 #endif
6342 }
6343 
6344 
6346  // Try to shrink all paged spaces.
6347  PagedSpaces spaces;
6348  for (PagedSpace* space = spaces.next();
6349  space != NULL;
6350  space = spaces.next()) {
6351  space->ReleaseAllUnusedPages();
6352  }
6353 }
6354 
6355 
6356 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6357  ASSERT(callback != NULL);
6358  GCPrologueCallbackPair pair(callback, gc_type);
6359  ASSERT(!gc_prologue_callbacks_.Contains(pair));
6360  return gc_prologue_callbacks_.Add(pair);
6361 }
6362 
6363 
6364 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6365  ASSERT(callback != NULL);
6366  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6367  if (gc_prologue_callbacks_[i].callback == callback) {
6368  gc_prologue_callbacks_.Remove(i);
6369  return;
6370  }
6371  }
6372  UNREACHABLE();
6373 }
6374 
6375 
6376 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6377  ASSERT(callback != NULL);
6378  GCEpilogueCallbackPair pair(callback, gc_type);
6379  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6380  return gc_epilogue_callbacks_.Add(pair);
6381 }
6382 
6383 
6384 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
6385  ASSERT(callback != NULL);
6386  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6387  if (gc_epilogue_callbacks_[i].callback == callback) {
6388  gc_epilogue_callbacks_.Remove(i);
6389  return;
6390  }
6391  }
6392  UNREACHABLE();
6393 }
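// Illustrative sketch (not part of the original heap.cc): the callbacks
// managed above are plain function pointers from the public API, filtered by
// the GC type they were registered for. The helper below is hypothetical.
static void SketchOnFullGCPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
  USE(type);
  USE(flags);
  // An embedder might drop caches here before a mark-sweep/compact cycle.
}
// Hypothetical registration:
//   heap->AddGCPrologueCallback(SketchOnFullGCPrologue,
//                               v8::kGCTypeMarkSweepCompact);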
6394 
6395 
6396 #ifdef DEBUG
6397 
6398 class PrintHandleVisitor: public ObjectVisitor {
6399  public:
6400  void VisitPointers(Object** start, Object** end) {
6401  for (Object** p = start; p < end; p++)
6402  PrintF(" handle %p to %p\n",
6403  reinterpret_cast<void*>(p),
6404  reinterpret_cast<void*>(*p));
6405  }
6406 };
6407 
6408 void Heap::PrintHandles() {
6409  PrintF("Handles:\n");
6410  PrintHandleVisitor v;
6411  isolate_->handle_scope_implementer()->Iterate(&v);
6412 }
6413 
6414 #endif
6415 
6416 
6417 Space* AllSpaces::next() {
6418  switch (counter_++) {
6419  case NEW_SPACE:
6420  return HEAP->new_space();
6421  case OLD_POINTER_SPACE:
6422  return HEAP->old_pointer_space();
6423  case OLD_DATA_SPACE:
6424  return HEAP->old_data_space();
6425  case CODE_SPACE:
6426  return HEAP->code_space();
6427  case MAP_SPACE:
6428  return HEAP->map_space();
6429  case CELL_SPACE:
6430  return HEAP->cell_space();
6431  case LO_SPACE:
6432  return HEAP->lo_space();
6433  default:
6434  return NULL;
6435  }
6436 }
6437 
6438 
6439 PagedSpace* PagedSpaces::next() {
6440  switch (counter_++) {
6441  case OLD_POINTER_SPACE:
6442  return HEAP->old_pointer_space();
6443  case OLD_DATA_SPACE:
6444  return HEAP->old_data_space();
6445  case CODE_SPACE:
6446  return HEAP->code_space();
6447  case MAP_SPACE:
6448  return HEAP->map_space();
6449  case CELL_SPACE:
6450  return HEAP->cell_space();
6451  default:
6452  return NULL;
6453  }
6454 }
6455 
6456 
6457 
6458 OldSpace* OldSpaces::next() {
6459  switch (counter_++) {
6460  case OLD_POINTER_SPACE:
6461  return HEAP->old_pointer_space();
6462  case OLD_DATA_SPACE:
6463  return HEAP->old_data_space();
6464  case CODE_SPACE:
6465  return HEAP->code_space();
6466  default:
6467  return NULL;
6468  }
6469 }
6470 
6471 
6472 SpaceIterator::SpaceIterator()
6473  : current_space_(FIRST_SPACE),
6474  iterator_(NULL),
6475  size_func_(NULL) {
6476 }
6477 
6478 
6479 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
6480  : current_space_(FIRST_SPACE),
6481  iterator_(NULL),
6482  size_func_(size_func) {
6483 }
6484 
6485 
6486 SpaceIterator::~SpaceIterator() {
6487  // Delete active iterator if any.
6488  delete iterator_;
6489 }
6490 
6491 
6492 bool SpaceIterator::has_next() {
6493  // Iterate until no more spaces.
6494  return current_space_ != LAST_SPACE;
6495 }
6496 
6497 
6498 ObjectIterator* SpaceIterator::next() {
6499  if (iterator_ != NULL) {
6500  delete iterator_;
6501  iterator_ = NULL;
6502  // Move to the next space
6503  current_space_++;
6504  if (current_space_ > LAST_SPACE) {
6505  return NULL;
6506  }
6507  }
6508 
6509  // Return iterator for the new current space.
6510  return CreateIterator();
6511 }
6512 
6513 
6514 // Create an iterator for the space to iterate.
6515 ObjectIterator* SpaceIterator::CreateIterator() {
6516  ASSERT(iterator_ == NULL);
6517 
6518  switch (current_space_) {
6519  case NEW_SPACE:
6520  iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
6521  break;
6522  case OLD_POINTER_SPACE:
6523  iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
6524  break;
6525  case OLD_DATA_SPACE:
6526  iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
6527  break;
6528  case CODE_SPACE:
6529  iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
6530  break;
6531  case MAP_SPACE:
6532  iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
6533  break;
6534  case CELL_SPACE:
6535  iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
6536  break;
6537  case LO_SPACE:
6538  iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
6539  break;
6540  }
6541 
6542  // Return the newly allocated iterator;
6543  ASSERT(iterator_ != NULL);
6544  return iterator_;
6545 }
6546 
6547 
6548 class HeapObjectsFilter {
6549  public:
6550  virtual ~HeapObjectsFilter() {}
6551  virtual bool SkipObject(HeapObject* object) = 0;
6552 };
6553 
6554 
6555 class UnreachableObjectsFilter : public HeapObjectsFilter {
6556  public:
6557  UnreachableObjectsFilter() {
6558  MarkReachableObjects();
6559  }
6560 
6561  ~UnreachableObjectsFilter() {
6562  Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
6563  }
6564 
6565  bool SkipObject(HeapObject* object) {
6566  MarkBit mark_bit = Marking::MarkBitFrom(object);
6567  return !mark_bit.Get();
6568  }
6569 
6570  private:
6571  class MarkingVisitor : public ObjectVisitor {
6572  public:
6573  MarkingVisitor() : marking_stack_(10) {}
6574 
6575  void VisitPointers(Object** start, Object** end) {
6576  for (Object** p = start; p < end; p++) {
6577  if (!(*p)->IsHeapObject()) continue;
6578  HeapObject* obj = HeapObject::cast(*p);
6579  MarkBit mark_bit = Marking::MarkBitFrom(obj);
6580  if (!mark_bit.Get()) {
6581  mark_bit.Set();
6582  marking_stack_.Add(obj);
6583  }
6584  }
6585  }
6586 
6587  void TransitiveClosure() {
6588  while (!marking_stack_.is_empty()) {
6589  HeapObject* obj = marking_stack_.RemoveLast();
6590  obj->Iterate(this);
6591  }
6592  }
6593 
6594  private:
6595  List<HeapObject*> marking_stack_;
6596  };
6597 
6598  void MarkReachableObjects() {
6599  Heap* heap = Isolate::Current()->heap();
6600  MarkingVisitor visitor;
6601  heap->IterateRoots(&visitor, VISIT_ALL);
6602  visitor.TransitiveClosure();
6603  }
6604 
6605  AssertNoAllocation no_alloc;
6606 };
6607 
6608 
6609 HeapIterator::HeapIterator()
6610  : filtering_(HeapIterator::kNoFiltering),
6611  filter_(NULL) {
6612  Init();
6613 }
6614 
6615 
6616 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
6617  : filtering_(filtering),
6618  filter_(NULL) {
6619  Init();
6620 }
6621 
6622 
6623 HeapIterator::~HeapIterator() {
6624  Shutdown();
6625 }
6626 
6627 
6628 void HeapIterator::Init() {
6629  // Start the iteration.
6630  space_iterator_ = new SpaceIterator;
6631  switch (filtering_) {
6632  case kFilterUnreachable:
6633  filter_ = new UnreachableObjectsFilter;
6634  break;
6635  default:
6636  break;
6637  }
6638  object_iterator_ = space_iterator_->next();
6639 }
6640 
6641 
6642 void HeapIterator::Shutdown() {
6643 #ifdef DEBUG
6644  // Assert that in filtering mode we have iterated through all
6645  // objects. Otherwise, heap will be left in an inconsistent state.
6646  if (filtering_ != kNoFiltering) {
6647  ASSERT(object_iterator_ == NULL);
6648  }
6649 #endif
6650  // Make sure the last iterator is deallocated.
6651  delete space_iterator_;
6652  space_iterator_ = NULL;
6653  object_iterator_ = NULL;
6654  delete filter_;
6655  filter_ = NULL;
6656 }
6657 
6658 
6659 HeapObject* HeapIterator::next() {
6660  if (filter_ == NULL) return NextObject();
6661 
6662  HeapObject* obj = NextObject();
6663  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6664  return obj;
6665 }
6666 
6667 
6668 HeapObject* HeapIterator::NextObject() {
6669  // No iterator means we are done.
6670  if (object_iterator_ == NULL) return NULL;
6671 
6672  if (HeapObject* obj = object_iterator_->next_object()) {
6673  // If the current iterator has more objects we are fine.
6674  return obj;
6675  } else {
6676  // Go through the spaces looking for one that has objects.
6677  while (space_iterator_->has_next()) {
6678  object_iterator_ = space_iterator_->next();
6679  if (HeapObject* obj = object_iterator_->next_object()) {
6680  return obj;
6681  }
6682  }
6683  }
6684  // Done with the last space.
6685  object_iterator_ = NULL;
6686  return NULL;
6687 }
6688 
6689 
6690 void HeapIterator::reset() {
6691  // Restart the iterator.
6692  Shutdown();
6693  Init();
6694 }
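// Illustrative sketch (not part of the original heap.cc): a typical
// HeapIterator walk, mirroring the take_snapshot loop in RecordStats(). The
// heap must be iterable first (see EnsureHeapIsIterable()). The helper below
// is hypothetical.
static int SketchCountLiveHeapObjects() {
  int count = 0;
  HeapIterator iterator;
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    count++;
  }
  return count;
}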
6695 
6696 
6697 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
6698 
6699 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
6700 
6701 class PathTracer::MarkVisitor: public ObjectVisitor {
6702  public:
6703  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6704  void VisitPointers(Object** start, Object** end) {
6705  // Scan all HeapObject pointers in [start, end)
6706  for (Object** p = start; !tracer_->found() && (p < end); p++) {
6707  if ((*p)->IsHeapObject())
6708  tracer_->MarkRecursively(p, this);
6709  }
6710  }
6711 
6712  private:
6713  PathTracer* tracer_;
6714 };
6715 
6716 
6717 class PathTracer::UnmarkVisitor: public ObjectVisitor {
6718  public:
6719  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6720  void VisitPointers(Object** start, Object** end) {
6721  // Scan all HeapObject pointers in [start, end)
6722  for (Object** p = start; p < end; p++) {
6723  if ((*p)->IsHeapObject())
6724  tracer_->UnmarkRecursively(p, this);
6725  }
6726  }
6727 
6728  private:
6729  PathTracer* tracer_;
6730 };
6731 
6732 
6733 void PathTracer::VisitPointers(Object** start, Object** end) {
6734  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6735  // Visit all HeapObject pointers in [start, end)
6736  for (Object** p = start; !done && (p < end); p++) {
6737  if ((*p)->IsHeapObject()) {
6738  TracePathFrom(p);
6739  done = ((what_to_find_ == FIND_FIRST) && found_target_);
6740  }
6741  }
6742 }
6743 
6744 
6745 void PathTracer::Reset() {
6746  found_target_ = false;
6747  object_stack_.Clear();
6748 }
6749 
6750 
6751 void PathTracer::TracePathFrom(Object** root) {
6752  ASSERT((search_target_ == kAnyGlobalObject) ||
6753  search_target_->IsHeapObject());
6754  found_target_in_trace_ = false;
6755  Reset();
6756 
6757  MarkVisitor mark_visitor(this);
6758  MarkRecursively(root, &mark_visitor);
6759 
6760  UnmarkVisitor unmark_visitor(this);
6761  UnmarkRecursively(root, &unmark_visitor);
6762 
6763  ProcessResults();
6764 }
6765 
6766 
6767 static bool SafeIsNativeContext(HeapObject* obj) {
6768  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6769 }
6770 
6771 
6772 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6773  if (!(*p)->IsHeapObject()) return;
6774 
6775  HeapObject* obj = HeapObject::cast(*p);
6776 
6777  Object* map = obj->map();
6778 
6779  if (!map->IsHeapObject()) return; // visited before
6780 
6781  if (found_target_in_trace_) return; // stop if target found
6782  object_stack_.Add(obj);
6783  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6784  (obj == search_target_)) {
6785  found_target_in_trace_ = true;
6786  found_target_ = true;
6787  return;
6788  }
6789 
6790  bool is_native_context = SafeIsNativeContext(obj);
6791 
6792  // not visited yet
6793  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
6794 
6795  Address map_addr = map_p->address();
6796 
6797  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
6798 
6799  // Scan the object body.
6800  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6801  // This is specialized to scan Context's properly.
6802  Object** start = reinterpret_cast<Object**>(obj->address() +
6803  Context::kHeaderSize);
6804  Object** end = reinterpret_cast<Object**>(obj->address() +
6805  Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
6806  mark_visitor->VisitPointers(start, end);
6807  } else {
6808  obj->IterateBody(map_p->instance_type(),
6809  obj->SizeFromMap(map_p),
6810  mark_visitor);
6811  }
6812 
6813  // Scan the map after the body because the body is a lot more interesting
6814  // when doing leak detection.
6815  MarkRecursively(&map, mark_visitor);
6816 
6817  if (!found_target_in_trace_) // don't pop if found the target
6818  object_stack_.RemoveLast();
6819 }
6820 
6821 
6822 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6823  if (!(*p)->IsHeapObject()) return;
6824 
6825  HeapObject* obj = HeapObject::cast(*p);
6826 
6827  Object* map = obj->map();
6828 
6829  if (map->IsHeapObject()) return; // unmarked already
6830 
6831  Address map_addr = reinterpret_cast<Address>(map);
6832 
6833  map_addr -= kMarkTag;
6834 
6835  ASSERT_TAG_ALIGNED(map_addr);
6836 
6837  HeapObject* map_p = HeapObject::FromAddress(map_addr);
6838 
6839  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
6840 
6841  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
6842 
6843  obj->IterateBody(Map::cast(map_p)->instance_type(),
6844  obj->SizeFromMap(Map::cast(map_p)),
6845  unmark_visitor);
6846 }
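
The mark/unmark pair above avoids any side table: MarkRecursively marks an object "visited" by adding kMarkTag to its map pointer (so the subsequent map->IsHeapObject() test fails), and UnmarkRecursively subtracts the tag to restore the original map. A minimal standalone sketch of the same in-place mark-then-restore traversal, using a hypothetical Node type with a spare low bit instead of V8's HeapObject and Map (illustration only, not part of heap.cc):

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical graph node; the low bit of `meta` plays the role V8's
// kMarkTag plays on the map pointer: a set bit means "already visited".
struct Node {
  std::uintptr_t meta = 0;          // stand-in for the map word
  std::vector<Node*> children;
  bool visited() const { return (meta & 1) != 0; }
  void mark() { meta |= 1; }
  void unmark() { meta &= ~static_cast<std::uintptr_t>(1); }
};

// The first pass marks in place so shared nodes are visited once; the second
// pass restores the original state, mirroring MarkRecursively/UnmarkRecursively.
void MarkRecursively(Node* n) {
  if (n->visited()) return;
  n->mark();
  for (Node* child : n->children) MarkRecursively(child);
}

void UnmarkRecursively(Node* n) {
  if (!n->visited()) return;
  n->unmark();
  for (Node* child : n->children) UnmarkRecursively(child);
}

int main() {
  Node a, b, c;
  a.children = {&b, &c};
  b.children = {&c};                // shared child is visited only once
  MarkRecursively(&a);
  assert(a.visited() && b.visited() && c.visited());
  UnmarkRecursively(&a);
  assert(!a.visited() && !b.visited() && !c.visited());
  return 0;
}
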
6847 
6848 
6849 void PathTracer::ProcessResults() {
6850  if (found_target_) {
6851  PrintF("=====================================\n");
6852  PrintF("==== Path to object ====\n");
6853  PrintF("=====================================\n\n");
6854 
6855  ASSERT(!object_stack_.is_empty());
6856  for (int i = 0; i < object_stack_.length(); i++) {
6857  if (i > 0) PrintF("\n |\n |\n V\n\n");
6858  Object* obj = object_stack_[i];
6859  obj->Print();
6860  }
6861  PrintF("=====================================\n");
6862  }
6863 }
6864 #endif // DEBUG || LIVE_OBJECT_LIST
6865 
6866 
6867 #ifdef DEBUG
6868 // Triggers a depth-first traversal of reachable objects from one
6869 // given root object and finds a path to a specific heap object and
6870 // prints it.
6871 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6872  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6873  tracer.VisitPointer(&root);
6874 }
6875 
6876 
6877 // Triggers a depth-first traversal of reachable objects from roots
6878 // and finds a path to a specific heap object and prints it.
6879 void Heap::TracePathToObject(Object* target) {
6880  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6881  IterateRoots(&tracer, VISIT_ONLY_STRONG);
6882 }
6883 
6884 
6885 // Triggers a depth-first traversal of reachable objects from roots
6886 // and finds a path to any global object and prints it. Useful for
6887 // determining the source for leaks of global objects.
6888 void Heap::TracePathToGlobal() {
6889  PathTracer tracer(PathTracer::kAnyGlobalObject,
6890  PathTracer::FIND_ALL,
6891  VISIT_ALL);
6892  IterateRoots(&tracer, VISIT_ONLY_STRONG);
6893 }
6894 #endif
6895 
6896 
6897 static intptr_t CountTotalHolesSize() {
6898  intptr_t holes_size = 0;
6899  OldSpaces spaces;
6900  for (OldSpace* space = spaces.next();
6901  space != NULL;
6902  space = spaces.next()) {
6903  holes_size += space->Waste() + space->Available();
6904  }
6905  return holes_size;
6906 }
6907 
6908 
6909 GCTracer::GCTracer(Heap* heap,
6910  const char* gc_reason,
6911  const char* collector_reason)
6912  : start_time_(0.0),
6913  start_object_size_(0),
6914  start_memory_size_(0),
6915  gc_count_(0),
6916  full_gc_count_(0),
6917  allocated_since_last_gc_(0),
6918  spent_in_mutator_(0),
6919  promoted_objects_size_(0),
6920  heap_(heap),
6921  gc_reason_(gc_reason),
6922  collector_reason_(collector_reason) {
6923  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6924  start_time_ = OS::TimeCurrentMillis();
6925  start_object_size_ = heap_->SizeOfObjects();
6926  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
6927 
6928  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6929  scopes_[i] = 0;
6930  }
6931 
6932  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6933 
6934  allocated_since_last_gc_ =
6935  heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6936 
6937  if (heap_->last_gc_end_timestamp_ > 0) {
6938  spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6939  }
6940 
6941  steps_count_ = heap_->incremental_marking()->steps_count();
6942  steps_took_ = heap_->incremental_marking()->steps_took();
6943  longest_step_ = heap_->incremental_marking()->longest_step();
6944  steps_count_since_last_gc_ =
6945  heap_->incremental_marking()->steps_count_since_last_gc();
6946  steps_took_since_last_gc_ =
6947  heap_->incremental_marking()->steps_took_since_last_gc();
6948 }
6949 
6950 
6951 GCTracer::~GCTracer() {
6952  // Printf ONE line iff flag is set.
6953  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6954 
6955  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6956 
6957  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6958  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6959 
6960  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
6961 
6962  // Update cumulative GC statistics if required.
6963  if (FLAG_print_cumulative_gc_stat) {
6964  heap_->total_gc_time_ms_ += time;
6965  heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6966  heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6967  heap_->alive_after_last_gc_);
6968  if (!first_gc) {
6969  heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6970  static_cast<int>(spent_in_mutator_));
6971  }
6972  } else if (FLAG_trace_gc_verbose) {
6973  heap_->total_gc_time_ms_ += time;
6974  }
6975 
6976  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
6977 
6978  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6979 
6980  if (!FLAG_trace_gc_nvp) {
6981  int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6982 
6983  double end_memory_size_mb =
6984  static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6985 
6986  PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6987  CollectorString(),
6988  static_cast<double>(start_object_size_) / MB,
6989  static_cast<double>(start_memory_size_) / MB,
6990  SizeOfHeapObjects(),
6991  end_memory_size_mb);
6992 
6993  if (external_time > 0) PrintF("%d / ", external_time);
6994  PrintF("%d ms", time);
6995  if (steps_count_ > 0) {
6996  if (collector_ == SCAVENGER) {
6997  PrintF(" (+ %d ms in %d steps since last GC)",
6998  static_cast<int>(steps_took_since_last_gc_),
6999  steps_count_since_last_gc_);
7000  } else {
7001  PrintF(" (+ %d ms in %d steps since start of marking, "
7002  "biggest step %f ms)",
7003  static_cast<int>(steps_took_),
7004  steps_count_,
7005  longest_step_);
7006  }
7007  }
7008 
7009  if (gc_reason_ != NULL) {
7010  PrintF(" [%s]", gc_reason_);
7011  }
7012 
7013  if (collector_reason_ != NULL) {
7014  PrintF(" [%s]", collector_reason_);
7015  }
7016 
7017  PrintF(".\n");
7018  } else {
7019  PrintF("pause=%d ", time);
7020  PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
7021  PrintF("gc=");
7022  switch (collector_) {
7023  case SCAVENGER:
7024  PrintF("s");
7025  break;
7026  case MARK_COMPACTOR:
7027  PrintF("ms");
7028  break;
7029  default:
7030  UNREACHABLE();
7031  }
7032  PrintF(" ");
7033 
7034  PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
7035  PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
7036  PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
7037  PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
7038  PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
7039  PrintF("new_new=%d ",
7040  static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
7041  PrintF("root_new=%d ",
7042  static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
7043  PrintF("old_new=%d ",
7044  static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
7045  PrintF("compaction_ptrs=%d ",
7046  static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
7047  PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
7048  Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
7049  PrintF("misc_compaction=%d ",
7050  static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
7051 
7052  PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7053  PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7054  PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7055  in_free_list_or_wasted_before_gc_);
7056  PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
7057 
7058  PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7059  PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7060 
7061  if (collector_ == SCAVENGER) {
7062  PrintF("stepscount=%d ", steps_count_since_last_gc_);
7063  PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
7064  } else {
7065  PrintF("stepscount=%d ", steps_count_);
7066  PrintF("stepstook=%d ", static_cast<int>(steps_took_));
7067  }
7068 
7069  PrintF("\n");
7070  }
7071 
7072  heap_->PrintShortHeapStatistics();
7073 }
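
GCTracer is used as a scope object: the constructor samples the clock, the object size, and the memory size when a collection starts, and the destructor prints the single trace line when the collection ends. A small sketch of the same RAII timing pattern with a hypothetical ScopedTimer class (not V8 API), assuming std::chrono for the clock:

#include <chrono>
#include <cstdio>

// Hypothetical RAII timer: construct it at the start of a phase and the
// destructor reports the elapsed time when the scope ends, the way
// ~GCTracer prints one line per collection.
class ScopedTimer {
 public:
  explicit ScopedTimer(const char* label)
      : label_(label), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto elapsed = std::chrono::steady_clock::now() - start_;
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed);
    std::printf("%s: %lld ms\n", label_,
                static_cast<long long>(ms.count()));
  }

 private:
  const char* label_;
  std::chrono::steady_clock::time_point start_;
};

void DoCollection() {
  ScopedTimer timer("collection");   // reported automatically on return
  // ... the work being measured ...
}

int main() {
  DoCollection();
  return 0;
}
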
7074 
7075 
7076 const char* GCTracer::CollectorString() {
7077  switch (collector_) {
7078  case SCAVENGER:
7079  return "Scavenge";
7080  case MARK_COMPACTOR:
7081  return "Mark-sweep";
7082  }
7083  return "Unknown GC";
7084 }
7085 
7086 
7087 int KeyedLookupCache::Hash(Map* map, String* name) {
7088  // Uses only lower 32 bits if pointers are larger.
7089  uintptr_t addr_hash =
7090  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7091  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7092 }
7093 
7094 
7095 int KeyedLookupCache::Lookup(Map* map, String* name) {
7096  int index = (Hash(map, name) & kHashMask);
7097  for (int i = 0; i < kEntriesPerBucket; i++) {
7098  Key& key = keys_[index + i];
7099  if ((key.map == map) && key.name->Equals(name)) {
7100  return field_offsets_[index + i];
7101  }
7102  }
7103  return kNotFound;
7104 }
7105 
7106 
7107 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
7108  String* symbol;
7109  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
7110  int index = (Hash(map, symbol) & kHashMask);
7111  // After a GC there will be free slots, so we use them in order (this may
7112  // help to get the most frequently used one in position 0).
7113  for (int i = 0; i< kEntriesPerBucket; i++) {
7114  Key& key = keys_[index];
7115  Object* free_entry_indicator = NULL;
7116  if (key.map == free_entry_indicator) {
7117  key.map = map;
7118  key.name = symbol;
7119  field_offsets_[index + i] = field_offset;
7120  return;
7121  }
7122  }
7123  // No free entry found in this bucket, so we move them all down one and
7124  // put the new entry at position zero.
7125  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7126  Key& key = keys_[index + i];
7127  Key& key2 = keys_[index + i - 1];
7128  key = key2;
7129  field_offsets_[index + i] = field_offsets_[index + i - 1];
7130  }
7131 
7132  // Write the new first entry.
7133  Key& key = keys_[index];
7134  key.map = map;
7135  key.name = symbol;
7136  field_offsets_[index] = field_offset;
7137  }
7138 }
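
Hash, Lookup, and Update above form a small set-associative cache: Hash selects a bucket of kEntriesPerBucket slots, Lookup probes every slot in that bucket, and Update either fills a free slot or shifts the bucket down one and writes the new entry at position zero. A standalone sketch of that bucket policy with hypothetical integer keys and sizes (the constants and the field layout are simplified and are not V8's):

#include <array>
#include <cassert>
#include <cstdint>
#include <optional>

// Hypothetical 2-way set-associative cache. On lookup, probe both slots of
// the bucket; on update, take a free slot if one exists, otherwise shift the
// bucket down one (dropping the entry in the last slot) and insert at the front.
class SmallCache {
  static constexpr int kEntriesPerBucket = 2;
  static constexpr int kBuckets = 64;
  static constexpr int kLength = kBuckets * kEntriesPerBucket;

  struct Entry {
    std::uint64_t key = 0;
    int value = 0;
    bool used = false;
  };
  std::array<Entry, kLength> entries_;

  static int BucketIndex(std::uint64_t key) {
    return static_cast<int>(key % kBuckets) * kEntriesPerBucket;
  }

 public:
  std::optional<int> Lookup(std::uint64_t key) const {
    int index = BucketIndex(key);
    for (int i = 0; i < kEntriesPerBucket; i++) {
      const Entry& e = entries_[index + i];
      if (e.used && e.key == key) return e.value;
    }
    return std::nullopt;
  }

  void Update(std::uint64_t key, int value) {
    int index = BucketIndex(key);
    for (int i = 0; i < kEntriesPerBucket; i++) {
      if (!entries_[index + i].used) {                   // free slot: take it
        entries_[index + i] = {key, value, true};
        return;
      }
    }
    for (int i = kEntriesPerBucket - 1; i > 0; i--) {    // bucket full: shift down
      entries_[index + i] = entries_[index + i - 1];
    }
    entries_[index] = {key, value, true};                // new entry at position zero
  }
};

int main() {
  SmallCache cache;
  cache.Update(42, 1);
  cache.Update(106, 2);            // 106 % 64 == 42: second slot of the same bucket
  cache.Update(170, 3);            // bucket full: shift drops the last slot's entry
  assert(cache.Lookup(170) == 3);
  assert(cache.Lookup(42) == 1);   // survived the shift, now in slot one
  assert(!cache.Lookup(106).has_value());
  return 0;
}
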
7139 
7140 
7141 void KeyedLookupCache::Clear() {
7142  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7143 }
7144 
7145 
7146 void DescriptorLookupCache::Clear() {
7147  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7148 }
7149 
7150 
7151 #ifdef DEBUG
7152 void Heap::GarbageCollectionGreedyCheck() {
7153  ASSERT(FLAG_gc_greedy);
7154  if (isolate_->bootstrapper()->IsActive()) return;
7155  if (disallow_allocation_failure()) return;
7156  CollectGarbage(NEW_SPACE);
7157 }
7158 #endif
7159 
7160 
7161 TranscendentalCache::SubCache::SubCache(Type t)
7162  : type_(t),
7163  isolate_(Isolate::Current()) {
7164  uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
7165  uint32_t in1 = 0xffffffffu; // generated by the FPU.
7166  for (int i = 0; i < kCacheSize; i++) {
7167  elements_[i].in[0] = in0;
7168  elements_[i].in[1] = in1;
7169  elements_[i].output = NULL;
7170  }
7171 }
7172 
7173 
7174 void TranscendentalCache::Clear() {
7175  for (int i = 0; i < kNumberOfCaches; i++) {
7176  if (caches_[i] != NULL) {
7177  delete caches_[i];
7178  caches_[i] = NULL;
7179  }
7180  }
7181 }
7182 
7183 
7184 void ExternalStringTable::CleanUp() {
7185  int last = 0;
7186  for (int i = 0; i < new_space_strings_.length(); ++i) {
7187  if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
7188  continue;
7189  }
7190  if (heap_->InNewSpace(new_space_strings_[i])) {
7191  new_space_strings_[last++] = new_space_strings_[i];
7192  } else {
7193  old_space_strings_.Add(new_space_strings_[i]);
7194  }
7195  }
7196  new_space_strings_.Rewind(last);
7197  last = 0;
7198  for (int i = 0; i < old_space_strings_.length(); ++i) {
7199  if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
7200  continue;
7201  }
7202  ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7203  old_space_strings_[last++] = old_space_strings_[i];
7204  }
7205  old_space_strings_.Rewind(last);
7206 #ifdef VERIFY_HEAP
7207  if (FLAG_verify_heap) {
7208  Verify();
7209  }
7210 #endif
7211 }
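
CleanUp above is a promote-or-keep pass: entries that are still in new space are compacted toward the front of new_space_strings_ (Rewind then trims the tail), while everything else is appended to old_space_strings_. A standalone sketch of the same partition with hypothetical integer elements and a predicate standing in for Heap::InNewSpace:

#include <cassert>
#include <vector>

// Keep elements that still satisfy the predicate in `young`, compacting them
// in place; move the rest to `old`. Mirrors ExternalStringTable::CleanUp.
void PromoteOrKeep(std::vector<int>& young, std::vector<int>& old,
                   bool (*still_young)(int)) {
  int last = 0;
  for (int value : young) {
    if (still_young(value)) {
      young[last++] = value;       // keep, compacted toward the front
    } else {
      old.push_back(value);        // promote
    }
  }
  young.resize(last);              // the equivalent of Rewind(last)
}

bool IsSmall(int v) { return v < 10; }

int main() {
  std::vector<int> young = {3, 42, 7, 100};
  std::vector<int> old;
  PromoteOrKeep(young, old, IsSmall);
  assert((young == std::vector<int>{3, 7}));
  assert((old == std::vector<int>{42, 100}));
  return 0;
}
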
7212 
7213 
7214 void ExternalStringTable::TearDown() {
7215  new_space_strings_.Free();
7216  old_space_strings_.Free();
7217 }
7218 
7219 
7220 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7221  chunk->set_next_chunk(chunks_queued_for_free_);
7222  chunks_queued_for_free_ = chunk;
7223 }
7224 
7225 
7226 void Heap::FreeQueuedChunks() {
7227  if (chunks_queued_for_free_ == NULL) return;
7228  MemoryChunk* next;
7229  MemoryChunk* chunk;
7230  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7231  next = chunk->next_chunk();
7232  chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7233 
7234  if (chunk->owner()->identity() == LO_SPACE) {
7235  // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7236  // If FromAnyPointerAddress encounters a slot that belongs to a large
7237  // chunk queued for deletion it will fail to find the chunk because
7238  // it tries to perform a search in the list of pages owned by the large
7239  // object space, and queued chunks were detached from that list.
7240  // To work around this we split the large chunk into normal kPageSize aligned
7241  // pieces and initialize size, owner and flags field of every piece.
7242  // If FromAnyPointerAddress encounters a slot that belongs to one of
7243  // these smaller pieces it will treat it as a slot on a normal Page.
7244  Address chunk_end = chunk->address() + chunk->size();
7245  MemoryChunk* inner = MemoryChunk::FromAddress(
7246  chunk->address() + Page::kPageSize);
7247  MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7248  while (inner <= inner_last) {
7249  // Size of a large chunk is always a multiple of
7250  // OS::AllocateAlignment() so there is always
7251  // enough space for a fake MemoryChunk header.
7252  Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7253  // Guard against overflow.
7254  if (area_end < inner->address()) area_end = chunk_end;
7255  inner->SetArea(inner->address(), area_end);
7256  inner->set_size(Page::kPageSize);
7257  inner->set_owner(lo_space());
7258  inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7259  inner = MemoryChunk::FromAddress(
7260  inner->address() + Page::kPageSize);
7261  }
7262  }
7263  }
7264  isolate_->heap()->store_buffer()->Compact();
7265  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7266  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7267  next = chunk->next_chunk();
7268  isolate_->memory_allocator()->Free(chunk);
7269  }
7270  chunks_queued_for_free_ = NULL;
7271 }
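
The large-object branch above walks a queued chunk one Page::kPageSize piece at a time, clamping the final piece to the chunk's end before stamping a fake header onto it. A standalone sketch of just that clamped page walk, with a hypothetical page size and plain addresses standing in for MemoryChunk (illustration only):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kPageSize = 1 << 20;   // hypothetical 1 MB pages

// Visit every page-sized piece after the first page of a large region,
// clamping the last piece to the region end, as FreeQueuedChunks does when
// it initializes fake headers for the store buffer filter.
void ForEachPagePiece(std::uintptr_t start, std::size_t size) {
  std::uintptr_t chunk_end = start + size;
  for (std::uintptr_t piece = start + kPageSize; piece < chunk_end;
       piece += kPageSize) {
    std::uintptr_t area_end = std::min(piece + kPageSize, chunk_end);
    std::printf("piece [%#zx, %#zx)\n",
                static_cast<std::size_t>(piece),
                static_cast<std::size_t>(area_end));
  }
}

int main() {
  ForEachPagePiece(0x40000000u, 5 * kPageSize / 2);   // a 2.5 MB "large chunk"
  return 0;
}
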
7272 
7273 
7274 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7275  uintptr_t p = reinterpret_cast<uintptr_t>(page);
7276  // Tag the page pointer to make it findable in the dump file.
7277  if (compacted) {
7278  p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7279  } else {
7280  p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7281  }
7282  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7283  reinterpret_cast<Address>(p);
7284  remembered_unmapped_pages_index_++;
7285  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7286 }
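
RememberUnmappedPage XORs a recognizable pattern into the page-offset bits of the address (0xc1ead for "cleared", 0x1d1ed for "I died"), so the ring-buffer entries stand out in a crash dump while the page-aligned high bits of the address remain readable. A small sketch of the tagging arithmetic, assuming a hypothetical 1 MB page size:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kPageSize = 1 << 20;   // hypothetical page size

// Only bits inside the page offset are flipped, so the page-aligned part of
// the address can still be read back from a dump.
std::uintptr_t TagUnmappedPage(std::uintptr_t page, bool compacted) {
  const std::uintptr_t tag = compacted ? 0xc1ead : 0x1d1ed;
  return page ^ (tag & (kPageSize - 1));
}

int main() {
  const std::uintptr_t page = 0x12300000u;      // page-aligned example address
  std::printf("%#zx\n",
              static_cast<std::size_t>(TagUnmappedPage(page, true)));
  return 0;
}
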
7287 
7288 
7289 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7290  memset(object_counts_, 0, sizeof(object_counts_));
7291  memset(object_sizes_, 0, sizeof(object_sizes_));
7292  if (clear_last_time_stats) {
7293  memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7294  memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7295  }
7296 }
7297 
7298 
7299 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7300 
7301 
7302 void Heap::CheckpointObjectStats() {
7303  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7304  Counters* counters = isolate()->counters();
7305 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7306  counters->count_of_##name()->Increment( \
7307  static_cast<int>(object_counts_[name])); \
7308  counters->count_of_##name()->Decrement( \
7309  static_cast<int>(object_counts_last_time_[name])); \
7310  counters->size_of_##name()->Increment( \
7311  static_cast<int>(object_sizes_[name])); \
7312  counters->size_of_##name()->Decrement( \
7313  static_cast<int>(object_sizes_last_time_[name]));
7314  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7315 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7316  int index;
7317 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7318  index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7319  counters->count_of_CODE_TYPE_##name()->Increment( \
7320  static_cast<int>(object_counts_[index])); \
7321  counters->count_of_CODE_TYPE_##name()->Decrement( \
7322  static_cast<int>(object_counts_last_time_[index])); \
7323  counters->size_of_CODE_TYPE_##name()->Increment( \
7324  static_cast<int>(object_sizes_[index])); \
7325  counters->size_of_CODE_TYPE_##name()->Decrement( \
7326  static_cast<int>(object_sizes_last_time_[index]));
7327  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7328 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7329 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7330  index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
7331  counters->count_of_FIXED_ARRAY_##name()->Increment( \
7332  static_cast<int>(object_counts_[index])); \
7333  counters->count_of_FIXED_ARRAY_##name()->Decrement( \
7334  static_cast<int>(object_counts_last_time_[index])); \
7335  counters->size_of_FIXED_ARRAY_##name()->Increment( \
7336  static_cast<int>(object_sizes_[index])); \
7337  counters->size_of_FIXED_ARRAY_##name()->Decrement( \
7338  static_cast<int>(object_sizes_last_time_[index]));
7339  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7340 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7341 
7342  memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7343  memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7344  ClearObjectStats();
7345 }
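
CheckpointObjectStats reports per-type deltas into monotonic counters: it increments each counter by the current tally, decrements it by the tally recorded at the previous checkpoint, then snapshots the current arrays into the *_last_time_ arrays and clears the live ones. A minimal sketch of that delta-reporting pattern with a hypothetical Counter type and a handful of type slots:

#include <cassert>
#include <cstring>

// Hypothetical monotonic counter, standing in for the Counters entries.
struct Counter {
  long value = 0;
  void Increment(long by) { value += by; }
  void Decrement(long by) { value -= by; }
};

constexpr int kTypes = 4;

struct ObjectStats {
  long counts[kTypes] = {};
  long counts_last_time[kTypes] = {};

  // Push the delta since the previous checkpoint into the counters, then
  // roll the snapshot forward and clear the live tallies, as
  // Heap::CheckpointObjectStats does.
  void Checkpoint(Counter (&counters)[kTypes]) {
    for (int i = 0; i < kTypes; i++) {
      counters[i].Increment(counts[i]);
      counters[i].Decrement(counts_last_time[i]);
    }
    std::memcpy(counts_last_time, counts, sizeof(counts));
    std::memset(counts, 0, sizeof(counts));
  }
};

int main() {
  Counter counters[kTypes];
  ObjectStats stats;
  stats.counts[0] = 10;
  stats.Checkpoint(counters);      // counter 0 becomes 10
  stats.counts[0] = 7;             // the next survey finds fewer objects
  stats.Checkpoint(counters);      // counter 0 drops to the new total, 7
  assert(counters[0].value == 7);
  return 0;
}
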
7346 
7347 } } // namespace v8::internal
static int SizeOfMarkedObject(HeapObject *object)
Definition: heap.h:2753
static bool IsBlack(MarkBit mark_bit)
Definition: mark-compact.h:70
byte * Address
Definition: globals.h:157
Address FromSpaceEnd()
Definition: spaces.h:2234
MUST_USE_RESULT MaybeObject * AllocateJSModule(Context *context, ScopeInfo *scope_info)
Definition: heap.cc:4064
intptr_t OldGenPromotionLimit(intptr_t old_gen_size)
Definition: heap.h:1402
ContextSlotCache * context_slot_cache()
Definition: isolate.h:853
const uint32_t kShortcutTypeTag
Definition: objects.h:515
void GarbageCollectionEpilogue()
Definition: heap.cc:449
static const int kEmptyStringHash
Definition: objects.h:7383
static const int kPointerFieldsEndOffset
Definition: objects.h:5144
MUST_USE_RESULT MaybeObject * CopyCode(Code *code)
Definition: heap.cc:3667
void set_elements_kind(ElementsKind elements_kind)
Definition: objects.h:4773
static void Clear(FixedArray *cache)
Definition: heap.cc:2904
static uchar TrailSurrogate(int char_code)
Definition: unicode.h:146
virtual intptr_t Size()
Definition: spaces.h:2515
static const int kMaxLength
Definition: objects.h:2366
Code * builtin(Name name)
Definition: builtins.h:320
TranscendentalCache * transcendental_cache() const
Definition: isolate.h:841
static int NumberOfHandles()
Definition: handles.cc:48
#define SLOW_ASSERT(condition)
Definition: checks.h:276
const intptr_t kSmiTagMask
Definition: v8.h:4016
MUST_USE_RESULT MaybeObject * AllocateStringFromUtf8Slow(Vector< const char > str, int non_ascii_start, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4430
static uchar LeadSurrogate(int char_code)
Definition: unicode.h:143
const intptr_t kDoubleAlignmentMask
Definition: v8globals.h:53
static const int kCodeEntryOffset
Definition: objects.h:6182
MUST_USE_RESULT MaybeObject * AllocateFixedArray(int length, PretenureFlag pretenure)
Definition: heap.cc:4819
void TearDown()
Definition: heap.cc:6270
static const int kMaxAsciiCharCode
Definition: objects.h:7327
bool Contains(const T &elm) const
Definition: list-inl.h:178
void SetStackLimits()
Definition: heap.cc:6253
bool NextGCIsLikelyToBeFull()
Definition: heap.h:1479
MUST_USE_RESULT MaybeObject * AllocateExternalStringFromAscii(const ExternalAsciiString::Resource *resource)
Definition: heap.cc:3438
MUST_USE_RESULT MaybeObject * AllocateSymbol(Vector< const char > str, int chars, uint32_t hash_field)
Definition: heap-inl.h:101
CodeRange * code_range()
Definition: isolate.h:825
void Callback(MemoryChunk *page, StoreBufferEvent event)
Definition: heap.cc:1122
intptr_t Available()
Definition: spaces.h:1506
#define INSTANCE_TYPE_LIST(V)
Definition: objects.h:318
static const int kSize
Definition: objects.h:7655
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:1827
MUST_USE_RESULT MaybeObject * CopyDropDescriptors()
Definition: objects.cc:4938
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)
MUST_USE_RESULT MaybeObject * CopyFixedDoubleArray(FixedDoubleArray *src)
Definition: heap-inl.h:181
intptr_t * old_pointer_space_size
Definition: heap.h:2179
MUST_USE_RESULT MaybeObject * AllocateFunctionPrototype(JSFunction *function)
Definition: heap.cc:3782
bool Contains(Address addr)
Definition: spaces.h:364
void set(int index, Object *value)
Definition: objects-inl.h:1757
CompilationCache * compilation_cache()
Definition: isolate.h:827
intptr_t * cell_space_size
Definition: heap.h:2187
static const int kMapHashShift
Definition: heap.h:2350
void PrintF(const char *format,...)
Definition: v8utils.cc:40
void PrintStack(StringStream *accumulator)
Definition: isolate.cc:739
#define ASSERT_TAG_ALIGNED(address)
Definition: v8checks.h:59
bool OldGenerationPromotionLimitReached()
Definition: heap.h:1386
void set_function_with_prototype(bool value)
Definition: objects-inl.h:3063
bool InNewSpace(Object *object)
Definition: heap-inl.h:288
static String * cast(Object *obj)
MUST_USE_RESULT MaybeObject * Add(Key key, Object *value, PropertyDetails details)
Definition: objects.cc:12556
static const int kArgumentsObjectSize
Definition: heap.h:895
bool IsHeapIterable()
Definition: heap.cc:5103
void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f)
MUST_USE_RESULT MaybeObject * AllocateFunctionContext(int length, JSFunction *function)
Definition: heap.cc:4999
MUST_USE_RESULT MaybeObject * Allocate(Map *map, AllocationSpace space)
Definition: heap.cc:3749
MUST_USE_RESULT MaybeObject * AllocateSubString(String *buffer, int start, int end, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3344
int * new_space_capacity
Definition: heap.h:2178
void(* ObjectSlotCallback)(HeapObject **from, HeapObject *to)
Definition: store-buffer.h:42
HandleScopeImplementer * handle_scope_implementer()
Definition: isolate.h:864
void set_opt_count(int opt_count)
unsigned Utf16Length()
Definition: unicode.cc:342
static DescriptorArray * cast(Object *obj)
static Failure * InternalError()
Definition: objects-inl.h:1019
void IterateWeakRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:5746
bool SkipObject(HeapObject *object)
Definition: heap.cc:6565
static int SizeOf(Map *map, HeapObject *object)
Definition: objects.h:2393
MUST_USE_RESULT MaybeObject * AllocateRaw(int size_in_bytes)
Definition: spaces-inl.h:263
Isolate * isolate()
Definition: heap-inl.h:503
MUST_USE_RESULT MaybeObject * ReinitializeJSGlobalProxy(JSFunction *constructor, JSGlobalProxy *global)
Definition: heap.cc:4386
int unused_property_fields()
Definition: objects-inl.h:3019
void set_length(Smi *length)
Definition: objects-inl.h:5278
bool SetUp(const size_t requested_size)
Definition: spaces.cc:135
MUST_USE_RESULT MaybeObject * CopyFixedDoubleArrayWithMap(FixedDoubleArray *src, Map *map)
Definition: heap.cc:4737
MUST_USE_RESULT MaybeObject * AllocateGlobalObject(JSFunction *constructor)
Definition: heap.cc:4186
static uint32_t encode(intvalue)
Definition: utils.h:262
void Prepare(GCTracer *tracer)
void set_scan_on_scavenge(bool scan)
Definition: spaces-inl.h:185
static Smi * FromInt(int value)
Definition: objects-inl.h:981
#define LOG(isolate, Call)
Definition: log.h:81
MUST_USE_RESULT MaybeObject * AllocateJSFunctionProxy(Object *handler, Object *call_trap, Object *construct_trap, Object *prototype)
Definition: heap.cc:4160
const int KB
Definition: globals.h:207
void set_second(String *second, WriteBarrierMode mode=UPDATE_WRITE_BARRIER)
Definition: objects-inl.h:2593
static Object * GetObjectFromEntryAddress(Address location_of_address)
Definition: objects-inl.h:3570
MUST_USE_RESULT MaybeObject * AllocateJSObject(JSFunction *constructor, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4041
intptr_t MaxReserved()
Definition: heap.h:471
void CompletelyClearInstanceofCache()
Definition: heap-inl.h:657
V8_DECLARE_ONCE(initialize_gc_once)
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:303
void set_ic_age(int count)
void CollectAllGarbage(int flags, const char *gc_reason=NULL)
Definition: heap.cc:538
Address FromSpaceStart()
Definition: spaces.h:2233
bool is_logging()
Definition: log.h:286
static HeapObject * cast(Object *obj)
Map * MapForExternalArrayType(ExternalArrayType array_type)
Definition: heap.cc:3048
void SetNumberStringCache(Object *number, String *str)
Definition: heap.cc:2989
static const byte kArgumentMarker
Definition: objects.h:7968
void AgeInlineCaches()
Definition: heap.h:1638
MUST_USE_RESULT MaybeObject * AllocateModuleContext(ScopeInfo *scope_info)
Definition: heap.cc:4985
MUST_USE_RESULT MaybeObject * AllocateCodeCache()
Definition: heap.cc:2117
void set_pre_allocated_property_fields(int value)
Definition: objects-inl.h:3001
void CallOnce(OnceType *once, NoArgFunction init_func)
Definition: once.h:105
static const byte kUndefined
Definition: objects.h:7969
T Max(T a, T b)
Definition: utils.h:222
MUST_USE_RESULT MaybeObject * AllocateNativeContext()
Definition: heap.cc:4951
void AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type_filter)
Definition: heap.cc:6356
const int kVariableSizeSentinel
Definition: objects.h:199
static const int kAlignedSize
Definition: objects.h:5895
RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type)
Definition: heap.cc:3053
MUST_USE_RESULT MaybeObject * LookupAsciiSymbol(Vector< const char > str)
Definition: heap.cc:5449
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1029
static bool IsOutsideAllocatedSpace(void *pointer)
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:273
bool IsAsciiRepresentation()
Definition: objects-inl.h:290
static ExternalTwoByteString * cast(Object *obj)
void VisitExternalResources(v8::ExternalResourceVisitor *visitor)
Definition: heap.cc:1548
void IterateDeferredHandles(ObjectVisitor *visitor)
Definition: isolate.cc:480
intptr_t OldGenAllocationLimit(intptr_t old_gen_size)
Definition: heap.h:1412
static Map * cast(Object *obj)
void set_start_position(int value)
static const int kEmptyHashField
Definition: objects.h:7379
void ResetAllocationInfo()
Definition: spaces.cc:1192
MUST_USE_RESULT MaybeObject * AllocateByteArray(int length, PretenureFlag pretenure)
Definition: heap.cc:3517
static const byte kTheHole
Definition: objects.h:7966
static ByteArray * cast(Object *obj)
bool has_fast_object_elements()
Definition: objects.h:4792
Builtins * builtins()
Definition: isolate.h:924
void set_end_position(int end_position)
void set_context(Object *context)
Definition: objects-inl.h:4395
static FreeSpace * cast(Object *obj)
void mark_out_of_memory()
Definition: isolate.h:1446
Bootstrapper * bootstrapper()
Definition: isolate.h:818
void Set(int descriptor_number, Descriptor *desc, const WhitenessWitness &)
Definition: objects-inl.h:2171
bool InFromSpace(Object *object)
Definition: heap-inl.h:302
MUST_USE_RESULT MaybeObject * Uint32ToString(uint32_t value, bool check_number_string_cache=true)
Definition: heap.cc:3039
void Relocate(intptr_t delta)
Definition: objects.cc:8283
PromotionQueue * promotion_queue()
Definition: heap.h:1130
const int kMaxInt
Definition: globals.h:210
void SetTop(Object ***top)
Definition: store-buffer.h:99
static Foreign * cast(Object *obj)
static bool enabled()
Definition: serialize.h:481
void set_map(Map *value)
Definition: objects-inl.h:1143
Map * SymbolMapForString(String *str)
Definition: heap.cc:4490
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2300
intptr_t * code_space_size
Definition: heap.h:2183
static const int kSize
Definition: objects.h:8113
void AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type_filter)
Definition: heap.cc:6376
MUST_USE_RESULT MaybeObject * AllocateRawAsciiString(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4579
byte * instruction_end()
Definition: objects-inl.h:4654
void IterateStrongRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:5758
#define ASSERT(condition)
Definition: checks.h:270
bool InSpace(Address addr, AllocationSpace space)
Definition: heap.cc:5385
void(* GCPrologueCallback)(GCType type, GCCallbackFlags flags)
Definition: v8.h:2759
v8::Handle< v8::Value > Print(const v8::Arguments &args)
MUST_USE_RESULT MaybeObject * AllocateGlobalContext(JSFunction *function, ScopeInfo *scope_info)
Definition: heap.cc:4966
static void IncrementLiveBytesFromGC(Address address, int by)
Definition: spaces.h:484
void Step(intptr_t allocated, CompletionAction action)
#define PROFILE(isolate, Call)
Definition: cpu-profiler.h:190
KeyedLookupCache * keyed_lookup_cache()
Definition: isolate.h:849
static const int kReduceMemoryFootprintMask
Definition: heap.h:1083
MUST_USE_RESULT MaybeObject * LookupTwoByteSymbol(Vector< const uc16 > str)
Definition: heap.cc:5484
ExternalArrayType
Definition: v8.h:1431
unsigned short uint16_t
Definition: unicode.cc:46
void IterateStrongRoots(ObjectVisitor *v)
virtual Object * RetainAs(Object *object)
Definition: heap.cc:1212
static Context * cast(Object *context)
Definition: contexts.h:212
static const int kMaxLength
Definition: objects.h:7608
const intptr_t kCodeAlignment
Definition: v8globals.h:58
MUST_USE_RESULT MaybeObject * LookupSymbol(Vector< const char > str)
Definition: heap.cc:5434
bool SetUp(bool create_heap_objects)
Definition: heap.cc:6139
ThreadManager * thread_manager()
Definition: isolate.h:882
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)
static bool IsEnabled()
Definition: snapshot.h:49
MUST_USE_RESULT MaybeObject * AllocateBlockContext(JSFunction *function, Context *previous, ScopeInfo *info)
Definition: heap.cc:5053
int SizeFromMap(Map *map)
Definition: objects-inl.h:2954
static const int kMaxPreAllocatedPropertyFields
Definition: objects.h:5118
intptr_t CommittedMemoryExecutable()
Definition: heap.cc:215
#define CHECK(condition)
Definition: checks.h:56
ObjectIterator * next()
Definition: heap.cc:6498
void set_is_undetectable()
Definition: objects.h:4753
static void Iterate(ObjectVisitor *visitor)
Definition: serialize.cc:1118
static const int kSize
Definition: objects.h:8152
void VisitPointers(Object **start, Object **end)
Definition: heap.cc:1042
#define STRING_TYPE_LIST(V)
Definition: objects.h:325
MUST_USE_RESULT MaybeObject * CopyJSObject(JSObject *source)
Definition: heap.cc:4254
void set_first(String *first, WriteBarrierMode mode=UPDATE_WRITE_BARRIER)
Definition: objects-inl.h:2577
MUST_USE_RESULT MaybeObject * Copy()
Definition: objects.cc:5130
static ExternalAsciiString * cast(Object *obj)
static const int kMaxSize
Definition: objects.h:7605
static const int kPageSize
Definition: spaces.h:711
void init_back_pointer(Object *undefined)
Definition: objects-inl.h:3769
void set_foreign_address(Address value)
Definition: objects-inl.h:4581
friend class GCTracer
Definition: heap.h:2158
void SeqTwoByteStringSet(int index, uint16_t value)
Definition: objects-inl.h:2536
static Code * cast(Object *obj)
virtual const uint16_t * data() const =0
MUST_USE_RESULT MaybeObject * AllocateInternalSymbol(unibrow::CharacterStream *buffer, int chars, uint32_t hash_field)
Definition: heap.cc:4514
static bool IsAtEnd(Address addr)
Definition: spaces.h:1787
void IterateAndMarkPointersToFromSpace(Address start, Address end, ObjectSlotCallback callback)
Definition: heap.cc:5537
static PolymorphicCodeCache * cast(Object *obj)
MUST_USE_RESULT MaybeObject * AllocateJSArrayWithElements(FixedArrayBase *array_base, ElementsKind elements_kind, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4125
ArrayStorageAllocationMode
Definition: heap.h:436
static const int kSize
Definition: objects.h:6016
virtual Object * RetainAs(Object *object)=0
static Object ** RawField(HeapObject *obj, int offset)
Definition: objects-inl.h:971
StoreBuffer * store_buffer()
Definition: heap.h:1545
static const byte kOther
Definition: objects.h:7970
MUST_USE_RESULT MaybeObject * AllocateHeapNumber(double value, PretenureFlag pretenure)
Definition: heap.cc:2483
static Smi * cast(Object *object)
void set_function_token_position(int function_token_position)
static const int kInvalidEnumCache
Definition: objects.h:5121
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)
#define V8_INFINITY
Definition: globals.h:32
static bool IsAscii(const char *chars, int length)
Definition: objects.h:7443
static MUST_USE_RESULT MaybeObject * InitializeIntrinsicFunctionNames(Heap *heap, Object *dictionary)
Definition: runtime.cc:13235
bool CollectGarbage(AllocationSpace space, GarbageCollector collector, const char *gc_reason, const char *collector_reason)
Definition: heap.cc:577
void set_closure(JSFunction *closure)
Definition: contexts.h:308
static MarkBit MarkBitFrom(Address addr)
StackGuard * stack_guard()
Definition: isolate.h:834
MUST_USE_RESULT MaybeObject * AllocateWithContext(JSFunction *function, Context *previous, JSObject *extension)
Definition: heap.cc:5036
void set_dictionary_map(bool value)
Definition: objects-inl.h:3123
void Free(MemoryChunk *chunk)
Definition: spaces.cc:598
void set_size(int value)
intptr_t * lo_space_size
Definition: heap.h:2189
MUST_USE_RESULT MaybeObject * CopyFixedArrayWithMap(FixedArray *src, Map *map)
Definition: heap.cc:4711
uint8_t byte
Definition: globals.h:156
#define UPDATE_COUNTERS_FOR_SPACE(space)
GlobalObject * global_object()
Definition: contexts.h:328
Object * InObjectPropertyAtPut(int index, Object *value, WriteBarrierMode mode=UPDATE_WRITE_BARRIER)
Definition: objects-inl.h:1611
MUST_USE_RESULT MaybeObject * AllocateConsString(String *first, String *second)
Definition: heap.cc:3225
static Struct * cast(Object *that)
void InitializeBody(int object_size, Object *value)
Definition: objects-inl.h:4562
void RepairFreeListsAfterBoot()
Definition: heap.cc:439
MUST_USE_RESULT MaybeObject * NumberToString(Object *number, bool check_number_string_cache=true)
Definition: heap.cc:3009
static const int kMinLength
Definition: objects.h:7666
UnicodeCache * unicode_cache()
Definition: isolate.h:870
T ** location() const
Definition: handles.h:75
String *(* ExternalStringTableUpdaterCallback)(Heap *heap, Object **pointer)
Definition: heap.h:262
String * GetKey(int descriptor_number)
Definition: objects-inl.h:2093
uintptr_t real_jslimit()
Definition: execution.h:223
static const int kEndMarker
Definition: heap.h:2174
bool IdleNotification(int hint)
Definition: heap.cc:5139
MUST_USE_RESULT MaybeObject * AllocateStringFromAscii(Vector< const char > str, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4412
virtual size_t length() const =0
#define UNREACHABLE()
Definition: checks.h:50
void EnsureHeapIsIterable()
Definition: heap.cc:5109
static const int kArgumentsObjectSizeStrict
Definition: heap.h:898
T * start() const
Definition: utils.h:390
MUST_USE_RESULT MaybeObject * AllocateUninitializedFixedDoubleArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4866
bool PostGarbageCollectionProcessing(GarbageCollector collector)
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
int(* HeapObjectCallback)(HeapObject *obj)
Definition: v8globals.h:238
bool always_allocate()
Definition: heap.h:531
void set_global_object(GlobalObject *object)
Definition: contexts.h:333
static const int kMaxLength
Definition: objects.h:3675
const char * IntToCString(int n, Vector< char > buffer)
Definition: conversions.cc:123
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)
void set_unchecked(int index, Smi *value)
Definition: objects-inl.h:1910
void Register(StaticVisitorBase::VisitorId id, Callback callback)
intptr_t CommittedMemory()
Definition: spaces.h:1491
static bool IsMarked(HeapObject *object)
Definition: heap.h:2732
#define MUST_USE_RESULT
Definition: globals.h:346
bool Contains(Address a)
Definition: spaces-inl.h:178
void IteratePointersToNewSpace(ObjectSlotCallback callback)
#define HEAP_PROFILE(heap, call)
Definition: heap-profiler.h:39
void RemoveGCEpilogueCallback(GCEpilogueCallback callback)
Definition: heap.cc:6384
static SlicedString * cast(Object *obj)
Address ToSpaceEnd()
Definition: spaces.h:2238
void SetFlag(int flag)
Definition: spaces.h:421
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:826
intptr_t CommittedMemory()
Definition: spaces.h:2156
int pre_allocated_property_fields()
Definition: objects-inl.h:2949
void set_expected_nof_properties(int value)
Address ToSpaceStart()
Definition: spaces.h:2237
void set_instruction_size(int value)
Context * native_context()
Definition: contexts.cc:58
void InitializeBody(int object_size)
Definition: objects-inl.h:1674
virtual intptr_t SizeOfObjects()
Definition: spaces.h:1515
void LowerInlineAllocationLimit(intptr_t step)
Definition: spaces.h:2216
static const int kStoreBufferSize
Definition: store-buffer.h:85
static const uchar kMaxNonSurrogateCharCode
Definition: unicode.h:133
static bool IsValid(intptr_t value)
Definition: objects-inl.h:1059
void set_resource(const Resource *buffer)
Definition: objects-inl.h:2618
#define MAKE_CASE(NAME, Name, name)
void CollectAllAvailableGarbage(const char *gc_reason=NULL)
Definition: heap.cc:548
bool ConfigureHeapDefault()
Definition: heap.cc:5903
PagedSpace * paged_space(int idx)
Definition: heap.h:512
void set_aliased_context_slot(int count)
ElementsKind GetElementsKind()
Definition: objects-inl.h:4776
static const int kNoGCFlags
Definition: heap.h:1081
MUST_USE_RESULT MaybeObject * AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4827
const int kPointerSize
Definition: globals.h:220
virtual intptr_t Size()
Definition: spaces.h:2133
MemoryAllocator * memory_allocator()
Definition: isolate.h:845
MUST_USE_RESULT MaybeObject * AllocateInitialMap(JSFunction *fun)
Definition: heap.cc:3901
static Oddball * cast(Object *obj)
static Address & Address_at(Address addr)
Definition: v8memory.h:71
MUST_USE_RESULT MaybeObject * AllocateForeign(Address address, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3102
const char * DoubleToCString(double v, Vector< char > buffer)
Definition: conversions.cc:68
intptr_t OffsetFrom(T x)
Definition: utils.h:126
static UnseededNumberDictionary * cast(Object *obj)
Definition: objects.h:3284
void QueueMemoryChunkForFree(MemoryChunk *chunk)
Definition: heap.cc:7220
void CheckpointObjectStats()
Definition: heap.cc:7302
MUST_USE_RESULT MaybeObject * AllocateExternalArray(int length, ExternalArrayType array_type, void *external_pointer, PretenureFlag pretenure)
Definition: heap.cc:3572
#define CONSTANT_SYMBOL_ELEMENT(name, contents)
intptr_t * cell_space_capacity
Definition: heap.h:2188
bool IsAligned(T value, U alignment)
Definition: utils.h:206
intptr_t * memory_allocator_size
Definition: heap.h:2195
T Remove(int i)
Definition: list-inl.h:116
static SeqAsciiString * cast(Object *obj)
void set_inobject_properties(int value)
Definition: objects-inl.h:2995
void set_hash_field(uint32_t value)
Definition: objects-inl.h:2411
friend class Page
Definition: heap.h:2161
void Iterate(ObjectVisitor *v)
Definition: isolate.cc:475
GlobalHandles * global_handles()
Definition: isolate.h:880
intptr_t Available()
Definition: spaces.h:2162
void IncrementYoungSurvivorsCounter(int survived)
Definition: heap.h:1473
MUST_USE_RESULT MaybeObject * AllocatePolymorphicCodeCache()
Definition: heap.cc:2128
intptr_t * code_space_capacity
Definition: heap.h:2184
void VisitPointer(Object **p)
Definition: heap.cc:1040
static void Enter(Heap *heap, String *key_string, Object *key_pattern, FixedArray *value_array, ResultsCacheType type)
Definition: heap.cc:2847
void Update(Map *map, String *name, int field_offset)
Definition: heap.cc:7107
const uint32_t kShortcutTypeMask
Definition: objects.h:511
void ReserveSpace(int *sizes, Address *addresses)
Definition: heap.cc:696
static Handle< Object > SetLocalPropertyIgnoreAttributes(Handle< JSObject > object, Handle< String > key, Handle< Object > value, PropertyAttributes attributes)
Definition: objects.cc:2952
void set_end_position(int value)
int length() const
Definition: utils.h:384
OldSpace * old_pointer_space()
Definition: heap.h:506
bool ConfigureHeap(int max_semispace_size, intptr_t max_old_gen_size, intptr_t max_executable_size)
Definition: heap.cc:5833
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
static Mutex * CreateMutex()
intptr_t * map_space_size
Definition: heap.h:2185
#define LUMP_OF_MEMORY
static double TimeCurrentMillis()
static FixedDoubleArray * cast(Object *obj)
MUST_USE_RESULT MaybeObject * AllocateTypeFeedbackInfo()
Definition: heap.cc:2144
bool CreateApiObjects()
Definition: heap.cc:2542
size_t size() const
Definition: spaces.h:519
GCType
Definition: v8.h:2748
HeapState gc_state()
Definition: heap.h:1326
bool IsTwoByteRepresentation()
Definition: objects-inl.h:296
static const int kSize
Definition: objects.h:5139
#define SYMBOL_LIST(V)
Definition: heap.h:163
void set_age_mark(Address mark)
Definition: spaces.h:2189
void IterateAllRoots(ObjectVisitor *v)
static const int kMaxNonCodeHeapObjectSize
Definition: spaces.h:717
bool contains(Address address)
Definition: spaces.h:838
static const int kMinLength
Definition: objects.h:7717
OldSpace * code_space()
Definition: heap.h:508
static const int kMakeHeapIterableMask
Definition: heap.h:1088
MUST_USE_RESULT MaybeObject * AllocateJSArrayAndStorage(ElementsKind elements_kind, int length, int capacity, ArrayStorageAllocationMode mode=DONT_INITIALIZE_ARRAY_ELEMENTS, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4079
void EnsureSpace(intptr_t space_needed)
void Iterate(ObjectVisitor *v)
Definition: heap-inl.h:590
void set_kind(byte kind)
Definition: objects-inl.h:1463
#define V8_PTR_PREFIX
Definition: globals.h:181
static const int kNextFunctionLinkOffset
Definition: objects.h:6190
bool InToSpace(Object *object)
Definition: heap-inl.h:307
bool UncommitFromSpace()
Definition: heap.h:547
void CopyFrom(const CodeDesc &desc)
Definition: objects.cc:8291
static int SizeFor(int length)
Definition: objects.h:2434
static int IterateBody(Map *map, HeapObject *obj)
void set_start_position_and_type(int value)
WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation &)
Definition: objects-inl.h:1831
bool IsPowerOf2(T x)
Definition: utils.h:50
void set_resource(const Resource *buffer)
Definition: objects-inl.h:2650
PropertyDetails GetDetails(int descriptor_number)
Definition: objects-inl.h:2129
void GarbageCollectionPrologue()
Definition: heap.cc:403
void Iterate(ObjectVisitor *v)
Definition: v8threads.cc:387
void Iterate(ObjectVisitor *v)
bool is_call_stub()
Definition: objects.h:4301
bool HasBeenSetUp()
Definition: heap.cc:234
byte * relocation_start()
Definition: objects-inl.h:4675
LargeObjectSpace * lo_space()
Definition: heap.h:511
static ScopeInfo * Empty()
Definition: scopeinfo.cc:152
const Address kFromSpaceZapValue
Definition: v8globals.h:82
bool ToSpaceContains(Address address)
Definition: spaces.h:2240
MUST_USE_RESULT MaybeObject * AllocateJSMessageObject(String *type, JSArray *arguments, int start_position, int end_position, Object *script, Object *stack_trace, Object *stack_frames)
Definition: heap.cc:3156
DeoptimizerData * deoptimizer_data()
Definition: isolate.h:838
static MUST_USE_RESULT MaybeObject * Allocate(int at_least_space_for)
Callback GetVisitorById(StaticVisitorBase::VisitorId id)
MUST_USE_RESULT MaybeObject * AllocateExternalStringFromTwoByte(const ExternalTwoByteString::Resource *resource)
Definition: heap.cc:3463
MUST_USE_RESULT MaybeObject * AllocatePartialMap(InstanceType instance_type, int instance_size)
Definition: heap.cc:2061
MUST_USE_RESULT MaybeObject * CreateCode(const CodeDesc &desc, Code::Flags flags, Handle< Object > self_reference, bool immovable=false)
Definition: heap.cc:3594
void PerformScavenge()
Definition: heap.cc:652
static Object * Lookup(Heap *heap, String *key_string, Object *key_pattern, ResultsCacheType type)
Definition: heap.cc:2814
virtual bool SkipObject(HeapObject *object)=0
activate correct semantics for inheriting readonliness false
Definition: flags.cc:141
DescriptorLookupCache * descriptor_lookup_cache()
Definition: isolate.h:857
void set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1154
void set_check_type(CheckType value)
Definition: objects-inl.h:3374
static JSMessageObject * cast(Object *obj)
Definition: objects-inl.h:4634
static const int kAbortIncrementalMarkingMask
Definition: heap.h:1084
static const int kNonWeakFieldsEndOffset
Definition: objects.h:6189
void RemoveGCPrologueCallback(GCPrologueCallback callback)
Definition: heap.cc:6364
Vector< const char > CStrVector(const char *data)
Definition: utils.h:526
void FreeQueuedChunks()
Definition: heap.cc:7226
CellSpace * cell_space()
Definition: heap.h:510
LazyDynamicInstance< Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait >::type LazyMutex
Definition: platform.h:583
static Local< Context > ToLocal(v8::internal::Handle< v8::internal::Context > obj)
intptr_t CommittedMemory()
Definition: heap.cc:203
Object * GetNumberStringCache(Object *number)
Definition: heap.cc:2969
intptr_t SizeOfObjects()
Definition: heap.cc:429
MUST_USE_RESULT MaybeObject * AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure)
Definition: heap.cc:4904
static int SizeFor(int length)
Definition: objects.h:2353
void SetArea(Address area_start, Address area_end)
Definition: spaces.h:525
static const int kMaxSize
Definition: objects.h:7553
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor *v)
Context * context()
Definition: isolate.h:520
bool IsFastSmiOrObjectElementsKind(ElementsKind kind)
void RecordWrites(Address address, int start, int len)
Definition: heap-inl.h:336
static SeqTwoByteString * cast(Object *obj)
static const int kSize
Definition: objects.h:7961
static JSFunctionResultCache * cast(Object *obj)
void Iterate(ObjectVisitor *v)
void(* GCEpilogueCallback)(GCType type, GCCallbackFlags flags)
Definition: v8.h:2760
intptr_t get_max_alive_after_gc()
Definition: heap.h:1536
void UpdateReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1404
MUST_USE_RESULT MaybeObject * AllocateJSProxy(Object *handler, Object *prototype)
Definition: heap.cc:4140
void ProcessWeakReferences(WeakObjectRetainer *retainer)
Definition: heap.cc:1467
void ClearNormalizedMapCaches()
Definition: heap.cc:781
static const int kHeaderSize
Definition: objects.h:2296
static void VisitPointer(Heap *heap, Object **p)
Definition: heap.cc:1572
MUST_USE_RESULT MaybeObject * NumberFromDouble(double value, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3081
bool SlowContains(Address addr)
Definition: spaces.h:2567
static const int kLength
Definition: heap.h:2348
intptr_t * old_data_space_capacity
Definition: heap.h:2182
static int SizeFor(int length)
Definition: objects.h:3635
int signbit(double x)
intptr_t Available()
Definition: heap.cc:222
Space * owner() const
Definition: spaces.h:320
MUST_USE_RESULT MaybeObject * AllocateArgumentsObject(Object *callee, int length)
Definition: heap.cc:3827
int Lookup(Map *map, String *name)
Definition: heap.cc:7095
InnerPointerToCodeCache * inner_pointer_to_code_cache()
Definition: isolate.h:874
void set_instance_type(InstanceType value)
Definition: objects-inl.h:3014
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V)
Definition: objects.h:675
static HeapNumber * cast(Object *obj)
static void WriteToFlat(String *source, sinkchar *sink, int from, int to)
Definition: objects.cc:6891
int get_min_in_mutator()
Definition: heap.h:1539
intptr_t Capacity()
Definition: spaces.h:2150
int get_max_gc_pause()
Definition: heap.h:1533
static StringDictionary * cast(Object *obj)
Definition: objects.h:3182
void set_value(double value)
Definition: objects-inl.h:1203
MUST_USE_RESULT MaybeObject * CopyFixedArray(FixedArray *src)
Definition: heap-inl.h:176
virtual size_t length() const =0
void IterateRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:5740
static const int kLengthOffset
Definition: objects.h:2295
static double nan_value()
static const int kSize
Definition: objects.h:1350
MUST_USE_RESULT MaybeObject * ReinitializeJSReceiver(JSReceiver *object, InstanceType type, int size)
Definition: heap.cc:4326
MUST_USE_RESULT MaybeObject * AllocateAccessorPair()
Definition: heap.cc:2133
bool is_null() const
Definition: handles.h:87
MUST_USE_RESULT MaybeObject * AllocateCatchContext(JSFunction *function, Context *previous, String *name, Object *thrown_object)
Definition: heap.cc:5015
const uint32_t kFreeListZapValue
Definition: v8globals.h:85
#define STRUCT_LIST(V)
Definition: objects.h:448
static uint32_t RandomPrivate(Isolate *isolate)
Definition: v8.cc:181
static const int kArgumentsLengthIndex
Definition: heap.h:901
#define CODE_KIND_LIST(V)
Definition: objects.h:4187
static int SizeFor(int length)
Definition: objects.h:7548
void CheckNewSpaceExpansionCriteria()
Definition: heap.cc:1095
const intptr_t kObjectAlignment
Definition: v8globals.h:44
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:1806
void RecordStats(HeapStats *stats)
MUST_USE_RESULT MaybeObject * AllocateScopeInfo(int length)
Definition: heap.cc:5071
bool LookupSymbolIfExists(String *str, String **symbol)
Definition: heap.cc:5515
static JSGlobalPropertyCell * cast(Object *obj)
MUST_USE_RESULT MaybeObject * NumberFromUint32(uint32_t value, PretenureFlag pretenure=NOT_TENURED)
Definition: heap-inl.h:240
IncrementalMarking * incremental_marking()
Definition: heap.h:1553
bool Contains(Address addr)
Definition: heap.cc:5367
uint16_t uc16
Definition: globals.h:259
MUST_USE_RESULT MaybeObject * AllocateUninitializedFixedArray(int length)
Definition: heap.cc:4836
void set_extension(Object *object)
Definition: contexts.h:319
static const int kStartMarker
Definition: heap.h:2173
void set_bit_field(byte value)
Definition: objects-inl.h:3034
static TypeFeedbackCells * cast(Object *obj)
static int SizeFor(int length)
Definition: objects.h:7600
static const int kSize
Definition: objects.h:6543
virtual const char * data() const =0
MUST_USE_RESULT MaybeObject * Initialize(const char *to_string, Object *to_number, byte kind)
Definition: objects.cc:7799
void Iterate(v8::internal::ObjectVisitor *v)
NewSpacePage * next_page() const
Definition: spaces.h:1760
void MemsetPointer(T **dest, U *value, int counter)
Definition: v8utils.h:149
void set_owner(Space *space)
Definition: spaces.h:329
LoggingAndProfiling
Definition: heap.cc:1649
MUST_USE_RESULT MaybeObject * AllocateJSObjectFromMap(Map *map, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4002
void RememberUnmappedPage(Address page, bool compacted)
Definition: heap.cc:7274
static MUST_USE_RESULT MaybeObject * Allocate(int number_of_descriptors, int slack=0)
Definition: objects.cc:6032
static void UpdateReferencesForScavengeGC()
void Set(int index, uint16_t value)
Definition: objects-inl.h:2470
static const int kNotFound
Definition: heap.h:2353
static const int kRegExpResultsCacheSize
Definition: heap.h:2627
#define HEAP
Definition: isolate.h:1433
void PrintPID(const char *format,...)
Definition: v8utils.cc:56
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
static const byte kNull
Definition: objects.h:7967
static const int kBodyOffset
Definition: spaces.h:509
MUST_USE_RESULT MaybeObject * LookupSingleCharacterStringFromCode(uint16_t code)
Definition: heap.cc:3492
InstanceType instance_type()
Definition: objects-inl.h:3009
static void CopyBlock(Address dst, Address src, int byte_size)
Definition: heap-inl.h:379
MUST_USE_RESULT MaybeObject * AllocateJSGlobalPropertyCell(Object *value)
Definition: heap.cc:2519
static bool ShouldZapGarbage()
Definition: heap.h:1287
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1171
void USE(T)
Definition: globals.h:289
void set_size(size_t size)
Definition: spaces.h:521
MUST_USE_RESULT MaybeObject * AllocateFixedDoubleArrayWithHoles(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4883
MUST_USE_RESULT MaybeObject * AllocateRawFixedArray(int length)
Definition: heap.cc:4696
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in 
the Print usage message
Definition: flags.cc:495
Counters * counters()
Definition: isolate.h:819
ScavengeVisitor(Heap *heap)
Definition: heap.cc:1038
static const unsigned kMaxAsciiCharCodeU
Definition: objects.h:7328
static const int kArgumentsCalleeIndex
Definition: heap.h:903
const int kSmiTag
Definition: v8.h:4014
MUST_USE_RESULT MaybeObject * AllocateHashTable(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4939
virtual void Rewind()=0
static FixedArray * cast(Object *obj)
static const unsigned kMaxOneByteChar
Definition: unicode.h:164
static const int kHeaderSize
Definition: objects.h:2173
Object * FindCodeObject(Address a)
Definition: heap.cc:1029
MapSpace * map_space()
Definition: heap.h:509
void set_previous(Context *context)
Definition: contexts.h:315
intptr_t PromotedSpaceSizeOfObjects()
Definition: heap.cc:5947
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor *v)
intptr_t * old_pointer_space_capacity
Definition: heap.h:2180
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:38
Logger * logger()
Definition: isolate.h:828
StaticResource< Utf8Decoder > * utf8_decoder()
Definition: scanner.h:150
Object * GetCallbacksObject(int descriptor_number)
Definition: objects-inl.h:2151
void set_instance_size(int value)
Definition: objects-inl.h:2987
Object * get(int index)
Definition: objects-inl.h:1737
static VisitorId GetVisitorId(int instance_type, int instance_size)
void ClearJSFunctionResultCaches()
Definition: heap.cc:757
GCCallbackFlags
Definition: v8.h:2754
void RecordStats(HeapStats *stats, bool take_snapshot=false)
Definition: heap.cc:5910
void ZapFromSpace()
Definition: heap.cc:5523
void set_formal_parameter_count(int value)
static const int kMaxLength
Definition: objects.h:2453
String * TryFlattenGetString(PretenureFlag pretenure=NOT_TENURED)
Definition: objects-inl.h:2436
void set_bit_field2(byte value)
Definition: objects-inl.h:3044
void CopyFrom(VisitorDispatchTable *other)
void CreateFillerObjectAt(Address addr, int size)
Definition: heap.cc:3558
static int GetLastError()
MUST_USE_RESULT MaybeObject * AllocateSharedFunctionInfo(Object *name)
Definition: heap.cc:3114
bool AdvanceSweepers(int step_size)
Definition: heap.h:1562
static NormalizedMapCache * cast(Object *obj)
static const int kMaxLength
Definition: objects.h:7386
intptr_t * map_space_capacity
Definition: heap.h:2186
static int SizeFor(int body_size)
Definition: objects.h:4487
void set_stress_deopt_counter(int counter)
static intptr_t MaxVirtualMemory()
static const intptr_t kAllocatedThreshold
static const int kCapacityMask
Definition: heap.h:2349
static const byte kFalse
Definition: objects.h:7963
static void ScavengeObject(HeapObject **p, HeapObject *object)
Definition: heap-inl.h:410
Definition: objects.h:6953
void remove(HeapObject **target, int *size)
Definition: heap.h:342
bool is_keyed_call_stub()
Definition: objects.h:4302
void set_visitor_id(int visitor_id)
Definition: objects-inl.h:2933
bool IsSweepingComplete()
Definition: heap.h:1557
void set_length(int value)
void set_this_property_assignments_count(int value)
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
Definition: spaces.cc:1046
void IterateBuiltins(ObjectVisitor *v)
Definition: builtins.cc:1687
void CopyChars(sinkchar *dest, const sourcechar *src, int chars)
Definition: v8utils.h:211
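CopyChars above copies between character buffers whose element types may differ, which is how one-byte and two-byte string contents get widened or narrowed. A standalone sketch in the same spirit (CopyCharsSketch is an illustrative name, not the V8 helper):

  #include <stdint.h>

  // Element-wise copy between possibly different character widths.
  template <typename sinkchar, typename sourcechar>
  void CopyCharsSketch(sinkchar* dest, const sourcechar* src, int chars) {
    for (int i = 0; i < chars; i++) {
      dest[i] = static_cast<sinkchar>(src[i]);
    }
  }

  // Usage: widen a one-byte buffer into a two-byte buffer.
  //   uint8_t ascii[3] = { 'v', '8', 0 };
  //   uint16_t two_byte[3];
  //   CopyCharsSketch(two_byte, ascii, 3);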
static VisitorDispatchTable< ScavengingCallback > * GetTable()
Definition: heap.cc:1715
T Min(T a, T b)
Definition: utils.h:229
intptr_t * memory_allocator_capacity
Definition: heap.h:2196
static ConsString * cast(Object *obj)
virtual intptr_t SizeOfObjects()
Definition: spaces.h:2519
void set_offset(int offset)
Definition: code-stubs.h:662
Print usage including flags
Definition: flags.cc:495
static FixedArrayBase * cast(Object *object)
Definition: objects-inl.h:1731
void set_flags(Flags flags)
Definition: objects-inl.h:3153
intptr_t Capacity()
Definition: heap.cc:191
MUST_USE_RESULT MaybeObject * AllocateStruct(InstanceType type)
Definition: heap.cc:5080
void EnterDirectlyIntoStoreBuffer(Address addr)
intptr_t * old_data_space_size
Definition: heap.h:2181
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2650
GCTracer * tracer()
Definition: heap.h:1522
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
NewSpace * new_space()
Definition: heap.h:505
MUST_USE_RESULT MaybeObject * AllocateMap(InstanceType instance_type, int instance_size, ElementsKind elements_kind=TERMINAL_FAST_ELEMENTS_KIND)
Definition: heap.cc:2085
#define ARRAY_SIZE(a)
Definition: globals.h:281
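ARRAY_SIZE above is the usual compile-time element-count macro for C-style arrays (V8's definition in globals.h also folds in a compile-time sanity check). A minimal standalone sketch of the idea:

  // Element count of a true C-style array; do not pass a pointer.
  #define ARRAY_SIZE_SKETCH(a) (sizeof(a) / sizeof(*(a)))

  // Usage:
  //   static const int kSteps[] = { 1, 2, 4, 8 };
  //   int n = ARRAY_SIZE_SKETCH(kSteps);  // n == 4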
const intptr_t kDoubleAlignment
Definition: v8globals.h:52
const int kCharSize
Definition: globals.h:215
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
intptr_t MaxExecutableSize()
Definition: heap.h:478
static const byte kTrue
Definition: objects.h:7964
static const int kMaxLength
Definition: objects.h:7556
MUST_USE_RESULT MaybeObject * AllocateRaw(int size_in_bytes, AllocationSpace space, AllocationSpace retry_space)
Definition: heap-inl.h:186
static const int kSize
Definition: objects.h:8274
static MUST_USE_RESULT MaybeObject * Allocate(int at_least_space_for, MinimumCapacity capacity_option=USE_DEFAULT_MINIMUM_CAPACITY, PretenureFlag pretenure=NOT_TENURED)
void InitializeDescriptors(DescriptorArray *descriptors)
Definition: objects-inl.h:3608
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:317
void PrintShortHeapStatistics()
Definition: heap.cc:327
static JSObject * cast(Object *obj)
static const int kHashMask
Definition: heap.h:2351
AllocationSpace TargetSpaceId(InstanceType type)
Definition: heap-inl.h:354
uint32_t RoundUpToPowerOf2(uint32_t x)
Definition: utils.h:186
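RoundUpToPowerOf2 above rounds a 32-bit value up to the next power of two, the kind of helper used when sizing hash tables and semispaces. A standalone sketch of the classic bit-smearing technique such a helper typically uses (illustrative, not copied from utils.h):

  #include <stdint.h>

  // Round x up to the next power of two; this sketch returns 0 for x == 0.
  uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
    x -= 1;
    x |= x >> 1;   // smear the highest set bit downward...
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;  // ...until every lower bit is set
    return x + 1;
  }
  // RoundUpToPowerOf2Sketch(33) == 64, RoundUpToPowerOf2Sketch(64) == 64.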
OldSpace * old_data_space()
Definition: heap.h:507
MUST_USE_RESULT MaybeObject * AllocateRawTwoByteString(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4626
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:1948
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1541
MUST_USE_RESULT MaybeObject * AllocateFunction(Map *function_map, SharedFunctionInfo *shared, Object *prototype, PretenureFlag pretenure=TENURED)
Definition: heap.cc:3812
int FastD2I(double x)
Definition: conversions.h:69
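FastD2I above is the fast double-to-int conversion used on hot paths; it assumes the value already fits in an int and simply truncates toward zero. A standalone sketch under that assumption (the name is illustrative):

  // Truncating double-to-int conversion; only meaningful when x is in int range,
  // which callers are expected to guarantee.
  int FastD2ISketch(double x) {
    return static_cast<int>(x);
  }
  // FastD2ISketch(3.9) == 3, FastD2ISketch(-3.9) == -3.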
void UpdateNewSpaceReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1367
void set_initial_map(Map *value)
Definition: objects-inl.h:4410
static const int kAlignedSize
Definition: objects.h:3744
bool CommitFromSpaceIfNeeded()
Definition: spaces.h:2290
AllocationSpace identity()
Definition: spaces.h:788
void set_unused_property_fields(int value)
Definition: objects-inl.h:3024
#define LAZY_MUTEX_INITIALIZER
Definition: platform.h:585
static const int kIsExtensible
Definition: objects.h:5176
MUST_USE_RESULT MaybeObject * AllocateStringFromTwoByte(Vector< const uc16 > str, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4470
static const int kNonCodeObjectAreaSize
Definition: spaces.h:714
static const int kEntriesPerBucket
Definition: heap.h:2352
static const int kPointerFieldsBeginOffset
Definition: objects.h:5143
void set_bit_field3(int value)
void EnsureFromSpaceIsCommitted()
Definition: heap.cc:743
void InitializeBody(Map *map, Object *pre_allocated_value, Object *filler_value)
Definition: objects-inl.h:1625
MUST_USE_RESULT MaybeObject * AllocateAliasedArgumentsEntry(int slot)
Definition: heap.cc:2156
MemoryChunk * next_chunk() const
Definition: spaces.h:314
void set_parent(String *parent, WriteBarrierMode mode=UPDATE_WRITE_BARRIER)
Definition: objects-inl.h:2557
const int MB
Definition: globals.h:208
static JSFunction * cast(Object *obj)