v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "isolate-inl.h"
42 #include "mark-compact.h"
43 #include "natives.h"
44 #include "objects-visiting.h"
45 #include "objects-visiting-inl.h"
46 #include "once.h"
47 #include "runtime-profiler.h"
48 #include "scopeinfo.h"
49 #include "snapshot.h"
50 #include "store-buffer.h"
52 #include "v8conversions.h"
53 #include "v8threads.h"
54 #include "v8utils.h"
55 #include "vm-state-inl.h"
56 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "arm/regexp-macro-assembler-arm.h"
59 #endif
60 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
61 #include "regexp-macro-assembler.h"
62 #include "mips/regexp-macro-assembler-mips.h"
63 #endif
64 
65 namespace v8 {
66 namespace internal {
67 
68 
69 Heap::Heap()
70  : isolate_(NULL),
71  code_range_size_(kIs64BitArch ? 512 * MB : 0),
72 // semispace_size_ should be a power of 2 and old_generation_size_ should be
73 // a multiple of Page::kPageSize.
74  reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
75  max_semispace_size_(8 * (kPointerSize / 4) * MB),
76  initial_semispace_size_(Page::kPageSize),
77  max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
78  max_executable_size_(256ul * (kPointerSize / 4) * MB),
79 // Variables set based on semispace_size_ and old_generation_size_ in
80 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
81 // Will be 4 * reserved_semispace_size_ to ensure that young
82 // generation can be aligned to its size.
83  maximum_committed_(0),
84  survived_since_last_expansion_(0),
85  sweep_generation_(0),
86  always_allocate_scope_depth_(0),
87  linear_allocation_scope_depth_(0),
88  contexts_disposed_(0),
89  global_ic_age_(0),
90  flush_monomorphic_ics_(false),
91  scan_on_scavenge_pages_(0),
92  new_space_(this),
93  old_pointer_space_(NULL),
94  old_data_space_(NULL),
95  code_space_(NULL),
96  map_space_(NULL),
97  cell_space_(NULL),
98  property_cell_space_(NULL),
99  lo_space_(NULL),
100  gc_state_(NOT_IN_GC),
101  gc_post_processing_depth_(0),
102  ms_count_(0),
103  gc_count_(0),
104  remembered_unmapped_pages_index_(0),
105  unflattened_strings_length_(0),
106 #ifdef DEBUG
107  allocation_timeout_(0),
108 #endif // DEBUG
109  new_space_high_promotion_mode_active_(false),
110  old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
111  size_of_old_gen_at_last_old_space_gc_(0),
112  external_allocation_limit_(0),
113  amount_of_external_allocated_memory_(0),
114  amount_of_external_allocated_memory_at_last_global_gc_(0),
115  old_gen_exhausted_(false),
116  inline_allocation_disabled_(false),
117  store_buffer_rebuilder_(store_buffer()),
118  hidden_string_(NULL),
119  gc_safe_size_of_old_object_(NULL),
120  total_regexp_code_generated_(0),
121  tracer_(NULL),
122  young_survivors_after_last_gc_(0),
123  high_survival_rate_period_length_(0),
124  low_survival_rate_period_length_(0),
125  survival_rate_(0),
126  previous_survival_rate_trend_(Heap::STABLE),
127  survival_rate_trend_(Heap::STABLE),
128  max_gc_pause_(0.0),
129  total_gc_time_ms_(0.0),
130  max_alive_after_gc_(0),
131  min_in_mutator_(kMaxInt),
132  alive_after_last_gc_(0),
133  last_gc_end_timestamp_(0.0),
134  marking_time_(0.0),
135  sweeping_time_(0.0),
136  mark_compact_collector_(this),
137  store_buffer_(this),
138  marking_(this),
139  incremental_marking_(this),
140  number_idle_notifications_(0),
141  last_idle_notification_gc_count_(0),
142  last_idle_notification_gc_count_init_(false),
143  mark_sweeps_since_idle_round_started_(0),
144  gc_count_at_last_idle_gc_(0),
145  scavenges_since_last_idle_round_(kIdleScavengeThreshold),
146  full_codegen_bytes_generated_(0),
147  crankshaft_codegen_bytes_generated_(0),
148  gcs_since_last_deopt_(0),
149 #ifdef VERIFY_HEAP
150  no_weak_object_verification_scope_depth_(0),
151 #endif
152  allocation_sites_scratchpad_length_(0),
153  promotion_queue_(this),
154  configured_(false),
155  external_string_table_(this),
156  chunks_queued_for_free_(NULL),
157  gc_callbacks_depth_(0) {
158  // Allow build-time customization of the max semispace size. Building
159  // V8 with snapshots and a non-default max semispace size is much
160  // easier if you can define it as part of the build environment.
161 #if defined(V8_MAX_SEMISPACE_SIZE)
162  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
163 #endif
164 
165  // Ensure old_generation_size_ is a multiple of kPageSize.
166  ASSERT(MB >= Page::kPageSize);
167 
168  intptr_t max_virtual = OS::MaxVirtualMemory();
169 
170  if (max_virtual > 0) {
171  if (code_range_size_ > 0) {
172  // Reserve no more than 1/8 of the memory for the code range.
173  code_range_size_ = Min(code_range_size_, max_virtual >> 3);
174  }
175  }
176 
177  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
178  native_contexts_list_ = NULL;
179  array_buffers_list_ = Smi::FromInt(0);
180  allocation_sites_list_ = Smi::FromInt(0);
181  // Put a dummy entry in the remembered pages so we can find the list in the
182  // minidump even if there are no real unmapped pages.
183  RememberUnmappedPage(NULL, false);
184 
185  ClearObjectStats(true);
186 }
187 
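// --- Illustrative sketch (not part of heap.cc) ------------------------------
// The constructor above scales several limits by (kPointerSize / 4), so the
// defaults double on 64-bit targets. A minimal standalone computation of the
// resulting values, assuming kPointerSize is 4 or 8 and MB is 1024 * 1024 as
// in V8's globals:

#include <cstdio>

int main() {
  const long MB = 1024 * 1024;
  for (int kPointerSize = 4; kPointerSize <= 8; kPointerSize += 4) {
    long max_semispace = 8 * (kPointerSize / 4) * MB;          // 8 or 16 MB
    long max_old_generation = 700L * (kPointerSize / 4) * MB;  // 700 or 1400 MB
    long max_executable = 256L * (kPointerSize / 4) * MB;      // 256 or 512 MB
    std::printf("%d-bit: semispaces %ld MB, old gen %ld MB, exec %ld MB\n",
                kPointerSize * 8, max_semispace / MB,
                max_old_generation / MB, max_executable / MB);
  }
  return 0;
}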
188 
189 intptr_t Heap::Capacity() {
190  if (!HasBeenSetUp()) return 0;
191 
192  return new_space_.Capacity() +
193  old_pointer_space_->Capacity() +
194  old_data_space_->Capacity() +
195  code_space_->Capacity() +
196  map_space_->Capacity() +
197  cell_space_->Capacity() +
198  property_cell_space_->Capacity();
199 }
200 
201 
202 intptr_t Heap::CommittedMemory() {
203  if (!HasBeenSetUp()) return 0;
204 
205  return new_space_.CommittedMemory() +
206  old_pointer_space_->CommittedMemory() +
207  old_data_space_->CommittedMemory() +
208  code_space_->CommittedMemory() +
209  map_space_->CommittedMemory() +
210  cell_space_->CommittedMemory() +
211  property_cell_space_->CommittedMemory() +
212  lo_space_->Size();
213 }
214 
215 
216 size_t Heap::CommittedPhysicalMemory() {
217  if (!HasBeenSetUp()) return 0;
218 
219  return new_space_.CommittedPhysicalMemory() +
220  old_pointer_space_->CommittedPhysicalMemory() +
221  old_data_space_->CommittedPhysicalMemory() +
222  code_space_->CommittedPhysicalMemory() +
223  map_space_->CommittedPhysicalMemory() +
224  cell_space_->CommittedPhysicalMemory() +
225  property_cell_space_->CommittedPhysicalMemory() +
226  lo_space_->CommittedPhysicalMemory();
227 }
228 
229 
230 intptr_t Heap::CommittedMemoryExecutable() {
231  if (!HasBeenSetUp()) return 0;
232 
233  return isolate()->memory_allocator()->SizeExecutable();
234 }
235 
236 
237 void Heap::UpdateMaximumCommitted() {
238  if (!HasBeenSetUp()) return;
239 
240  intptr_t current_committed_memory = CommittedMemory();
241  if (current_committed_memory > maximum_committed_) {
242  maximum_committed_ = current_committed_memory;
243  }
244 }
245 
246 
247 intptr_t Heap::Available() {
248  if (!HasBeenSetUp()) return 0;
249 
250  return new_space_.Available() +
251  old_pointer_space_->Available() +
252  old_data_space_->Available() +
253  code_space_->Available() +
254  map_space_->Available() +
255  cell_space_->Available() +
256  property_cell_space_->Available();
257 }
258 
259 
260 bool Heap::HasBeenSetUp() {
261  return old_pointer_space_ != NULL &&
262  old_data_space_ != NULL &&
263  code_space_ != NULL &&
264  map_space_ != NULL &&
265  cell_space_ != NULL &&
266  property_cell_space_ != NULL &&
267  lo_space_ != NULL;
268 }
269 
270 
271 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
272  if (IntrusiveMarking::IsMarked(object)) {
273  return IntrusiveMarking::SizeOfMarkedObject(object);
274  }
275  return object->SizeFromMap(object->map());
276 }
277 
278 
279 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
280  const char** reason) {
281  // Is global GC requested?
282  if (space != NEW_SPACE) {
283  isolate_->counters()->gc_compactor_caused_by_request()->Increment();
284  *reason = "GC in old space requested";
285  return MARK_COMPACTOR;
286  }
287 
288  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
289  *reason = "GC in old space forced by flags";
290  return MARK_COMPACTOR;
291  }
292 
293  // Is enough data promoted to justify a global GC?
294  if (OldGenerationAllocationLimitReached()) {
295  isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
296  *reason = "promotion limit reached";
297  return MARK_COMPACTOR;
298  }
299 
300  // Have allocation in OLD and LO failed?
301  if (old_gen_exhausted_) {
302  isolate_->counters()->
303  gc_compactor_caused_by_oldspace_exhaustion()->Increment();
304  *reason = "old generations exhausted";
305  return MARK_COMPACTOR;
306  }
307 
308  // Is there enough space left in OLD to guarantee that a scavenge can
309  // succeed?
310  //
311  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
312  // for object promotion. It counts only the bytes that the memory
313  // allocator has not yet allocated from the OS and assigned to any space,
314  // and does not count available bytes already in the old space or code
315  // space. Undercounting is safe---we may get an unrequested full GC when
316  // a scavenge would have succeeded.
317  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
318  isolate_->counters()->
319  gc_compactor_caused_by_oldspace_exhaustion()->Increment();
320  *reason = "scavenge might not succeed";
321  return MARK_COMPACTOR;
322  }
323 
324  // Default
325  *reason = NULL;
326  return SCAVENGER;
327 }
328 
329 
330 // TODO(1238405): Combine the infrastructure for --heap-stats and
331 // --log-gc to avoid the complicated preprocessor and flag testing.
332 void Heap::ReportStatisticsBeforeGC() {
333  // Heap::ReportHeapStatistics will also log NewSpace statistics when
334  // --log-gc is set. The following logic is used to avoid
335  // double logging.
336 #ifdef DEBUG
337  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
338  if (FLAG_heap_stats) {
339  ReportHeapStatistics("Before GC");
340  } else if (FLAG_log_gc) {
341  new_space_.ReportStatistics();
342  }
343  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
344 #else
345  if (FLAG_log_gc) {
346  new_space_.CollectStatistics();
347  new_space_.ReportStatistics();
348  new_space_.ClearHistograms();
349  }
350 #endif // DEBUG
351 }
352 
353 
354 void Heap::PrintShortHeapStatistics() {
355  if (!FLAG_trace_gc_verbose) return;
356  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
357  ", available: %6" V8_PTR_PREFIX "d KB\n",
358  isolate_->memory_allocator()->Size() / KB,
359  isolate_->memory_allocator()->Available() / KB);
360  PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
361  ", available: %6" V8_PTR_PREFIX "d KB"
362  ", committed: %6" V8_PTR_PREFIX "d KB\n",
363  new_space_.Size() / KB,
364  new_space_.Available() / KB,
365  new_space_.CommittedMemory() / KB);
366  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
367  ", available: %6" V8_PTR_PREFIX "d KB"
368  ", committed: %6" V8_PTR_PREFIX "d KB\n",
369  old_pointer_space_->SizeOfObjects() / KB,
370  old_pointer_space_->Available() / KB,
371  old_pointer_space_->CommittedMemory() / KB);
372  PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
373  ", available: %6" V8_PTR_PREFIX "d KB"
374  ", committed: %6" V8_PTR_PREFIX "d KB\n",
375  old_data_space_->SizeOfObjects() / KB,
376  old_data_space_->Available() / KB,
377  old_data_space_->CommittedMemory() / KB);
378  PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
379  ", available: %6" V8_PTR_PREFIX "d KB"
380  ", committed: %6" V8_PTR_PREFIX "d KB\n",
381  code_space_->SizeOfObjects() / KB,
382  code_space_->Available() / KB,
383  code_space_->CommittedMemory() / KB);
384  PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
385  ", available: %6" V8_PTR_PREFIX "d KB"
386  ", committed: %6" V8_PTR_PREFIX "d KB\n",
387  map_space_->SizeOfObjects() / KB,
388  map_space_->Available() / KB,
389  map_space_->CommittedMemory() / KB);
390  PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
391  ", available: %6" V8_PTR_PREFIX "d KB"
392  ", committed: %6" V8_PTR_PREFIX "d KB\n",
393  cell_space_->SizeOfObjects() / KB,
394  cell_space_->Available() / KB,
395  cell_space_->CommittedMemory() / KB);
396  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
397  ", available: %6" V8_PTR_PREFIX "d KB"
398  ", committed: %6" V8_PTR_PREFIX "d KB\n",
399  property_cell_space_->SizeOfObjects() / KB,
400  property_cell_space_->Available() / KB,
401  property_cell_space_->CommittedMemory() / KB);
402  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
403  ", available: %6" V8_PTR_PREFIX "d KB"
404  ", committed: %6" V8_PTR_PREFIX "d KB\n",
405  lo_space_->SizeOfObjects() / KB,
406  lo_space_->Available() / KB,
407  lo_space_->CommittedMemory() / KB);
408  PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
409  ", available: %6" V8_PTR_PREFIX "d KB"
410  ", committed: %6" V8_PTR_PREFIX "d KB\n",
411  this->SizeOfObjects() / KB,
412  this->Available() / KB,
413  this->CommittedMemory() / KB);
414  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
415  static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
416  PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
417 }
418 
419 
420 // TODO(1238405): Combine the infrastructure for --heap-stats and
421 // --log-gc to avoid the complicated preprocessor and flag testing.
422 void Heap::ReportStatisticsAfterGC() {
423  // Similar to the before-GC case, we use some complicated logic to ensure that
424  // NewSpace statistics are logged exactly once when --log-gc is turned on.
425 #if defined(DEBUG)
426  if (FLAG_heap_stats) {
427  new_space_.CollectStatistics();
428  ReportHeapStatistics("After GC");
429  } else if (FLAG_log_gc) {
430  new_space_.ReportStatistics();
431  }
432 #else
433  if (FLAG_log_gc) new_space_.ReportStatistics();
434 #endif // DEBUG
435 }
436 
437 
438 void Heap::GarbageCollectionPrologue() {
439  { AllowHeapAllocation for_the_first_part_of_prologue;
440  ClearJSFunctionResultCaches();
441  gc_count_++;
442  unflattened_strings_length_ = 0;
443 
444  if (FLAG_flush_code && FLAG_flush_code_incrementally) {
445  mark_compact_collector()->EnableCodeFlushing(true);
446  }
447 
448 #ifdef VERIFY_HEAP
449  if (FLAG_verify_heap) {
450  Verify();
451  }
452 #endif
453  }
454 
455  UpdateMaximumCommitted();
456 
457 #ifdef DEBUG
458  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
459 
460  if (FLAG_gc_verbose) Print();
461 
462  ReportStatisticsBeforeGC();
463 #endif // DEBUG
464 
465  store_buffer()->GCPrologue();
466 
467  if (isolate()->concurrent_osr_enabled()) {
468  isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
469  }
470 }
471 
472 
473 intptr_t Heap::SizeOfObjects() {
474  intptr_t total = 0;
475  AllSpaces spaces(this);
476  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
477  total += space->SizeOfObjects();
478  }
479  return total;
480 }
481 
482 
483 void Heap::ClearAllICsByKind(Code::Kind kind) {
484  HeapObjectIterator it(code_space());
485 
486  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
487  Code* code = Code::cast(object);
488  Code::Kind current_kind = code->kind();
489  if (current_kind == Code::FUNCTION ||
490  current_kind == Code::OPTIMIZED_FUNCTION) {
491  code->ClearInlineCaches(kind);
492  }
493  }
494 }
495 
496 
497 void Heap::RepairFreeListsAfterBoot() {
498  PagedSpaces spaces(this);
499  for (PagedSpace* space = spaces.next();
500  space != NULL;
501  space = spaces.next()) {
502  space->RepairFreeListsAfterBoot();
503  }
504 }
505 
506 
507 void Heap::ProcessPretenuringFeedback() {
508  if (FLAG_allocation_site_pretenuring) {
509  int tenure_decisions = 0;
510  int dont_tenure_decisions = 0;
511  int allocation_mementos_found = 0;
512  int allocation_sites = 0;
513  int active_allocation_sites = 0;
514 
515  // If the scratchpad overflowed, we have to iterate over the allocation
516  // sites list.
517  bool use_scratchpad =
518  allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize;
519 
520  int i = 0;
521  Object* list_element = allocation_sites_list();
522  bool trigger_deoptimization = false;
523  while (use_scratchpad ?
524  i < allocation_sites_scratchpad_length_ :
525  list_element->IsAllocationSite()) {
526  AllocationSite* site = use_scratchpad ?
527  AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
528  AllocationSite::cast(list_element);
529  allocation_mementos_found += site->memento_found_count();
530  if (site->memento_found_count() > 0) {
531  active_allocation_sites++;
532  }
533  if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
534  if (site->GetPretenureMode() == TENURED) {
535  tenure_decisions++;
536  } else {
537  dont_tenure_decisions++;
538  }
539  allocation_sites++;
540  if (use_scratchpad) {
541  i++;
542  } else {
543  list_element = site->weak_next();
544  }
545  }
546 
547  if (trigger_deoptimization) {
548  isolate_->stack_guard()->DeoptMarkedAllocationSites();
549  }
550 
551  FlushAllocationSitesScratchpad();
552 
553  if (FLAG_trace_pretenuring_statistics &&
554  (allocation_mementos_found > 0 ||
555  tenure_decisions > 0 ||
556  dont_tenure_decisions > 0)) {
557  PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
558  "#mementos, #tenure decisions, #donttenure decisions) "
559  "(%s, %d, %d, %d, %d, %d)\n",
560  use_scratchpad ? "use scratchpad" : "use list",
561  allocation_sites,
562  active_allocation_sites,
563  allocation_mementos_found,
564  tenure_decisions,
565  dont_tenure_decisions);
566  }
567  }
568 }
569 
570 
571 void Heap::DeoptMarkedAllocationSites() {
572  // TODO(hpayer): If iterating over the allocation sites list becomes a
573  // performance issue, use a cache heap data structure instead (similar to the
574  // allocation sites scratchpad).
575  Object* list_element = allocation_sites_list();
576  while (list_element->IsAllocationSite()) {
577  AllocationSite* site = AllocationSite::cast(list_element);
578  if (site->deopt_dependent_code()) {
579  site->dependent_code()->MarkCodeForDeoptimization(
580  isolate_,
581  DependentCode::kAllocationSiteTenuringChangedGroup);
582  site->set_deopt_dependent_code(false);
583  }
584  list_element = site->weak_next();
585  }
586  Deoptimizer::DeoptimizeMarkedCode(isolate_);
587 }
588 
589 
590 void Heap::GarbageCollectionEpilogue() {
591  store_buffer()->GCEpilogue();
592 
593  // In release mode, we only zap the from space under heap verification.
594  if (Heap::ShouldZapGarbage()) {
595  ZapFromSpace();
596  }
597 
598  // Process pretenuring feedback and update allocation sites.
599  ProcessPretenuringFeedback();
600 
601 #ifdef VERIFY_HEAP
602  if (FLAG_verify_heap) {
603  Verify();
604  }
605 #endif
606 
607  AllowHeapAllocation for_the_rest_of_the_epilogue;
608 
609 #ifdef DEBUG
610  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
611  if (FLAG_print_handles) PrintHandles();
612  if (FLAG_gc_verbose) Print();
613  if (FLAG_code_stats) ReportCodeStatistics("After GC");
614 #endif
615  if (FLAG_deopt_every_n_garbage_collections > 0) {
616  // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
617  // the topmost optimized frame can be deoptimized safely, because it
618  // might not have a lazy bailout point right after its current PC.
619  if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
620  Deoptimizer::DeoptimizeAll(isolate());
621  gcs_since_last_deopt_ = 0;
622  }
623  }
624 
625  UpdateMaximumCommitted();
626 
627  isolate_->counters()->alive_after_last_gc()->Set(
628  static_cast<int>(SizeOfObjects()));
629 
630  isolate_->counters()->string_table_capacity()->Set(
631  string_table()->Capacity());
632  isolate_->counters()->number_of_symbols()->Set(
633  string_table()->NumberOfElements());
634 
635  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
636  isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
637  static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
638  (crankshaft_codegen_bytes_generated_
639  + full_codegen_bytes_generated_)));
640  }
641 
642  if (CommittedMemory() > 0) {
643  isolate_->counters()->external_fragmentation_total()->AddSample(
644  static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
645 
646  isolate_->counters()->heap_fraction_new_space()->
647  AddSample(static_cast<int>(
648  (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
649  isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
650  static_cast<int>(
651  (old_pointer_space()->CommittedMemory() * 100.0) /
652  CommittedMemory()));
653  isolate_->counters()->heap_fraction_old_data_space()->AddSample(
654  static_cast<int>(
655  (old_data_space()->CommittedMemory() * 100.0) /
656  CommittedMemory()));
657  isolate_->counters()->heap_fraction_code_space()->
658  AddSample(static_cast<int>(
659  (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
660  isolate_->counters()->heap_fraction_map_space()->AddSample(
661  static_cast<int>(
662  (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
663  isolate_->counters()->heap_fraction_cell_space()->AddSample(
664  static_cast<int>(
665  (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
666  isolate_->counters()->heap_fraction_property_cell_space()->
667  AddSample(static_cast<int>(
668  (property_cell_space()->CommittedMemory() * 100.0) /
669  CommittedMemory()));
670  isolate_->counters()->heap_fraction_lo_space()->
671  AddSample(static_cast<int>(
672  (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
673 
674  isolate_->counters()->heap_sample_total_committed()->AddSample(
675  static_cast<int>(CommittedMemory() / KB));
676  isolate_->counters()->heap_sample_total_used()->AddSample(
677  static_cast<int>(SizeOfObjects() / KB));
678  isolate_->counters()->heap_sample_map_space_committed()->AddSample(
679  static_cast<int>(map_space()->CommittedMemory() / KB));
680  isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
681  static_cast<int>(cell_space()->CommittedMemory() / KB));
682  isolate_->counters()->
683  heap_sample_property_cell_space_committed()->
684  AddSample(static_cast<int>(
685  property_cell_space()->CommittedMemory() / KB));
686  isolate_->counters()->heap_sample_code_space_committed()->AddSample(
687  static_cast<int>(code_space()->CommittedMemory() / KB));
688 
689  isolate_->counters()->heap_sample_maximum_committed()->AddSample(
690  static_cast<int>(MaximumCommittedMemory() / KB));
691  }
692 
693 #define UPDATE_COUNTERS_FOR_SPACE(space) \
694  isolate_->counters()->space##_bytes_available()->Set( \
695  static_cast<int>(space()->Available())); \
696  isolate_->counters()->space##_bytes_committed()->Set( \
697  static_cast<int>(space()->CommittedMemory())); \
698  isolate_->counters()->space##_bytes_used()->Set( \
699  static_cast<int>(space()->SizeOfObjects()));
700 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
701  if (space()->CommittedMemory() > 0) { \
702  isolate_->counters()->external_fragmentation_##space()->AddSample( \
703  static_cast<int>(100 - \
704  (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
705  }
706 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
707  UPDATE_COUNTERS_FOR_SPACE(space) \
708  UPDATE_FRAGMENTATION_FOR_SPACE(space)
709 
710  UPDATE_COUNTERS_FOR_SPACE(new_space)
711  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
712  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
713  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
714  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
715  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
716  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
717  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
718 #undef UPDATE_COUNTERS_FOR_SPACE
719 #undef UPDATE_FRAGMENTATION_FOR_SPACE
720 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
721 
722 #if defined(DEBUG)
723  ReportStatisticsAfterGC();
724 #endif // DEBUG
725 #ifdef ENABLE_DEBUGGER_SUPPORT
726  isolate_->debug()->AfterGarbageCollection();
727 #endif // ENABLE_DEBUGGER_SUPPORT
728 }
729 
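// --- Illustrative sketch (not part of heap.cc) ------------------------------
// The fragmentation counters sampled in the epilogue above all use the same
// formula: 100 - used * 100 / committed. A worked example with hypothetical
// numbers (24 MB of live objects in 32 MB of committed memory is reported as
// 25% external fragmentation):

#include <cassert>

static int ExternalFragmentationPercent(long used_bytes, long committed_bytes) {
  // Mirrors UPDATE_FRAGMENTATION_FOR_SPACE and external_fragmentation_total.
  return static_cast<int>(100 - (used_bytes * 100.0) / committed_bytes);
}

int main() {
  assert(ExternalFragmentationPercent(24L << 20, 32L << 20) == 25);
  assert(ExternalFragmentationPercent(32L << 20, 32L << 20) == 0);
  return 0;
}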
730 
731 void Heap::CollectAllGarbage(int flags,
732  const char* gc_reason,
733  const v8::GCCallbackFlags gc_callback_flags) {
734  // Since we are ignoring the return value, the exact choice of space does
735  // not matter, so long as we do not specify NEW_SPACE, which would not
736  // cause a full GC.
737  mark_compact_collector_.SetFlags(flags);
738  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
739  mark_compact_collector_.SetFlags(kNoGCFlags);
740 }
741 
742 
743 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
744  // Since we are ignoring the return value, the exact choice of space does
745  // not matter, so long as we do not specify NEW_SPACE, which would not
746  // cause a full GC.
747  // Major GC would invoke weak handle callbacks on weakly reachable
748  // handles, but won't collect weakly reachable objects until next
749  // major GC. Therefore if we collect aggressively and weak handle callback
750  // has been invoked, we rerun major GC to release objects which become
751  // garbage.
752  // Note: as weak callbacks can execute arbitrary code, we cannot
753  // hope that eventually there will be no weak callbacks invocations.
754  // Therefore stop recollecting after several attempts.
755  if (isolate()->concurrent_recompilation_enabled()) {
756  // The optimizing compiler may be unnecessarily holding on to memory.
757  DisallowHeapAllocation no_recursive_gc;
758  isolate()->optimizing_compiler_thread()->Flush();
759  }
760  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
761  kReduceMemoryFootprintMask);
762  isolate_->compilation_cache()->Clear();
763  const int kMaxNumberOfAttempts = 7;
764  const int kMinNumberOfAttempts = 2;
765  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
766  if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
767  attempt + 1 >= kMinNumberOfAttempts) {
768  break;
769  }
770  }
771  mark_compact_collector()->SetFlags(kNoGCFlags);
772  new_space_.Shrink();
773  UncommitFromSpace();
774  incremental_marking()->UncommitMarkingDeque();
775 }
776 
777 
778 void Heap::EnsureFillerObjectAtTop() {
779  // There may be an allocation memento behind every object in new space.
780  // If we evacuate a not full new space or if we are on the last page of
781  // the new space, then there may be uninitialized memory behind the top
782  // pointer of the new space page. We store a filler object there to
783  // identify the unused space.
784  Address from_top = new_space_.top();
785  Address from_limit = new_space_.limit();
786  if (from_top < from_limit) {
787  int remaining_in_page = static_cast<int>(from_limit - from_top);
788  CreateFillerObjectAt(from_top, remaining_in_page);
789  }
790 }
791 
792 
793 bool Heap::CollectGarbage(GarbageCollector collector,
794  const char* gc_reason,
795  const char* collector_reason,
796  const v8::GCCallbackFlags gc_callback_flags) {
797  // The VM is in the GC state until exiting this function.
798  VMState<GC> state(isolate_);
799 
800 #ifdef DEBUG
801  // Reset the allocation timeout to the GC interval, but make sure to
802  // allow at least a few allocations after a collection. The reason
803  // for this is that we have a lot of allocation sequences and we
804  // assume that a garbage collection will allow the subsequent
805  // allocation attempts to go through.
806  allocation_timeout_ = Max(6, FLAG_gc_interval);
807 #endif
808 
809  EnsureFillerObjectAtTop();
810 
811  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
812  if (FLAG_trace_incremental_marking) {
813  PrintF("[IncrementalMarking] Scavenge during marking.\n");
814  }
815  }
816 
817  if (collector == MARK_COMPACTOR &&
818  !mark_compact_collector()->abort_incremental_marking() &&
819  !incremental_marking()->IsStopped() &&
820  !incremental_marking()->should_hurry() &&
821  FLAG_incremental_marking_steps) {
822  // Make progress in incremental marking.
823  const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
824  incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
825  IncrementalMarking::NO_GC_VIA_STACK_GUARD);
826  if (!incremental_marking()->IsComplete()) {
827  if (FLAG_trace_incremental_marking) {
828  PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
829  }
830  collector = SCAVENGER;
831  collector_reason = "incremental marking delaying mark-sweep";
832  }
833  }
834 
835  bool next_gc_likely_to_collect_more = false;
836 
837  { GCTracer tracer(this, gc_reason, collector_reason);
838  ASSERT(AllowHeapAllocation::IsAllowed());
839  DisallowHeapAllocation no_allocation_during_gc;
840  GarbageCollectionPrologue();
841  // The GC count was incremented in the prologue. Tell the tracer about
842  // it.
843  tracer.set_gc_count(gc_count_);
844 
845  // Tell the tracer which collector we've selected.
846  tracer.set_collector(collector);
847 
848  {
849  HistogramTimerScope histogram_timer_scope(
850  (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
851  : isolate_->counters()->gc_compactor());
852  next_gc_likely_to_collect_more =
853  PerformGarbageCollection(collector, &tracer, gc_callback_flags);
854  }
855 
856  GarbageCollectionEpilogue();
857  }
858 
859  // Start incremental marking for the next cycle. The heap snapshot
860  // generator needs incremental marking to stay off after it aborted.
861  if (!mark_compact_collector()->abort_incremental_marking() &&
862  incremental_marking()->IsStopped() &&
863  incremental_marking()->WorthActivating() &&
864  NextGCIsLikelyToBeFull()) {
865  incremental_marking()->Start();
866  }
867 
868  return next_gc_likely_to_collect_more;
869 }
870 
871 
872 int Heap::NotifyContextDisposed() {
873  if (isolate()->concurrent_recompilation_enabled()) {
874  // Flush the queued recompilation tasks.
875  isolate()->optimizing_compiler_thread()->Flush();
876  }
877  flush_monomorphic_ics_ = true;
878  AgeInlineCaches();
879  return ++contexts_disposed_;
880 }
881 
882 
883 void Heap::MoveElements(FixedArray* array,
884  int dst_index,
885  int src_index,
886  int len) {
887  if (len == 0) return;
888 
889  ASSERT(array->map() != fixed_cow_array_map());
890  Object** dst_objects = array->data_start() + dst_index;
891  OS::MemMove(dst_objects,
892  array->data_start() + src_index,
893  len * kPointerSize);
894  if (!InNewSpace(array)) {
895  for (int i = 0; i < len; i++) {
896  // TODO(hpayer): check store buffer for entries
897  if (InNewSpace(dst_objects[i])) {
898  RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
899  }
900  }
901  }
902  incremental_marking()->RecordWrites(array);
903 }
904 
905 
906 #ifdef VERIFY_HEAP
907 // Helper class for verifying the string table.
908 class StringTableVerifier : public ObjectVisitor {
909  public:
910  void VisitPointers(Object** start, Object** end) {
911  // Visit all HeapObject pointers in [start, end).
912  for (Object** p = start; p < end; p++) {
913  if ((*p)->IsHeapObject()) {
914  // Check that the string is actually internalized.
915  CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
916  (*p)->IsInternalizedString());
917  }
918  }
919  }
920 };
921 
922 
923 static void VerifyStringTable(Heap* heap) {
924  StringTableVerifier verifier;
925  heap->string_table()->IterateElements(&verifier);
926 }
927 #endif // VERIFY_HEAP
928 
929 
930 static bool AbortIncrementalMarkingAndCollectGarbage(
931  Heap* heap,
932  AllocationSpace space,
933  const char* gc_reason = NULL) {
934  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
935  bool result = heap->CollectGarbage(space, gc_reason);
936  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
937  return result;
938 }
939 
940 
941 void Heap::ReserveSpace(int *sizes, Address *locations_out) {
942  bool gc_performed = true;
943  int counter = 0;
944  static const int kThreshold = 20;
945  while (gc_performed && counter++ < kThreshold) {
946  gc_performed = false;
947  ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
948  for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
949  if (sizes[space] != 0) {
950  MaybeObject* allocation;
951  if (space == NEW_SPACE) {
952  allocation = new_space()->AllocateRaw(sizes[space]);
953  } else {
954  allocation = paged_space(space)->AllocateRaw(sizes[space]);
955  }
956  FreeListNode* node;
957  if (!allocation->To<FreeListNode>(&node)) {
958  if (space == NEW_SPACE) {
959  Heap::CollectGarbage(NEW_SPACE,
960  "failed to reserve space in the new space");
961  } else {
962  AbortIncrementalMarkingAndCollectGarbage(
963  this,
964  static_cast<AllocationSpace>(space),
965  "failed to reserve space in paged space");
966  }
967  gc_performed = true;
968  break;
969  } else {
970  // Mark with a free list node, in case we have a GC before
971  // deserializing.
972  node->set_size(this, sizes[space]);
973  locations_out[space] = node->address();
974  }
975  }
976  }
977  }
978 
979  if (gc_performed) {
980  // Failed to reserve the space after several attempts.
981  V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
982  }
983 }
984 
985 
986 void Heap::EnsureFromSpaceIsCommitted() {
987  if (new_space_.CommitFromSpaceIfNeeded()) return;
988 
989  // Committing memory to from space failed.
990  // Memory is exhausted and we will die.
991  V8::FatalProcessOutOfMemory("Committing semi space failed.");
992 }
993 
994 
995 void Heap::ClearJSFunctionResultCaches() {
996  if (isolate_->bootstrapper()->IsActive()) return;
997 
998  Object* context = native_contexts_list_;
999  while (!context->IsUndefined()) {
1000  // Get the caches for this context. GC can happen when the context
1001  // is not fully initialized, so the caches can be undefined.
1002  Object* caches_or_undefined =
1003  Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
1004  if (!caches_or_undefined->IsUndefined()) {
1005  FixedArray* caches = FixedArray::cast(caches_or_undefined);
1006  // Clear the caches:
1007  int length = caches->length();
1008  for (int i = 0; i < length; i++) {
1009  JSFunctionResultCache::cast(caches->get(i))->Clear();
1010  }
1011  }
1012  // Get the next context:
1013  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1014  }
1015 }
1016 
1017 
1018 void Heap::ClearNormalizedMapCaches() {
1019  if (isolate_->bootstrapper()->IsActive() &&
1020  !incremental_marking()->IsMarking()) {
1021  return;
1022  }
1023 
1024  Object* context = native_contexts_list_;
1025  while (!context->IsUndefined()) {
1026  // GC can happen when the context is not fully initialized,
1027  // so the cache can be undefined.
1028  Object* cache =
1029  Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1030  if (!cache->IsUndefined()) {
1031  NormalizedMapCache::cast(cache)->Clear();
1032  }
1033  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1034  }
1035 }
1036 
1037 
1038 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
1039  if (start_new_space_size == 0) return;
1040 
1041  double survival_rate =
1042  (static_cast<double>(young_survivors_after_last_gc_) * 100) /
1043  start_new_space_size;
1044 
1045  if (survival_rate > kYoungSurvivalRateHighThreshold) {
1046  high_survival_rate_period_length_++;
1047  } else {
1048  high_survival_rate_period_length_ = 0;
1049  }
1050 
1051  if (survival_rate < kYoungSurvivalRateLowThreshold) {
1052  low_survival_rate_period_length_++;
1053  } else {
1054  low_survival_rate_period_length_ = 0;
1055  }
1056 
1057  double survival_rate_diff = survival_rate_ - survival_rate;
1058 
1059  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
1060  set_survival_rate_trend(DECREASING);
1061  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
1062  set_survival_rate_trend(INCREASING);
1063  } else {
1064  set_survival_rate_trend(STABLE);
1065  }
1066 
1067  survival_rate_ = survival_rate;
1068 }
1069 
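// --- Illustrative sketch (not part of heap.cc) ------------------------------
// Worked example of the survival rate bookkeeping above. The rate is the
// percentage of the new space contents that survived the last collection, and
// the trend compares it against the previous rate. The deviation constant
// below is an assumption of this sketch (heap.h defines the real thresholds):

#include <cassert>
#include <cstring>

static const double kAllowedDeviation = 15;  // assumed stand-in for the heap.h constant

static const char* SurvivalRateTrend(double previous_rate, double current_rate) {
  double diff = previous_rate - current_rate;  // same sign convention as above
  if (diff > kAllowedDeviation) return "DECREASING";
  if (diff < -kAllowedDeviation) return "INCREASING";
  return "STABLE";
}

int main() {
  // 256 KB of survivors out of a 1 MB new space is a 25% survival rate.
  double rate = (256.0 * 1024 * 100) / (1024 * 1024);
  assert(rate == 25.0);
  // Jumping from 25% to 45% exceeds the allowed deviation: trend is INCREASING.
  assert(std::strcmp(SurvivalRateTrend(25, 45), "INCREASING") == 0);
  assert(std::strcmp(SurvivalRateTrend(45, 40), "STABLE") == 0);
  return 0;
}
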
1070 bool Heap::PerformGarbageCollection(
1071  GarbageCollector collector,
1072  GCTracer* tracer,
1073  const v8::GCCallbackFlags gc_callback_flags) {
1074  bool next_gc_likely_to_collect_more = false;
1075 
1076  if (collector != SCAVENGER) {
1077  PROFILE(isolate_, CodeMovingGCEvent());
1078  }
1079 
1080 #ifdef VERIFY_HEAP
1081  if (FLAG_verify_heap) {
1082  VerifyStringTable(this);
1083  }
1084 #endif
1085 
1086  GCType gc_type =
1087  collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1088 
1089  { GCCallbacksScope scope(this);
1090  if (scope.CheckReenter()) {
1091  AllowHeapAllocation allow_allocation;
1092  GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1093  VMState<EXTERNAL> state(isolate_);
1094  HandleScope handle_scope(isolate_);
1095  CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1096  }
1097  }
1098 
1099  EnsureFromSpaceIsCommitted();
1100 
1101  int start_new_space_size = Heap::new_space()->SizeAsInt();
1102 
1103  if (IsHighSurvivalRate()) {
1104  // We speed up the incremental marker if it is running so that it
1105  // does not fall behind the rate of promotion, which would cause a
1106  // constantly growing old space.
1107  incremental_marking()->NotifyOfHighPromotionRate();
1108  }
1109 
1110  if (collector == MARK_COMPACTOR) {
1111  // Perform mark-sweep with optional compaction.
1112  MarkCompact(tracer);
1113  sweep_generation_++;
1114 
1115  UpdateSurvivalRateTrend(start_new_space_size);
1116 
1117  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
1118 
1119  old_generation_allocation_limit_ =
1120  OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
1121 
1122  old_gen_exhausted_ = false;
1123  } else {
1124  tracer_ = tracer;
1125  Scavenge();
1126  tracer_ = NULL;
1127 
1128  UpdateSurvivalRateTrend(start_new_space_size);
1129  }
1130 
1131  if (!new_space_high_promotion_mode_active_ &&
1132  new_space_.Capacity() == new_space_.MaximumCapacity() &&
1133  IsStableOrIncreasingSurvivalTrend() &&
1134  IsHighSurvivalRate()) {
1135  // Stable high survival rates even though young generation is at
1136  // maximum capacity indicates that most objects will be promoted.
1137  // To decrease scavenger pauses and final mark-sweep pauses, we
1138  // have to limit maximal capacity of the young generation.
1139  new_space_high_promotion_mode_active_ = true;
1140  if (FLAG_trace_gc) {
1141  PrintPID("Limited new space size due to high promotion rate: %d MB\n",
1142  new_space_.InitialCapacity() / MB);
1143  }
1144  // The high promotion mode is our indicator to turn on pretenuring. We have
1145  // to deoptimize all optimized code in global pretenuring mode and all
1146  // code which should be tenured in local pretenuring mode.
1147  if (FLAG_pretenuring) {
1148  if (!FLAG_allocation_site_pretenuring) {
1149  isolate_->stack_guard()->FullDeopt();
1150  }
1151  }
1152  } else if (new_space_high_promotion_mode_active_ &&
1153  IsStableOrDecreasingSurvivalTrend() &&
1154  IsLowSurvivalRate()) {
1155  // Decreasing low survival rates might indicate that the above high
1156  // promotion mode is over and we should allow the young generation
1157  // to grow again.
1158  new_space_high_promotion_mode_active_ = false;
1159  if (FLAG_trace_gc) {
1160  PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
1161  new_space_.MaximumCapacity() / MB);
1162  }
1163  // Trigger deoptimization here to turn off global pretenuring as soon as
1164  // possible.
1165  if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) {
1166  isolate_->stack_guard()->FullDeopt();
1167  }
1168  }
1169 
1170  if (new_space_high_promotion_mode_active_ &&
1171  new_space_.Capacity() > new_space_.InitialCapacity()) {
1172  new_space_.Shrink();
1173  }
1174 
1175  isolate_->counters()->objs_since_last_young()->Set(0);
1176 
1177  // Callbacks that fire after this point might trigger nested GCs and
1178  // restart incremental marking, the assertion can't be moved down.
1179  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1180 
1181  gc_post_processing_depth_++;
1182  { AllowHeapAllocation allow_allocation;
1183  GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1184  next_gc_likely_to_collect_more =
1185  isolate_->global_handles()->PostGarbageCollectionProcessing(
1186  collector, tracer);
1187  }
1188  gc_post_processing_depth_--;
1189 
1190  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1191 
1192  // Update relocatables.
1193  Relocatable::PostGarbageCollectionProcessing(isolate_);
1194 
1195  if (collector == MARK_COMPACTOR) {
1196  // Register the amount of external allocated memory.
1197  amount_of_external_allocated_memory_at_last_global_gc_ =
1198  amount_of_external_allocated_memory_;
1199  }
1200 
1201  { GCCallbacksScope scope(this);
1202  if (scope.CheckReenter()) {
1203  AllowHeapAllocation allow_allocation;
1204  GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1205  VMState<EXTERNAL> state(isolate_);
1206  HandleScope handle_scope(isolate_);
1207  CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1208  }
1209  }
1210 
1211 #ifdef VERIFY_HEAP
1212  if (FLAG_verify_heap) {
1213  VerifyStringTable(this);
1214  }
1215 #endif
1216 
1217  return next_gc_likely_to_collect_more;
1218 }
1219 
1220 
1221 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1222  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1223  if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1224  if (!gc_prologue_callbacks_[i].pass_isolate_) {
1225  v8::GCPrologueCallback callback =
1226  reinterpret_cast<v8::GCPrologueCallback>(
1227  gc_prologue_callbacks_[i].callback);
1228  callback(gc_type, flags);
1229  } else {
1230  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1231  gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1232  }
1233  }
1234  }
1235 }
1236 
1237 
1238 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1239  GCCallbackFlags gc_callback_flags) {
1240  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1241  if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1242  if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1243  v8::GCPrologueCallback callback =
1244  reinterpret_cast<v8::GCPrologueCallback>(
1245  gc_epilogue_callbacks_[i].callback);
1246  callback(gc_type, gc_callback_flags);
1247  } else {
1248  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1249  gc_epilogue_callbacks_[i].callback(
1250  isolate, gc_type, gc_callback_flags);
1251  }
1252  }
1253  }
1254 }
1255 
1256 
1257 void Heap::MarkCompact(GCTracer* tracer) {
1258  gc_state_ = MARK_COMPACT;
1259  LOG(isolate_, ResourceEvent("markcompact", "begin"));
1260 
1261  uint64_t size_of_objects_before_gc = SizeOfObjects();
1262 
1263  mark_compact_collector_.Prepare(tracer);
1264 
1265  ms_count_++;
1266  tracer->set_full_gc_count(ms_count_);
1267 
1268  MarkCompactPrologue();
1269 
1270  mark_compact_collector_.CollectGarbage();
1271 
1272  LOG(isolate_, ResourceEvent("markcompact", "end"));
1273 
1274  gc_state_ = NOT_IN_GC;
1275 
1276  isolate_->counters()->objs_since_last_full()->Set(0);
1277 
1278  flush_monomorphic_ics_ = false;
1279 
1280  if (FLAG_allocation_site_pretenuring) {
1281  EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1282  }
1283 }
1284 
1285 
1286 void Heap::MarkCompactPrologue() {
1287  // At any old GC clear the keyed lookup cache to enable collection of unused
1288  // maps.
1289  isolate_->keyed_lookup_cache()->Clear();
1290  isolate_->context_slot_cache()->Clear();
1291  isolate_->descriptor_lookup_cache()->Clear();
1292  RegExpResultsCache::Clear(string_split_cache());
1293  RegExpResultsCache::Clear(regexp_multiple_cache());
1294 
1295  isolate_->compilation_cache()->MarkCompactPrologue();
1296 
1297  CompletelyClearInstanceofCache();
1298 
1299  FlushNumberStringCache();
1300  if (FLAG_cleanup_code_caches_at_gc) {
1301  polymorphic_code_cache()->set_cache(undefined_value());
1302  }
1303 
1303 
1304  ClearNormalizedMapCaches();
1305 }
1306 
1307 
1308 // Helper class for copying HeapObjects
1309 class ScavengeVisitor: public ObjectVisitor {
1310  public:
1311  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1312 
1313  void VisitPointer(Object** p) { ScavengePointer(p); }
1314 
1315  void VisitPointers(Object** start, Object** end) {
1316  // Copy all HeapObject pointers in [start, end)
1317  for (Object** p = start; p < end; p++) ScavengePointer(p);
1318  }
1319 
1320  private:
1321  void ScavengePointer(Object** p) {
1322  Object* object = *p;
1323  if (!heap_->InNewSpace(object)) return;
1324  Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1325  reinterpret_cast<HeapObject*>(object));
1326  }
1327 
1328  Heap* heap_;
1329 };
1330 
1331 
1332 #ifdef VERIFY_HEAP
1333 // Visitor class to verify pointers in code or data space do not point into
1334 // new space.
1335 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1336  public:
1337  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1338  void VisitPointers(Object** start, Object** end) {
1339  for (Object** current = start; current < end; current++) {
1340  if ((*current)->IsHeapObject()) {
1341  CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1342  }
1343  }
1344  }
1345 
1346  private:
1347  Heap* heap_;
1348 };
1349 
1350 
1351 static void VerifyNonPointerSpacePointers(Heap* heap) {
1352  // Verify that there are no pointers to new space in spaces where we
1353  // do not expect them.
1354  VerifyNonPointerSpacePointersVisitor v(heap);
1355  HeapObjectIterator code_it(heap->code_space());
1356  for (HeapObject* object = code_it.Next();
1357  object != NULL; object = code_it.Next())
1358  object->Iterate(&v);
1359 
1360  // The old data space is normally swept conservatively, so the iterator
1361  // does not work on it and we normally skip the next bit.
1362  if (!heap->old_data_space()->was_swept_conservatively()) {
1363  HeapObjectIterator data_it(heap->old_data_space());
1364  for (HeapObject* object = data_it.Next();
1365  object != NULL; object = data_it.Next())
1366  object->Iterate(&v);
1367  }
1368 }
1369 #endif // VERIFY_HEAP
1370 
1371 
1372 void Heap::CheckNewSpaceExpansionCriteria() {
1373  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1374  survived_since_last_expansion_ > new_space_.Capacity() &&
1375  !new_space_high_promotion_mode_active_) {
1376  // Grow the size of new space if there is room to grow, enough data
1377  // has survived scavenge since the last expansion and we are not in
1378  // high promotion mode.
1379  new_space_.Grow();
1380  survived_since_last_expansion_ = 0;
1381  }
1382 }
1383 
1384 
1385 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1386  return heap->InNewSpace(*p) &&
1387  !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1388 }
1389 
1390 
1391 void Heap::ScavengeStoreBufferCallback(
1392  Heap* heap,
1393  MemoryChunk* page,
1394  StoreBufferEvent event) {
1395  heap->store_buffer_rebuilder_.Callback(page, event);
1396 }
1397 
1398 
1399 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1400  if (event == kStoreBufferStartScanningPagesEvent) {
1401  start_of_current_page_ = NULL;
1402  current_page_ = NULL;
1403  } else if (event == kStoreBufferScanningPageEvent) {
1404  if (current_page_ != NULL) {
1405  // If this page already overflowed the store buffer during this iteration.
1406  if (current_page_->scan_on_scavenge()) {
1407  // Then we should wipe out the entries that have been added for it.
1408  store_buffer_->SetTop(start_of_current_page_);
1409  } else if (store_buffer_->Top() - start_of_current_page_ >=
1410  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1411  // Did we find too many pointers in the previous page? The heuristic is
1412  // that no page can take more than 1/5 the remaining slots in the store
1413  // buffer.
1414  current_page_->set_scan_on_scavenge(true);
1415  store_buffer_->SetTop(start_of_current_page_);
1416  } else {
1417  // In this case the page we scanned took a reasonable number of slots in
1418  // the store buffer. It has now been rehabilitated and is no longer
1419  // marked scan_on_scavenge.
1420  ASSERT(!current_page_->scan_on_scavenge());
1421  }
1422  }
1423  start_of_current_page_ = store_buffer_->Top();
1424  current_page_ = page;
1425  } else if (event == kStoreBufferFullEvent) {
1426  // The current page overflowed the store buffer again. Wipe out its entries
1427  // in the store buffer and mark it scan-on-scavenge again. This may happen
1428  // several times while scanning.
1429  if (current_page_ == NULL) {
1430  // Store Buffer overflowed while scanning promoted objects. These are not
1431  // in any particular page, though they are likely to be clustered by the
1432  // allocation routines.
1433  store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1434  } else {
1435  // Store Buffer overflowed while scanning a particular old space page for
1436  // pointers to new space.
1437  ASSERT(current_page_ == page);
1438  ASSERT(page != NULL);
1439  current_page_->set_scan_on_scavenge(true);
1440  ASSERT(start_of_current_page_ != store_buffer_->Top());
1441  store_buffer_->SetTop(start_of_current_page_);
1442  }
1443  } else {
1444  UNREACHABLE();
1445  }
1446 }
1447 
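// --- Illustrative sketch (not part of heap.cc) ------------------------------
// Worked example of the overflow heuristic in the callback above. A page is
// demoted to scan-on-scavenge once its entries reach a quarter of the slots
// still free behind it, which is the same as one fifth of the slots that were
// free when scanning of that page began (hence the "1/5" in the comment).
// Slot counts below are hypothetical:

#include <cassert>

static bool PageTakesTooManySlots(long start_of_page, long top, long limit) {
  // Same comparison as in StoreBufferRebuilder::Callback above.
  return (top - start_of_page) >= ((limit - top) >> 2);
}

int main() {
  // The page added 300 entries and 1000 slots remain free: 300 >= 250, demote.
  assert(PageTakesTooManySlots(/* start */ 1000, /* top */ 1300, /* limit */ 2300));
  // Only 200 entries with 1100 slots still free: 200 < 275, keep the entries.
  assert(!PageTakesTooManySlots(1000, 1200, 2300));
  return 0;
}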
1448 
1449 void PromotionQueue::Initialize() {
1450  // Assumes that a NewSpacePage exactly fits a number of promotion queue
1451  // entries (where each is a pair of intptr_t). This allows us to simplify
1452  // the test for when to switch pages.
1453  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1454  == 0);
1455  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1456  front_ = rear_ =
1457  reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1458  emergency_stack_ = NULL;
1459  guard_ = false;
1460 }
1461 
1462 
1463 void PromotionQueue::RelocateQueueHead() {
1464  ASSERT(emergency_stack_ == NULL);
1465 
1466  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1467  intptr_t* head_start = rear_;
1468  intptr_t* head_end =
1469  Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1470 
1471  int entries_count =
1472  static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1473 
1474  emergency_stack_ = new List<Entry>(2 * entries_count);
1475 
1476  while (head_start != head_end) {
1477  int size = static_cast<int>(*(head_start++));
1478  HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1479  emergency_stack_->Add(Entry(obj, size));
1480  }
1481  rear_ = head_end;
1482 }
1483 
1484 
1485 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1486  public:
1487  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1488 
1489  virtual Object* RetainAs(Object* object) {
1490  if (!heap_->InFromSpace(object)) {
1491  return object;
1492  }
1493 
1494  MapWord map_word = HeapObject::cast(object)->map_word();
1495  if (map_word.IsForwardingAddress()) {
1496  return map_word.ToForwardingAddress();
1497  }
1498  return NULL;
1499  }
1500 
1501  private:
1502  Heap* heap_;
1503 };
1504 
1505 
1506 void Heap::Scavenge() {
1507  RelocationLock relocation_lock(this);
1508 
1509 #ifdef VERIFY_HEAP
1510  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1511 #endif
1512 
1513  gc_state_ = SCAVENGE;
1514 
1515  // Implements Cheney's copying algorithm
1516  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1517 
1518  // Clear descriptor cache.
1519  isolate_->descriptor_lookup_cache()->Clear();
1520 
1521  // Used for updating survived_since_last_expansion_ at function end.
1522  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1523 
1524  CheckNewSpaceExpansionCriteria();
1525 
1526  SelectScavengingVisitorsTable();
1527 
1528  incremental_marking()->PrepareForScavenge();
1529 
1530  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1531  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1532 
1533  // Flip the semispaces. After flipping, to space is empty, from space has
1534  // live objects.
1535  new_space_.Flip();
1536  new_space_.ResetAllocationInfo();
1537 
1538  // We need to sweep newly copied objects which can be either in the
1539  // to space or promoted to the old generation. For to-space
1540  // objects, we treat the bottom of the to space as a queue. Newly
1541  // copied and unswept objects lie between a 'front' mark and the
1542  // allocation pointer.
1543  //
1544  // Promoted objects can go into various old-generation spaces, and
1545  // can be allocated internally in the spaces (from the free list).
1546  // We treat the top of the to space as a queue of addresses of
1547  // promoted objects. The addresses of newly promoted and unswept
1548  // objects lie between a 'front' mark and a 'rear' mark that is
1549  // updated as a side effect of promoting an object.
1550  //
1551  // There is guaranteed to be enough room at the top of the to space
1552  // for the addresses of promoted objects: every object promoted
1553  // frees up its size in bytes from the top of the new space, and
1554  // objects are at least one pointer in size.
1555  Address new_space_front = new_space_.ToSpaceStart();
1556  promotion_queue_.Initialize();
1557 
1558 #ifdef DEBUG
1559  store_buffer()->Clean();
1560 #endif
1561 
1562  ScavengeVisitor scavenge_visitor(this);
1563  // Copy roots.
1564  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1565 
1566  // Copy objects reachable from the old generation.
1567  {
1568  StoreBufferRebuildScope scope(this,
1569  store_buffer(),
1570  &ScavengeStoreBufferCallback);
1571  store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1572  }
1573 
1574  // Copy objects reachable from simple cells by scavenging cell values
1575  // directly.
1576  HeapObjectIterator cell_iterator(cell_space_);
1577  for (HeapObject* heap_object = cell_iterator.Next();
1578  heap_object != NULL;
1579  heap_object = cell_iterator.Next()) {
1580  if (heap_object->IsCell()) {
1581  Cell* cell = Cell::cast(heap_object);
1582  Address value_address = cell->ValueAddress();
1583  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1584  }
1585  }
1586 
1587  // Copy objects reachable from global property cells by scavenging global
1588  // property cell values directly.
1589  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1590  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1591  heap_object != NULL;
1592  heap_object = js_global_property_cell_iterator.Next()) {
1593  if (heap_object->IsPropertyCell()) {
1594  PropertyCell* cell = PropertyCell::cast(heap_object);
1595  Address value_address = cell->ValueAddress();
1596  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1597  Address type_address = cell->TypeAddress();
1598  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1599  }
1600  }
1601 
1602  // Copy objects reachable from the code flushing candidates list.
1603  MarkCompactCollector* collector = mark_compact_collector();
1604  if (collector->is_code_flushing_enabled()) {
1605  collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1606  }
1607 
1608  // Scavenge object reachable from the native contexts list directly.
1609  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1610 
1611  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1612 
1613  while (isolate()->global_handles()->IterateObjectGroups(
1614  &scavenge_visitor, &IsUnscavengedHeapObject)) {
1615  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1616  }
1617  isolate()->global_handles()->RemoveObjectGroups();
1618  isolate()->global_handles()->RemoveImplicitRefGroups();
1619 
1620  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1621  &IsUnscavengedHeapObject);
1622  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1623  &scavenge_visitor);
1624  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1625 
1626  UpdateNewSpaceReferencesInExternalStringTable(
1627  &UpdateNewSpaceReferenceInExternalStringTableEntry);
1628 
1629  promotion_queue_.Destroy();
1630 
1631  incremental_marking()->UpdateMarkingDequeAfterScavenge();
1632 
1633  ScavengeWeakObjectRetainer weak_object_retainer(this);
1634  ProcessWeakReferences(&weak_object_retainer);
1635 
1636  ASSERT(new_space_front == new_space_.top());
1637 
1638  // Set age mark.
1639  new_space_.set_age_mark(new_space_.top());
1640 
1641  new_space_.LowerInlineAllocationLimit(
1642  new_space_.inline_allocation_limit_step());
1643 
1644  // Update how much has survived scavenge.
1645  IncrementYoungSurvivorsCounter(static_cast<int>(
1646  (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1647 
1648  LOG(isolate_, ResourceEvent("scavenge", "end"));
1649 
1650  gc_state_ = NOT_IN_GC;
1651 
1652  scavenges_since_last_idle_round_++;
1653 }
1654 
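// --- Illustrative sketch (not part of heap.cc) ------------------------------
// The long comment in Scavenge() describes Cheney's copying algorithm: the
// bottom of to-space doubles as the work queue, with a 'front' index chasing
// the allocation end. A minimal standalone version over a toy object graph
// (all names below are hypothetical, and a std::vector stands in for to-space):

#include <cstddef>
#include <vector>

struct ToyObject {
  ToyObject() : child(NULL), forwarding(NULL) {}
  ToyObject* child;       // the only "pointer field" of a toy object
  ToyObject* forwarding;  // set in from-space once the object has been copied
};

// Copies the graph reachable from *root into to_space and returns the new root.
static ToyObject* ToyScavenge(ToyObject* root, std::vector<ToyObject>* to_space) {
  to_space->clear();
  to_space->reserve(1024);  // keep copied objects at stable addresses
  to_space->push_back(*root);
  root->forwarding = &to_space->front();
  size_t front = 0;  // objects in [front, size) are copied but not yet swept
  while (front < to_space->size()) {
    ToyObject* copy = &(*to_space)[front++];
    if (copy->child != NULL) {
      if (copy->child->forwarding == NULL) {  // not evacuated yet: copy it now
        to_space->push_back(*copy->child);
        copy->child->forwarding = &to_space->back();
      }
      copy->child = copy->child->forwarding;  // update the pointer field
    }
  }
  return root->forwarding;
}

int main() {
  ToyObject a, b;
  a.child = &b;
  b.child = &a;  // a cycle, to show the forwarding pointers at work
  std::vector<ToyObject> to_space;
  ToyObject* new_a = ToyScavenge(&a, &to_space);
  return (new_a->child->child == new_a) ? 0 : 1;  // the cycle survives the copy
}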
1655 
1656 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1657  Object** p) {
1658  MapWord first_word = HeapObject::cast(*p)->map_word();
1659 
1660  if (!first_word.IsForwardingAddress()) {
1661  // Unreachable external string can be finalized.
1662  heap->FinalizeExternalString(String::cast(*p));
1663  return NULL;
1664  }
1665 
1666  // String is still reachable.
1667  return String::cast(first_word.ToForwardingAddress());
1668 }
1669 
1670 
1671 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1672  ExternalStringTableUpdaterCallback updater_func) {
1673 #ifdef VERIFY_HEAP
1674  if (FLAG_verify_heap) {
1675  external_string_table_.Verify();
1676  }
1677 #endif
1678 
1679  if (external_string_table_.new_space_strings_.is_empty()) return;
1680 
1681  Object** start = &external_string_table_.new_space_strings_[0];
1682  Object** end = start + external_string_table_.new_space_strings_.length();
1683  Object** last = start;
1684 
1685  for (Object** p = start; p < end; ++p) {
1686  ASSERT(InFromSpace(*p));
1687  String* target = updater_func(this, p);
1688 
1689  if (target == NULL) continue;
1690 
1691  ASSERT(target->IsExternalString());
1692 
1693  if (InNewSpace(target)) {
1694  // String is still in new space. Update the table entry.
1695  *last = target;
1696  ++last;
1697  } else {
1698  // String got promoted. Move it to the old string list.
1699  external_string_table_.AddOldString(target);
1700  }
1701  }
1702 
1703  ASSERT(last <= end);
1704  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1705 }
1706 
1707 
1708 void Heap::UpdateReferencesInExternalStringTable(
1709  ExternalStringTableUpdaterCallback updater_func) {
1710 
1711  // Update old space string references.
1712  if (external_string_table_.old_space_strings_.length() > 0) {
1713  Object** start = &external_string_table_.old_space_strings_[0];
1714  Object** end = start + external_string_table_.old_space_strings_.length();
1715  for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1716  }
1717 
1718  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1719 }
1720 
1721 
1722 template <class T>
1723 struct WeakListVisitor;
1724 
1725 
1726 template <class T>
1727 static Object* VisitWeakList(Heap* heap,
1728  Object* list,
1729  WeakObjectRetainer* retainer,
1730  bool record_slots) {
1731  Object* undefined = heap->undefined_value();
1732  Object* head = undefined;
1733  T* tail = NULL;
1734  MarkCompactCollector* collector = heap->mark_compact_collector();
1735  while (list != undefined) {
1736  // Check whether to keep the candidate in the list.
1737  T* candidate = reinterpret_cast<T*>(list);
1738  Object* retained = retainer->RetainAs(list);
1739  if (retained != NULL) {
1740  if (head == undefined) {
1741  // First element in the list.
1742  head = retained;
1743  } else {
1744  // Subsequent elements in the list.
1745  ASSERT(tail != NULL);
1746  WeakListVisitor<T>::SetWeakNext(tail, retained);
1747  if (record_slots) {
1748  Object** next_slot =
1749  HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1750  collector->RecordSlot(next_slot, next_slot, retained);
1751  }
1752  }
1753  // Retained object is new tail.
1754  ASSERT(!retained->IsUndefined());
1755  candidate = reinterpret_cast<T*>(retained);
1756  tail = candidate;
1757 
1758 
1759  // tail is a live object, visit it.
1760  WeakListVisitor<T>::VisitLiveObject(
1761  heap, tail, retainer, record_slots);
1762  } else {
1763  WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1764  }
1765 
1766  // Move to next element in the list.
1767  list = WeakListVisitor<T>::WeakNext(candidate);
1768  }
1769 
1770  // Terminate the list if there are one or more elements.
1771  if (tail != NULL) {
1772  WeakListVisitor<T>::SetWeakNext(tail, undefined);
1773  }
1774  return head;
1775 }
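// VisitWeakList above is a generic unlink-the-dead pass over an intrusive
// singly linked list, parameterized by the WeakListVisitor<T> trait. A
// simplified, self-contained version of the same shape (no heap, no write
// barrier, no slot recording):

#include <functional>

struct Node {
  int value;
  Node* weak_next;
};

// Returns the new head; 'retain' plays the role of WeakObjectRetainer.
Node* VisitWeakNodeList(Node* list, const std::function<bool(Node*)>& retain) {
  Node* head = nullptr;
  Node* tail = nullptr;
  while (list != nullptr) {
    Node* candidate = list;
    list = candidate->weak_next;              // advance before relinking
    if (retain(candidate)) {
      if (head == nullptr) head = candidate;  // first survivor becomes head
      else tail->weak_next = candidate;       // splice after previous survivor
      tail = candidate;
    }
  }
  if (tail != nullptr) tail->weak_next = nullptr;  // terminate the list
  return head;
}

int main() {
  Node c{3, nullptr}, b{2, &c}, a{1, &b};
  Node* head = VisitWeakNodeList(&a, [](Node* n) { return n->value != 2; });
  return (head->value == 1 && head->weak_next == &c) ? 0 : 1;  // b unlinked
}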
1776 
1777 
1778 template <class T>
1779 static void ClearWeakList(Heap* heap,
1780  Object* list) {
1781  Object* undefined = heap->undefined_value();
1782  while (list != undefined) {
1783  T* candidate = reinterpret_cast<T*>(list);
1784  list = WeakListVisitor<T>::WeakNext(candidate);
1785  WeakListVisitor<T>::SetWeakNext(candidate, undefined);
1786  }
1787 }
1788 
1789 
1790 template<>
1791 struct WeakListVisitor<JSFunction> {
1792  static void SetWeakNext(JSFunction* function, Object* next) {
1793  function->set_next_function_link(next);
1794  }
1795 
1796  static Object* WeakNext(JSFunction* function) {
1797  return function->next_function_link();
1798  }
1799 
1800  static int WeakNextOffset() {
1801  return JSFunction::kNextFunctionLinkOffset;
1802  }
1803 
1804  static void VisitLiveObject(Heap*, JSFunction*,
1805  WeakObjectRetainer*, bool) {
1806  }
1807 
1808  static void VisitPhantomObject(Heap*, JSFunction*) {
1809  }
1810 };
1811 
1812 
1813 template<>
1814 struct WeakListVisitor<Code> {
1815  static void SetWeakNext(Code* code, Object* next) {
1816  code->set_next_code_link(next);
1817  }
1818 
1819  static Object* WeakNext(Code* code) {
1820  return code->next_code_link();
1821  }
1822 
1823  static int WeakNextOffset() {
1824  return Code::kNextCodeLinkOffset;
1825  }
1826 
1827  static void VisitLiveObject(Heap*, Code*,
1828  WeakObjectRetainer*, bool) {
1829  }
1830 
1831  static void VisitPhantomObject(Heap*, Code*) {
1832  }
1833 };
1834 
1835 
1836 template<>
1837 struct WeakListVisitor<Context> {
1838  static void SetWeakNext(Context* context, Object* next) {
1839  context->set(Context::NEXT_CONTEXT_LINK,
1840  next,
1841  UPDATE_WRITE_BARRIER);
1842  }
1843 
1844  static Object* WeakNext(Context* context) {
1845  return context->get(Context::NEXT_CONTEXT_LINK);
1846  }
1847 
1848  static void VisitLiveObject(Heap* heap,
1849  Context* context,
1850  WeakObjectRetainer* retainer,
1851  bool record_slots) {
1852  // Process the three weak lists linked off the context.
1853  DoWeakList<JSFunction>(heap, context, retainer, record_slots,
1854  Context::OPTIMIZED_FUNCTIONS_LIST);
1855  DoWeakList<Code>(heap, context, retainer, record_slots,
1856  Context::OPTIMIZED_CODE_LIST);
1857  DoWeakList<Code>(heap, context, retainer, record_slots,
1858  Context::DEOPTIMIZED_CODE_LIST);
1859  }
1860 
1861  template<class T>
1862  static void DoWeakList(Heap* heap,
1863  Context* context,
1864  WeakObjectRetainer* retainer,
1865  bool record_slots,
1866  int index) {
1867  // Visit the weak list, removing dead intermediate elements.
1868  Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
1869  record_slots);
1870 
1871  // Update the list head.
1872  context->set(index, list_head, UPDATE_WRITE_BARRIER);
1873 
1874  if (record_slots) {
1875  // Record the updated slot if necessary.
1876  Object** head_slot = HeapObject::RawField(
1877  context, FixedArray::SizeFor(index));
1878  heap->mark_compact_collector()->RecordSlot(
1879  head_slot, head_slot, list_head);
1880  }
1881  }
1882 
1883  static void VisitPhantomObject(Heap* heap, Context* context) {
1884  ClearWeakList<JSFunction>(heap,
1885  context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
1886  ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
1887  ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
1888  }
1889 
1890  static int WeakNextOffset() {
1891  return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1892  }
1893 };
1894 
1895 
1896 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1897  // We don't record weak slots during marking or scavenges.
1898  // Instead we do it once when we complete a mark-compact cycle.
1899  // Note that the write barrier has no effect if we are already in the middle
1900  // of a compacting mark-sweep cycle and we have to record slots manually.
1901  bool record_slots =
1902  gc_state() == MARK_COMPACT &&
1903  mark_compact_collector()->is_compacting();
1904  ProcessArrayBuffers(retainer, record_slots);
1905  ProcessNativeContexts(retainer, record_slots);
1906  // TODO(mvstanton): AllocationSites only need to be processed during
1907  // MARK_COMPACT, as they live in old space. Verify and address.
1908  ProcessAllocationSites(retainer, record_slots);
1909 }
1910 
1911 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1912  bool record_slots) {
1913  Object* head =
1914  VisitWeakList<Context>(
1915  this, native_contexts_list(), retainer, record_slots);
1916  // Update the head of the list of contexts.
1917  native_contexts_list_ = head;
1918 }
1919 
1920 
1921 template<>
1922 struct WeakListVisitor<JSArrayBufferView> {
1923  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1924  obj->set_weak_next(next);
1925  }
1926 
1927  static Object* WeakNext(JSArrayBufferView* obj) {
1928  return obj->weak_next();
1929  }
1930 
1931  static void VisitLiveObject(Heap*,
1932  JSArrayBufferView* obj,
1933  WeakObjectRetainer* retainer,
1934  bool record_slots) {}
1935 
1936  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1937 
1938  static int WeakNextOffset() {
1939  return JSArrayBufferView::kWeakNextOffset;
1940  }
1941 };
1942 
1943 
1944 template<>
1945 struct WeakListVisitor<JSArrayBuffer> {
1946  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1947  obj->set_weak_next(next);
1948  }
1949 
1950  static Object* WeakNext(JSArrayBuffer* obj) {
1951  return obj->weak_next();
1952  }
1953 
1954  static void VisitLiveObject(Heap* heap,
1955  JSArrayBuffer* array_buffer,
1956  WeakObjectRetainer* retainer,
1957  bool record_slots) {
1958  Object* typed_array_obj =
1959  VisitWeakList<JSArrayBufferView>(
1960  heap,
1961  array_buffer->weak_first_view(),
1962  retainer, record_slots);
1963  array_buffer->set_weak_first_view(typed_array_obj);
1964  if (typed_array_obj != heap->undefined_value() && record_slots) {
1965  Object** slot = HeapObject::RawField(
1966  array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1967  heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1968  }
1969  }
1970 
1971  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1972  Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1973  }
1974 
1975  static int WeakNextOffset() {
1976  return JSArrayBuffer::kWeakNextOffset;
1977  }
1978 };
1979 
1980 
1981 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1982  bool record_slots) {
1983  Object* array_buffer_obj =
1984  VisitWeakList<JSArrayBuffer>(this,
1985  array_buffers_list(),
1986  retainer, record_slots);
1987  set_array_buffers_list(array_buffer_obj);
1988 }
1989 
1990 
1991 void Heap::TearDownArrayBuffers() {
1992  Object* undefined = undefined_value();
1993  for (Object* o = array_buffers_list(); o != undefined;) {
1994  JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1995  Runtime::FreeArrayBuffer(isolate(), buffer);
1996  o = buffer->weak_next();
1997  }
1998  array_buffers_list_ = undefined;
1999 }
2000 
2001 
2002 template<>
2003 struct WeakListVisitor<AllocationSite> {
2004  static void SetWeakNext(AllocationSite* obj, Object* next) {
2005  obj->set_weak_next(next);
2006  }
2007 
2008  static Object* WeakNext(AllocationSite* obj) {
2009  return obj->weak_next();
2010  }
2011 
2012  static void VisitLiveObject(Heap* heap,
2013  AllocationSite* site,
2014  WeakObjectRetainer* retainer,
2015  bool record_slots) {}
2016 
2017  static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
2018 
2019  static int WeakNextOffset() {
2020  return AllocationSite::kWeakNextOffset;
2021  }
2022 };
2023 
2024 
2025 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
2026  bool record_slots) {
2027  Object* allocation_site_obj =
2028  VisitWeakList<AllocationSite>(this,
2029  allocation_sites_list(),
2030  retainer, record_slots);
2031  set_allocation_sites_list(allocation_site_obj);
2032 }
2033 
2034 
2035 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
2036  DisallowHeapAllocation no_allocation_scope;
2037  Object* cur = allocation_sites_list();
2038  bool marked = false;
2039  while (cur->IsAllocationSite()) {
2040  AllocationSite* casted = AllocationSite::cast(cur);
2041  if (casted->GetPretenureMode() == flag) {
2042  casted->ResetPretenureDecision();
2043  casted->set_deopt_dependent_code(true);
2044  marked = true;
2045  }
2046  cur = casted->weak_next();
2047  }
2048  if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites();
2049 }
2050 
2051 
2052 void Heap::EvaluateOldSpaceLocalPretenuring(
2053  uint64_t size_of_objects_before_gc) {
2054  uint64_t size_of_objects_after_gc = SizeOfObjects();
2055  double old_generation_survival_rate =
2056  (static_cast<double>(size_of_objects_after_gc) * 100) /
2057  static_cast<double>(size_of_objects_before_gc);
2058 
2059  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2060  // Too many objects died in the old generation, pretenuring of wrong
2061  // allocation sites may be the cause for that. We have to deopt all
2062  // dependent code registered in the allocation sites to re-evaluate
2063  // our pretenuring decisions.
2064  ResetAllAllocationSitesDependentCode(TENURED);
2065  if (FLAG_trace_pretenuring) {
2066  PrintF("Deopt all allocation sites dependent code due to low survival "
2067  "rate in the old generation %f\n", old_generation_survival_rate);
2068  }
2069  }
2070 }
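// The check above is a plain percentage test: if too little of the old
// generation survived this GC, all pretenuring decisions are reset. A worked
// example with made-up sizes; the threshold value below is assumed for
// illustration and is not necessarily V8's kOldSurvivalRateLowThreshold.

#include <cstdint>
#include <cstdio>

int main() {
  const double kAssumedOldSurvivalRateLowThreshold = 10.0;  // assumed value
  uint64_t size_before_gc = 80u * 1024 * 1024;              // 80 MB before GC
  uint64_t size_after_gc = 6u * 1024 * 1024;                // 6 MB survived
  double survival_rate = static_cast<double>(size_after_gc) * 100 /
                         static_cast<double>(size_before_gc);
  std::printf("old generation survival rate: %.2f%%\n", survival_rate);  // 7.50
  if (survival_rate < kAssumedOldSurvivalRateLowThreshold) {
    std::printf("reset pretenure decisions, deopt dependent code\n");
  }
}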
2071 
2072 
2073 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2074  DisallowHeapAllocation no_allocation;
2075  // All external strings are listed in the external string table.
2076 
2077  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
2078  public:
2079  explicit ExternalStringTableVisitorAdapter(
2080  v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
2081  virtual void VisitPointers(Object** start, Object** end) {
2082  for (Object** p = start; p < end; p++) {
2083  ASSERT((*p)->IsExternalString());
2084  visitor_->VisitExternalString(Utils::ToLocal(
2085  Handle<String>(String::cast(*p))));
2086  }
2087  }
2088  private:
2089  v8::ExternalResourceVisitor* visitor_;
2090  } external_string_table_visitor(visitor);
2091 
2092  external_string_table_.Iterate(&external_string_table_visitor);
2093 }
2094 
2095 
2096 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
2097  public:
2098  static inline void VisitPointer(Heap* heap, Object** p) {
2099  Object* object = *p;
2100  if (!heap->InNewSpace(object)) return;
2101  Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
2102  reinterpret_cast<HeapObject*>(object));
2103  }
2104 };
2105 
2106 
2107 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
2108  Address new_space_front) {
2109  do {
2110  SemiSpace::AssertValidRange(new_space_front, new_space_.top());
2111  // The addresses new_space_front and new_space_.top() define a
2112  // queue of unprocessed copied objects. Process them until the
2113  // queue is empty.
2114  while (new_space_front != new_space_.top()) {
2115  if (!NewSpacePage::IsAtEnd(new_space_front)) {
2116  HeapObject* object = HeapObject::FromAddress(new_space_front);
2117  new_space_front +=
2118  NewSpaceScavenger::IterateBody(object->map(), object);
2119  } else {
2120  new_space_front =
2121  NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
2122  }
2123  }
2124 
2125  // Promote and process all the to-be-promoted objects.
2126  {
2127  StoreBufferRebuildScope scope(this,
2128  store_buffer(),
2129  &ScavengeStoreBufferCallback);
2130  while (!promotion_queue()->is_empty()) {
2131  HeapObject* target;
2132  int size;
2133  promotion_queue()->remove(&target, &size);
2134 
2135  // The promoted object might already have been partially visited
2136  // during old space pointer iteration. Thus we search specifically
2137  // for pointers to the from semispace instead of looking for pointers
2138  // to new space.
2139  ASSERT(!target->IsMap());
2140  IterateAndMarkPointersToFromSpace(target->address(),
2141  target->address() + size,
2142  &ScavengeObject);
2143  }
2144  }
2145 
2146  // Take another spin if there are now unswept objects in new space
2147  // (there are currently no more unswept promoted objects).
2148  } while (new_space_front != new_space_.top());
2149 
2150  return new_space_front;
2151 }
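// DoScavenge is a Cheney-style breadth-first copy: new_space_front is the
// scan pointer chasing the allocation pointer (new_space_.top()), and
// visiting a copied object may copy further objects behind it. A toy
// standalone version over integer object ids (hypothetical object graph,
// not real heap objects):

#include <cstdio>
#include <map>
#include <vector>

int main() {
  // Hypothetical graph: object id -> ids of its children.
  std::map<int, std::vector<int> > children = {{0, {1, 2}}, {1, {3}}, {2, {3}}};
  std::vector<int> to_space = {0};      // the root has already been evacuated
  std::vector<bool> copied(4, false);
  copied[0] = true;
  size_t scan = 0;                      // analogue of new_space_front
  while (scan < to_space.size()) {      // to_space.size() ~ new_space_.top()
    int object = to_space[scan++];
    for (int child : children[object]) {
      if (!copied[child]) {             // evacuate each reachable child once
        copied[child] = true;
        to_space.push_back(child);
      }
    }
  }
  std::printf("evacuated %zu objects\n", to_space.size());  // 4
}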
2152 
2153 
2154 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2155 STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2156 
2157 
2158 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
2159  HeapObject* object,
2160  int size));
2161 
2162 static HeapObject* EnsureDoubleAligned(Heap* heap,
2163  HeapObject* object,
2164  int size) {
2165  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
2166  heap->CreateFillerObjectAt(object->address(), kPointerSize);
2167  return HeapObject::FromAddress(object->address() + kPointerSize);
2168  } else {
2169  heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
2170  kPointerSize);
2171  return object;
2172  }
2173 }
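// EnsureDoubleAligned assumes one extra pointer-sized word was allocated and
// turns it into a one-word filler, either before the object (shifting it up
// to an 8-byte boundary) or after it. The address arithmetic in isolation,
// assuming a 4-byte pointer size (on 64-bit targets kPointerSize is 8 and
// objects are already double aligned):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kToyPointerSize = 4;
  const uintptr_t kToyDoubleAlignmentMask = 8 - 1;

  uintptr_t object = 0x1000 + 4;   // candidate address, not 8-byte aligned
  uintptr_t size = 24;             // object size; 4 extra bytes were allocated
  uintptr_t filler;
  if ((object & kToyDoubleAlignmentMask) != 0) {
    filler = object;               // filler first, object shifts up one word
    object += kToyPointerSize;
  } else {
    filler = object + size;        // object stays, filler pads the tail
  }
  assert((object & kToyDoubleAlignmentMask) == 0);
  (void)filler;
}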
2174 
2175 
2176 enum LoggingAndProfiling {
2177  LOGGING_AND_PROFILING_ENABLED,
2178  LOGGING_AND_PROFILING_DISABLED
2179 };
2180 
2181 
2182 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
2183 
2184 
2185 template<MarksHandling marks_handling,
2186  LoggingAndProfiling logging_and_profiling_mode>
2187 class ScavengingVisitor : public StaticVisitorBase {
2188  public:
2189  static void Initialize() {
2190  table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
2191  table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
2192  table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
2193  table_.Register(kVisitByteArray, &EvacuateByteArray);
2194  table_.Register(kVisitFixedArray, &EvacuateFixedArray);
2195  table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
2196  table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
2197  table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
2198 
2199  table_.Register(kVisitNativeContext,
2200  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2201  template VisitSpecialized<Context::kSize>);
2202 
2203  table_.Register(kVisitConsString,
2204  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2205  template VisitSpecialized<ConsString::kSize>);
2206 
2207  table_.Register(kVisitSlicedString,
2208  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2209  template VisitSpecialized<SlicedString::kSize>);
2210 
2211  table_.Register(kVisitSymbol,
2212  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2213  template VisitSpecialized<Symbol::kSize>);
2214 
2215  table_.Register(kVisitSharedFunctionInfo,
2216  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2217  template VisitSpecialized<SharedFunctionInfo::kSize>);
2218 
2219  table_.Register(kVisitJSWeakMap,
2220  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2221  Visit);
2222 
2223  table_.Register(kVisitJSWeakSet,
2224  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2225  Visit);
2226 
2227  table_.Register(kVisitJSArrayBuffer,
2228  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2229  Visit);
2230 
2231  table_.Register(kVisitJSTypedArray,
2232  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2233  Visit);
2234 
2235  table_.Register(kVisitJSDataView,
2236  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2237  Visit);
2238 
2239  table_.Register(kVisitJSRegExp,
2240  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2241  Visit);
2242 
2243  if (marks_handling == IGNORE_MARKS) {
2244  table_.Register(kVisitJSFunction,
2245  &ObjectEvacuationStrategy<POINTER_OBJECT>::
2246  template VisitSpecialized<JSFunction::kSize>);
2247  } else {
2248  table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2249  }
2250 
2251  table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2252  kVisitDataObject,
2253  kVisitDataObjectGeneric>();
2254 
2255  table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2256  kVisitJSObject,
2257  kVisitJSObjectGeneric>();
2258 
2259  table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2260  kVisitStruct,
2261  kVisitStructGeneric>();
2262  }
2263 
2264  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2265  return &table_;
2266  }
2267 
2268  private:
2269  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2270 
2271  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2272  bool should_record = false;
2273 #ifdef DEBUG
2274  should_record = FLAG_heap_stats;
2275 #endif
2276  should_record = should_record || FLAG_log_gc;
2277  if (should_record) {
2278  if (heap->new_space()->Contains(obj)) {
2279  heap->new_space()->RecordAllocation(obj);
2280  } else {
2281  heap->new_space()->RecordPromotion(obj);
2282  }
2283  }
2284  }
2285 
2286  // Helper function used by CopyObject to copy a source object to an
2287  // allocated target object and update the forwarding pointer in the source
2288  // object. Returns the target object.
2289  INLINE(static void MigrateObject(Heap* heap,
2290  HeapObject* source,
2291  HeapObject* target,
2292  int size)) {
2293  // Copy the content of source to target.
2294  heap->CopyBlock(target->address(), source->address(), size);
2295 
2296  // Set the forwarding address.
2297  source->set_map_word(MapWord::FromForwardingAddress(target));
2298 
2299  if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2300  // Update NewSpace stats if necessary.
2301  RecordCopiedObject(heap, target);
2302  Isolate* isolate = heap->isolate();
2303  HeapProfiler* heap_profiler = isolate->heap_profiler();
2304  if (heap_profiler->is_tracking_object_moves()) {
2305  heap_profiler->ObjectMoveEvent(source->address(), target->address(),
2306  size);
2307  }
2308  if (isolate->logger()->is_logging_code_events() ||
2309  isolate->cpu_profiler()->is_profiling()) {
2310  if (target->IsSharedFunctionInfo()) {
2311  PROFILE(isolate, SharedFunctionInfoMoveEvent(
2312  source->address(), target->address()));
2313  }
2314  }
2315  }
2316 
2317  if (marks_handling == TRANSFER_MARKS) {
2318  if (Marking::TransferColor(source, target)) {
2319  MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2320  }
2321  }
2322  }
2323 
2324 
2325  template<ObjectContents object_contents, int alignment>
2326  static inline void EvacuateObject(Map* map,
2327  HeapObject** slot,
2328  HeapObject* object,
2329  int object_size) {
2331  SLOW_ASSERT(object->Size() == object_size);
2332 
2333  int allocation_size = object_size;
2334  if (alignment != kObjectAlignment) {
2335  ASSERT(alignment == kDoubleAlignment);
2336  allocation_size += kPointerSize;
2337  }
2338 
2339  Heap* heap = map->GetHeap();
2340  if (heap->ShouldBePromoted(object->address(), object_size)) {
2341  MaybeObject* maybe_result;
2342 
2343  if (object_contents == DATA_OBJECT) {
2344  ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2345  maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2346  } else {
2347  ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2348  maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
2349  }
2350 
2351  Object* result = NULL; // Initialization to please compiler.
2352  if (maybe_result->ToObject(&result)) {
2353  HeapObject* target = HeapObject::cast(result);
2354 
2355  if (alignment != kObjectAlignment) {
2356  target = EnsureDoubleAligned(heap, target, allocation_size);
2357  }
2358 
2359  // Order is important: slot might be inside of the target if target
2360  // was allocated over a dead object and slot comes from the store
2361  // buffer.
2362  *slot = target;
2363  MigrateObject(heap, object, target, object_size);
2364 
2365  if (object_contents == POINTER_OBJECT) {
2366  if (map->instance_type() == JS_FUNCTION_TYPE) {
2367  heap->promotion_queue()->insert(
2368  target, JSFunction::kNonWeakFieldsEndOffset);
2369  } else {
2370  heap->promotion_queue()->insert(target, object_size);
2371  }
2372  }
2373 
2374  heap->tracer()->increment_promoted_objects_size(object_size);
2375  return;
2376  }
2377  }
2378  ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2379  MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2380  heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2381  Object* result = allocation->ToObjectUnchecked();
2382  HeapObject* target = HeapObject::cast(result);
2383 
2384  if (alignment != kObjectAlignment) {
2385  target = EnsureDoubleAligned(heap, target, allocation_size);
2386  }
2387 
2388  // Order is important: slot might be inside of the target if target
2389  // was allocated over a dead object and slot comes from the store
2390  // buffer.
2391  *slot = target;
2392  MigrateObject(heap, object, target, object_size);
2393  return;
2394  }
2395 
2396 
2397  static inline void EvacuateJSFunction(Map* map,
2398  HeapObject** slot,
2399  HeapObject* object) {
2400  ObjectEvacuationStrategy<POINTER_OBJECT>::
2401  template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2402 
2403  HeapObject* target = *slot;
2404  MarkBit mark_bit = Marking::MarkBitFrom(target);
2405  if (Marking::IsBlack(mark_bit)) {
2406  // This object is black and it might not be rescanned by the marker.
2407  // We should explicitly record code entry slot for compaction because
2408  // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2409  // miss it as it is not HeapObject-tagged.
2410  Address code_entry_slot =
2411  target->address() + JSFunction::kCodeEntryOffset;
2412  Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2413  map->GetHeap()->mark_compact_collector()->
2414  RecordCodeEntrySlot(code_entry_slot, code);
2415  }
2416  }
2417 
2418 
2419  static inline void EvacuateFixedArray(Map* map,
2420  HeapObject** slot,
2421  HeapObject* object) {
2422  int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2423  EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2424  map, slot, object, object_size);
2425  }
2426 
2427 
2428  static inline void EvacuateFixedDoubleArray(Map* map,
2429  HeapObject** slot,
2430  HeapObject* object) {
2431  int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2432  int object_size = FixedDoubleArray::SizeFor(length);
2433  EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2434  map, slot, object, object_size);
2435  }
2436 
2437 
2438  static inline void EvacuateFixedTypedArray(Map* map,
2439  HeapObject** slot,
2440  HeapObject* object) {
2441  int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2442  EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2443  map, slot, object, object_size);
2444  }
2445 
2446 
2447  static inline void EvacuateFixedFloat64Array(Map* map,
2448  HeapObject** slot,
2449  HeapObject* object) {
2450  int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2451  EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2452  map, slot, object, object_size);
2453  }
2454 
2455 
2456  static inline void EvacuateByteArray(Map* map,
2457  HeapObject** slot,
2458  HeapObject* object) {
2459  int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2460  EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2461  map, slot, object, object_size);
2462  }
2463 
2464 
2465  static inline void EvacuateSeqOneByteString(Map* map,
2466  HeapObject** slot,
2467  HeapObject* object) {
2468  int object_size = SeqOneByteString::cast(object)->
2469  SeqOneByteStringSize(map->instance_type());
2470  EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2471  map, slot, object, object_size);
2472  }
2473 
2474 
2475  static inline void EvacuateSeqTwoByteString(Map* map,
2476  HeapObject** slot,
2477  HeapObject* object) {
2478  int object_size = SeqTwoByteString::cast(object)->
2479  SeqTwoByteStringSize(map->instance_type());
2480  EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2481  map, slot, object, object_size);
2482  }
2483 
2484 
2485  static inline bool IsShortcutCandidate(int type) {
2486  return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2487  }
2488 
2489  static inline void EvacuateShortcutCandidate(Map* map,
2490  HeapObject** slot,
2491  HeapObject* object) {
2492  ASSERT(IsShortcutCandidate(map->instance_type()));
2493 
2494  Heap* heap = map->GetHeap();
2495 
2496  if (marks_handling == IGNORE_MARKS &&
2497  ConsString::cast(object)->unchecked_second() ==
2498  heap->empty_string()) {
2499  HeapObject* first =
2500  HeapObject::cast(ConsString::cast(object)->unchecked_first());
2501 
2502  *slot = first;
2503 
2504  if (!heap->InNewSpace(first)) {
2505  object->set_map_word(MapWord::FromForwardingAddress(first));
2506  return;
2507  }
2508 
2509  MapWord first_word = first->map_word();
2510  if (first_word.IsForwardingAddress()) {
2511  HeapObject* target = first_word.ToForwardingAddress();
2512 
2513  *slot = target;
2514  object->set_map_word(MapWord::FromForwardingAddress(target));
2515  return;
2516  }
2517 
2518  heap->DoScavengeObject(first->map(), slot, first);
2519  object->set_map_word(MapWord::FromForwardingAddress(*slot));
2520  return;
2521  }
2522 
2523  int object_size = ConsString::kSize;
2524  EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2525  map, slot, object, object_size);
2526  }
2527 
2528  template<ObjectContents object_contents>
2529  class ObjectEvacuationStrategy {
2530  public:
2531  template<int object_size>
2532  static inline void VisitSpecialized(Map* map,
2533  HeapObject** slot,
2534  HeapObject* object) {
2535  EvacuateObject<object_contents, kObjectAlignment>(
2536  map, slot, object, object_size);
2537  }
2538 
2539  static inline void Visit(Map* map,
2540  HeapObject** slot,
2541  HeapObject* object) {
2542  int object_size = map->instance_size();
2543  EvacuateObject<object_contents, kObjectAlignment>(
2544  map, slot, object, object_size);
2545  }
2546  };
2547 
2548  static VisitorDispatchTable<ScavengingCallback> table_;
2549 };
2550 
2551 
2552 template<MarksHandling marks_handling,
2553  LoggingAndProfiling logging_and_profiling_mode>
2554 VisitorDispatchTable<ScavengingCallback>
2555  ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2556 
2557 
2558 static void InitializeScavengingVisitorsTables() {
2559  ScavengingVisitor<TRANSFER_MARKS,
2560  LOGGING_AND_PROFILING_DISABLED>::Initialize();
2561  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2562  ScavengingVisitor<TRANSFER_MARKS,
2563  LOGGING_AND_PROFILING_ENABLED>::Initialize();
2564  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2565 }
2566 
2567 
2568 void Heap::SelectScavengingVisitorsTable() {
2569  bool logging_and_profiling =
2570  isolate()->logger()->is_logging() ||
2571  isolate()->cpu_profiler()->is_profiling() ||
2572  (isolate()->heap_profiler() != NULL &&
2573  isolate()->heap_profiler()->is_tracking_object_moves());
2574 
2575  if (!incremental_marking()->IsMarking()) {
2576  if (!logging_and_profiling) {
2577  scavenging_visitors_table_.CopyFrom(
2578  ScavengingVisitor<IGNORE_MARKS,
2579  LOGGING_AND_PROFILING_DISABLED>::GetTable());
2580  } else {
2581  scavenging_visitors_table_.CopyFrom(
2582  ScavengingVisitor<IGNORE_MARKS,
2583  LOGGING_AND_PROFILING_ENABLED>::GetTable());
2584  }
2585  } else {
2586  if (!logging_and_profiling) {
2587  scavenging_visitors_table_.CopyFrom(
2588  ScavengingVisitor<TRANSFER_MARKS,
2589  LOGGING_AND_PROFILING_DISABLED>::GetTable());
2590  } else {
2591  scavenging_visitors_table_.CopyFrom(
2592  ScavengingVisitor<TRANSFER_MARKS,
2593  LOGGING_AND_PROFILING_ENABLED>::GetTable());
2594  }
2595 
2596  if (incremental_marking()->IsCompacting()) {
2597  // When compacting forbid short-circuiting of cons-strings.
2598  // Scavenging code relies on the fact that new space object
2599  // can't be evacuated into evacuation candidate but
2600  // short-circuiting violates this assumption.
2601  scavenging_visitors_table_.Register(
2602  StaticVisitorBase::kVisitShortcutCandidate,
2603  scavenging_visitors_table_.GetVisitorById(
2604  StaticVisitorBase::kVisitConsString));
2605  }
2606  }
2607 }
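// SelectScavengingVisitorsTable picks one of four statically initialized
// dispatch tables based on two runtime conditions (incremental marking and
// logging/profiling). A miniature standalone version of that pattern, using
// toy enums and a string in place of a visitor table:

#include <cstdio>

enum ToyMarksHandling { TOY_TRANSFER_MARKS, TOY_IGNORE_MARKS };
enum ToyLogging { TOY_LOGGING_ENABLED, TOY_LOGGING_DISABLED };

template <ToyMarksHandling marks, ToyLogging logging>
struct ToyVisitor {
  static const char* Describe() {
    return marks == TOY_TRANSFER_MARKS
               ? (logging == TOY_LOGGING_ENABLED ? "marks+log" : "marks")
               : (logging == TOY_LOGGING_ENABLED ? "plain+log" : "plain");
  }
};

static const char* Select(bool marking, bool logging) {
  if (marking) {
    return logging
               ? ToyVisitor<TOY_TRANSFER_MARKS, TOY_LOGGING_ENABLED>::Describe()
               : ToyVisitor<TOY_TRANSFER_MARKS, TOY_LOGGING_DISABLED>::Describe();
  }
  return logging
             ? ToyVisitor<TOY_IGNORE_MARKS, TOY_LOGGING_ENABLED>::Describe()
             : ToyVisitor<TOY_IGNORE_MARKS, TOY_LOGGING_DISABLED>::Describe();
}

int main() { std::printf("%s\n", Select(true, false)); }  // prints "marks"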
2608 
2609 
2610 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2611  SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2612  MapWord first_word = object->map_word();
2613  SLOW_ASSERT(!first_word.IsForwardingAddress());
2614  Map* map = first_word.ToMap();
2615  map->GetHeap()->DoScavengeObject(map, p, object);
2616 }
2617 
2618 
2619 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2620  int instance_size) {
2621  Object* result;
2622  MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2623  if (!maybe_result->ToObject(&result)) return maybe_result;
2624 
2625  // Map::cast cannot be used due to uninitialized map field.
2626  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2627  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2628  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2629  reinterpret_cast<Map*>(result)->set_visitor_id(
2630  StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2631  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2632  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2633  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2634  reinterpret_cast<Map*>(result)->set_bit_field(0);
2635  reinterpret_cast<Map*>(result)->set_bit_field2(0);
2636  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2637  Map::OwnsDescriptors::encode(true);
2638  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2639  return result;
2640 }
2641 
2642 
2643 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2644  int instance_size,
2645  ElementsKind elements_kind) {
2646  Object* result;
2647  MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2648  if (!maybe_result->To(&result)) return maybe_result;
2649 
2650  Map* map = reinterpret_cast<Map*>(result);
2651  map->set_map_no_write_barrier(meta_map());
2652  map->set_instance_type(instance_type);
2653  map->set_visitor_id(
2654  StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2655  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2656  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2657  map->set_instance_size(instance_size);
2658  map->set_inobject_properties(0);
2660  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2661  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2662  SKIP_WRITE_BARRIER);
2663  map->init_back_pointer(undefined_value());
2665  map->set_instance_descriptors(empty_descriptor_array());
2666  map->set_bit_field(0);
2668  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2669  Map::OwnsDescriptors::encode(true);
2670  map->set_bit_field3(bit_field3);
2671  map->set_elements_kind(elements_kind);
2672 
2673  return map;
2674 }
2675 
2676 
2677 MaybeObject* Heap::AllocateCodeCache() {
2678  CodeCache* code_cache;
2679  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2680  if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2681  }
2682  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2683  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2684  return code_cache;
2685 }
2686 
2687 
2688 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2689  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2690 }
2691 
2692 
2693 MaybeObject* Heap::AllocateAccessorPair() {
2694  AccessorPair* accessors;
2695  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2696  if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2697  }
2698  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2699  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2701  return accessors;
2702 }
2703 
2704 
2705 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2706  TypeFeedbackInfo* info;
2707  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2708  if (!maybe_info->To(&info)) return maybe_info;
2709  }
2710  info->initialize_storage();
2711  info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER);
2712  return info;
2713 }
2714 
2715 
2716 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2717  AliasedArgumentsEntry* entry;
2718  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2719  if (!maybe_entry->To(&entry)) return maybe_entry;
2720  }
2721  entry->set_aliased_context_slot(aliased_context_slot);
2722  return entry;
2723 }
2724 
2725 
2726 const Heap::StringTypeTable Heap::string_type_table[] = {
2727 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2728  {type, size, k##camel_name##MapRootIndex},
2729  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2730 #undef STRING_TYPE_ELEMENT
2731 };
2732 
2733 
2734 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2735 #define CONSTANT_STRING_ELEMENT(name, contents) \
2736  {contents, k##name##RootIndex},
2737  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2738 #undef CONSTANT_STRING_ELEMENT
2739 };
2740 
2741 
2742 const Heap::StructTable Heap::struct_table[] = {
2743 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2744  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2745  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2746 #undef STRUCT_TABLE_ELEMENT
2747 };
2748 
2749 
2750 bool Heap::CreateInitialMaps() {
2751  Object* obj;
2752  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2753  if (!maybe_obj->ToObject(&obj)) return false;
2754  }
2755  // Map::cast cannot be used due to uninitialized map field.
2756  Map* new_meta_map = reinterpret_cast<Map*>(obj);
2757  set_meta_map(new_meta_map);
2758  new_meta_map->set_map(new_meta_map);
2759 
2760  { MaybeObject* maybe_obj =
2761  AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2762  if (!maybe_obj->ToObject(&obj)) return false;
2763  }
2764  set_fixed_array_map(Map::cast(obj));
2765 
2766  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2767  if (!maybe_obj->ToObject(&obj)) return false;
2768  }
2769  set_oddball_map(Map::cast(obj));
2770 
2771  { MaybeObject* maybe_obj =
2772  AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
2773  if (!maybe_obj->ToObject(&obj)) return false;
2774  }
2775  set_constant_pool_array_map(Map::cast(obj));
2776 
2777  // Allocate the empty array.
2778  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2779  if (!maybe_obj->ToObject(&obj)) return false;
2780  }
2781  set_empty_fixed_array(FixedArray::cast(obj));
2782 
2783  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2784  if (!maybe_obj->ToObject(&obj)) return false;
2785  }
2786  set_null_value(Oddball::cast(obj));
2787  Oddball::cast(obj)->set_kind(Oddball::kNull);
2788 
2789  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2790  if (!maybe_obj->ToObject(&obj)) return false;
2791  }
2792  set_undefined_value(Oddball::cast(obj));
2793  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2794  ASSERT(!InNewSpace(undefined_value()));
2795 
2796  // Allocate the empty descriptor array.
2797  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2798  if (!maybe_obj->ToObject(&obj)) return false;
2799  }
2800  set_empty_descriptor_array(DescriptorArray::cast(obj));
2801 
2802  // Allocate the constant pool array.
2803  { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray();
2804  if (!maybe_obj->ToObject(&obj)) return false;
2805  }
2806  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2807 
2808  // Fix the instance_descriptors for the existing maps.
2809  meta_map()->set_code_cache(empty_fixed_array());
2810  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2811  meta_map()->init_back_pointer(undefined_value());
2812  meta_map()->set_instance_descriptors(empty_descriptor_array());
2813 
2814  fixed_array_map()->set_code_cache(empty_fixed_array());
2815  fixed_array_map()->set_dependent_code(
2816  DependentCode::cast(empty_fixed_array()));
2817  fixed_array_map()->init_back_pointer(undefined_value());
2818  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2819 
2820  oddball_map()->set_code_cache(empty_fixed_array());
2821  oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2822  oddball_map()->init_back_pointer(undefined_value());
2823  oddball_map()->set_instance_descriptors(empty_descriptor_array());
2824 
2825  constant_pool_array_map()->set_code_cache(empty_fixed_array());
2826  constant_pool_array_map()->set_dependent_code(
2827  DependentCode::cast(empty_fixed_array()));
2828  constant_pool_array_map()->init_back_pointer(undefined_value());
2829  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2830 
2831  // Fix prototype object for existing maps.
2832  meta_map()->set_prototype(null_value());
2833  meta_map()->set_constructor(null_value());
2834 
2835  fixed_array_map()->set_prototype(null_value());
2836  fixed_array_map()->set_constructor(null_value());
2837 
2838  oddball_map()->set_prototype(null_value());
2839  oddball_map()->set_constructor(null_value());
2840 
2841  constant_pool_array_map()->set_prototype(null_value());
2842  constant_pool_array_map()->set_constructor(null_value());
2843 
2844  { // Map allocation
2845 #define ALLOCATE_MAP(instance_type, size, field_name) \
2846  { Map* map; \
2847  if (!AllocateMap((instance_type), size)->To(&map)) return false; \
2848  set_##field_name##_map(map); \
2849  }
2850 
2851 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2852  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2853 
2854  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2855  ASSERT(fixed_array_map() != fixed_cow_array_map());
2856 
2857  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2858  ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2859  ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2860  ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2861 
2862  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2863  const StringTypeTable& entry = string_type_table[i];
2864  { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2865  if (!maybe_obj->ToObject(&obj)) return false;
2866  }
2867  roots_[entry.index] = Map::cast(obj);
2868  }
2869 
2870  ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2871  undetectable_string_map()->set_is_undetectable();
2872 
2873  ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2874  undetectable_ascii_string_map()->set_is_undetectable();
2875 
2876  ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2877  ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2878  ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2879 
2880 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2881  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2882  external_##type##_array)
2883 
2884  TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2885 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2886 
2887 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2888  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
2889  fixed_##type##_array)
2890 
2891  TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2892 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2893 
2894  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2895 
2896  ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2897 
2898  ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2899  ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2900  ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2901  ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2902 
2903 
2904  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2905  const StructTable& entry = struct_table[i];
2906  Map* map;
2907  if (!AllocateMap(entry.type, entry.size)->To(&map))
2908  return false;
2909  roots_[entry.index] = map;
2910  }
2911 
2912  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2913 
2914  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2915  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2916  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2917  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2918  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2919  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2920 
2921  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2922  native_context_map()->set_dictionary_map(true);
2923  native_context_map()->set_visitor_id(
2924  StaticVisitorBase::kVisitNativeContext);
2925 
2926  ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2927  shared_function_info)
2928 
2929  ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2930  message_object)
2931  ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2932  external)
2933  external_map()->set_is_extensible(false);
2934 #undef ALLOCATE_VARSIZE_MAP
2935 #undef ALLOCATE_MAP
2936  }
2937 
2938  { // Empty arrays
2939  { ByteArray* byte_array;
2940  if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false;
2941  set_empty_byte_array(byte_array);
2942  }
2943 
2944 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2945  { ExternalArray* obj; \
2946  if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj)) \
2947  return false; \
2948  set_empty_external_##type##_array(obj); \
2949  }
2950 
2951  TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2952 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2953 
2954 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2955  { FixedTypedArrayBase* obj; \
2956  if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array)->To(&obj)) \
2957  return false; \
2958  set_empty_fixed_##type##_array(obj); \
2959  }
2960 
2961  TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2962 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2963  }
2964  ASSERT(!InNewSpace(empty_fixed_array()));
2965  return true;
2966 }
2967 
2968 
2969 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2970  // Statically ensure that it is safe to allocate heap numbers in paged
2971  // spaces.
2972  int size = HeapNumber::kSize;
2974 
2975  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2976 
2977  Object* result;
2978  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2979  if (!maybe_result->ToObject(&result)) return maybe_result;
2980  }
2981 
2982  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2983  HeapNumber::cast(result)->set_value(value);
2984  return result;
2985 }
2986 
2987 
2988 MaybeObject* Heap::AllocateCell(Object* value) {
2989  int size = Cell::kSize;
2991 
2992  Object* result;
2993  { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2994  if (!maybe_result->ToObject(&result)) return maybe_result;
2995  }
2996  HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2997  Cell::cast(result)->set_value(value);
2998  return result;
2999 }
3000 
3001 
3002 MaybeObject* Heap::AllocatePropertyCell() {
3003  int size = PropertyCell::kSize;
3005 
3006  Object* result;
3007  MaybeObject* maybe_result =
3008  AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
3009  if (!maybe_result->ToObject(&result)) return maybe_result;
3010 
3011  HeapObject::cast(result)->set_map_no_write_barrier(
3012  global_property_cell_map());
3013  PropertyCell* cell = PropertyCell::cast(result);
3014  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
3015  SKIP_WRITE_BARRIER);
3016  cell->set_value(the_hole_value());
3017  cell->set_type(HeapType::None());
3018  return result;
3019 }
3020 
3021 
3022 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
3023  Box* result;
3024  MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
3025  if (!maybe_result->To(&result)) return maybe_result;
3026  result->set_value(value);
3027  return result;
3028 }
3029 
3030 
3031 MaybeObject* Heap::AllocateAllocationSite() {
3032  AllocationSite* site;
3033  MaybeObject* maybe_result = Allocate(allocation_site_map(),
3034  OLD_POINTER_SPACE);
3035  if (!maybe_result->To(&site)) return maybe_result;
3036  site->Initialize();
3037 
3038  // Link the site
3039  site->set_weak_next(allocation_sites_list());
3040  set_allocation_sites_list(site);
3041  return site;
3042 }
3043 
3044 
3045 MaybeObject* Heap::CreateOddball(const char* to_string,
3046  Object* to_number,
3047  byte kind) {
3048  Object* result;
3049  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
3050  if (!maybe_result->ToObject(&result)) return maybe_result;
3051  }
3052  return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
3053 }
3054 
3055 
3056 bool Heap::CreateApiObjects() {
3057  Object* obj;
3058 
3059  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3060  if (!maybe_obj->ToObject(&obj)) return false;
3061  }
3062  // Don't use Smi-only elements optimizations for objects with the neander
3063  // map. There are too many cases where element values are set directly with a
3064  // bottleneck to trap the Smi-only -> fast elements transition, and there
3065  // appears to be no benefit in optimizing this case.
3066  Map* new_neander_map = Map::cast(obj);
3068  set_neander_map(new_neander_map);
3069 
3070  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
3071  if (!maybe_obj->ToObject(&obj)) return false;
3072  }
3073  Object* elements;
3074  { MaybeObject* maybe_elements = AllocateFixedArray(2);
3075  if (!maybe_elements->ToObject(&elements)) return false;
3076  }
3077  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
3078  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
3079  set_message_listeners(JSObject::cast(obj));
3080 
3081  return true;
3082 }
3083 
3084 
3085 void Heap::CreateJSEntryStub() {
3086  JSEntryStub stub;
3087  set_js_entry_code(*stub.GetCode(isolate()));
3088 }
3089 
3090 
3091 void Heap::CreateJSConstructEntryStub() {
3092  JSConstructEntryStub stub;
3093  set_js_construct_entry_code(*stub.GetCode(isolate()));
3094 }
3095 
3096 
3097 void Heap::CreateFixedStubs() {
3098  // Here we create roots for fixed stubs. They are needed at GC
3099  // for cooking and uncooking (check out frames.cc).
3100  // This eliminates the need for doing a dictionary lookup in the
3101  // stub cache for these stubs.
3102  HandleScope scope(isolate());
3103 
3104  // Create stubs that should be there, so we don't unexpectedly have to
3105  // create them if we need them during the creation of another stub.
3106  // Stub creation mixes raw pointers and handles in an unsafe manner so
3107  // we cannot create stubs while we are creating stubs.
3108  CodeStub::GenerateStubsAheadOfTime(isolate());
3109 
3110  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
3111  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
3112  // is created.
3113 
3114  // gcc-4.4 has a problem generating correct code for the following snippet:
3115  // { JSEntryStub stub;
3116  // js_entry_code_ = *stub.GetCode();
3117  // }
3118  // { JSConstructEntryStub stub;
3119  // js_construct_entry_code_ = *stub.GetCode();
3120  // }
3121  // To work around the problem, make separate functions without inlining.
3122  Heap::CreateJSEntryStub();
3123  Heap::CreateJSConstructEntryStub();
3124 }
3125 
3126 
3127 bool Heap::CreateInitialObjects() {
3128  Object* obj;
3129 
3130  // The -0 value must be set before NumberFromDouble works.
3131  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3132  if (!maybe_obj->ToObject(&obj)) return false;
3133  }
3134  set_minus_zero_value(HeapNumber::cast(obj));
3135  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3136 
3137  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3138  if (!maybe_obj->ToObject(&obj)) return false;
3139  }
3140  set_nan_value(HeapNumber::cast(obj));
3141 
3142  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3143  if (!maybe_obj->ToObject(&obj)) return false;
3144  }
3145  set_infinity_value(HeapNumber::cast(obj));
3146 
3147  // The hole has not been created yet, but we want to put something
3148  // predictable in the gaps in the string table, so let's make that Smi zero.
3149  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3150 
3151  // Allocate initial string table.
3152  { MaybeObject* maybe_obj =
3153  StringTable::Allocate(this, kInitialStringTableSize);
3154  if (!maybe_obj->ToObject(&obj)) return false;
3155  }
3156  // Don't use set_string_table() due to asserts.
3157  roots_[kStringTableRootIndex] = obj;
3158 
3159  // Finish initializing oddballs after creating the string table.
3160  { MaybeObject* maybe_obj =
3161  undefined_value()->Initialize(this,
3162  "undefined",
3163  nan_value(),
3164  Oddball::kUndefined);
3165  if (!maybe_obj->ToObject(&obj)) return false;
3166  }
3167 
3168  // Initialize the null_value.
3169  { MaybeObject* maybe_obj = null_value()->Initialize(
3170  this, "null", Smi::FromInt(0), Oddball::kNull);
3171  if (!maybe_obj->ToObject(&obj)) return false;
3172  }
3173 
3174  { MaybeObject* maybe_obj = CreateOddball("true",
3175  Smi::FromInt(1),
3176  Oddball::kTrue);
3177  if (!maybe_obj->ToObject(&obj)) return false;
3178  }
3179  set_true_value(Oddball::cast(obj));
3180 
3181  { MaybeObject* maybe_obj = CreateOddball("false",
3182  Smi::FromInt(0),
3183  Oddball::kFalse);
3184  if (!maybe_obj->ToObject(&obj)) return false;
3185  }
3186  set_false_value(Oddball::cast(obj));
3187 
3188  { MaybeObject* maybe_obj = CreateOddball("hole",
3189  Smi::FromInt(-1),
3190  Oddball::kTheHole);
3191  if (!maybe_obj->ToObject(&obj)) return false;
3192  }
3193  set_the_hole_value(Oddball::cast(obj));
3194 
3195  { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3196  Smi::FromInt(-1),
3197  Oddball::kUninitialized);
3198  if (!maybe_obj->ToObject(&obj)) return false;
3199  }
3200  set_uninitialized_value(Oddball::cast(obj));
3201 
3202  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3203  Smi::FromInt(-4),
3205  if (!maybe_obj->ToObject(&obj)) return false;
3206  }
3207  set_arguments_marker(Oddball::cast(obj));
3208 
3209  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3210  Smi::FromInt(-2),
3211  Oddball::kOther);
3212  if (!maybe_obj->ToObject(&obj)) return false;
3213  }
3214  set_no_interceptor_result_sentinel(obj);
3215 
3216  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3217  Smi::FromInt(-3),
3218  Oddball::kOther);
3219  if (!maybe_obj->ToObject(&obj)) return false;
3220  }
3221  set_termination_exception(obj);
3222 
3223  for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3224  { MaybeObject* maybe_obj =
3225  InternalizeUtf8String(constant_string_table[i].contents);
3226  if (!maybe_obj->ToObject(&obj)) return false;
3227  }
3228  roots_[constant_string_table[i].index] = String::cast(obj);
3229  }
3230 
3231  // Allocate the hidden string which is used to identify the hidden properties
3232  // in JSObjects. The hash code has a special value so that it will not match
3233  // the empty string when searching for the property. It cannot be part of the
3234  // loop above because it needs to be allocated manually with the special
3235  // hash code in place. The hash code for the hidden_string is zero to ensure
3236  // that it will always be at the first entry in property descriptors.
3237  { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3239  if (!maybe_obj->ToObject(&obj)) return false;
3240  }
3241  hidden_string_ = String::cast(obj);
3242 
3243  // Allocate the code_stubs dictionary. The initial size is set to avoid
3244  // expanding the dictionary during bootstrapping.
3245  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3246  if (!maybe_obj->ToObject(&obj)) return false;
3247  }
3248  set_code_stubs(UnseededNumberDictionary::cast(obj));
3249 
3250 
3251  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3252  // is set to avoid expanding the dictionary during bootstrapping.
3253  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3254  if (!maybe_obj->ToObject(&obj)) return false;
3255  }
3256  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3257 
3258  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3259  if (!maybe_obj->ToObject(&obj)) return false;
3260  }
3261  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3262 
3263  set_instanceof_cache_function(Smi::FromInt(0));
3264  set_instanceof_cache_map(Smi::FromInt(0));
3265  set_instanceof_cache_answer(Smi::FromInt(0));
3266 
3267  CreateFixedStubs();
3268 
3269  // Allocate the dictionary of intrinsic function names.
3270  { MaybeObject* maybe_obj =
3271  NameDictionary::Allocate(this, Runtime::kNumFunctions);
3272  if (!maybe_obj->ToObject(&obj)) return false;
3273  }
3274  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3275  obj);
3276  if (!maybe_obj->ToObject(&obj)) return false;
3277  }
3278  set_intrinsic_function_names(NameDictionary::cast(obj));
3279 
3280  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3281  if (!maybe_obj->ToObject(&obj)) return false;
3282  }
3283  set_number_string_cache(FixedArray::cast(obj));
3284 
3285  // Allocate cache for single character one byte strings.
3286  { MaybeObject* maybe_obj =
3287  AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3288  if (!maybe_obj->ToObject(&obj)) return false;
3289  }
3290  set_single_character_string_cache(FixedArray::cast(obj));
3291 
3292  // Allocate cache for string split.
3293  { MaybeObject* maybe_obj = AllocateFixedArray(
3294  RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3295  if (!maybe_obj->ToObject(&obj)) return false;
3296  }
3297  set_string_split_cache(FixedArray::cast(obj));
3298 
3299  { MaybeObject* maybe_obj = AllocateFixedArray(
3300  RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3301  if (!maybe_obj->ToObject(&obj)) return false;
3302  }
3303  set_regexp_multiple_cache(FixedArray::cast(obj));
3304 
3305  // Allocate cache for external strings pointing to native source code.
3306  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3307  if (!maybe_obj->ToObject(&obj)) return false;
3308  }
3309  set_natives_source_cache(FixedArray::cast(obj));
3310 
3311  { MaybeObject* maybe_obj = AllocateCell(undefined_value());
3312  if (!maybe_obj->ToObject(&obj)) return false;
3313  }
3314  set_undefined_cell(Cell::cast(obj));
3315 
3316  // The symbol registry is initialized lazily.
3317  set_symbol_registry(undefined_value());
3318 
3319  // Allocate object to hold object observation state.
3320  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3321  if (!maybe_obj->ToObject(&obj)) return false;
3322  }
3323  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3324  if (!maybe_obj->ToObject(&obj)) return false;
3325  }
3326  set_observation_state(JSObject::cast(obj));
3327 
3328  // Allocate object to hold object microtask state.
3329  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3330  if (!maybe_obj->ToObject(&obj)) return false;
3331  }
3332  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3333  if (!maybe_obj->ToObject(&obj)) return false;
3334  }
3335  set_microtask_state(JSObject::cast(obj));
3336 
3337  { MaybeObject* maybe_obj = AllocateSymbol();
3338  if (!maybe_obj->ToObject(&obj)) return false;
3339  }
3340  Symbol::cast(obj)->set_is_private(true);
3341  set_frozen_symbol(Symbol::cast(obj));
3342 
3343  { MaybeObject* maybe_obj = AllocateSymbol();
3344  if (!maybe_obj->ToObject(&obj)) return false;
3345  }
3346  Symbol::cast(obj)->set_is_private(true);
3347  set_nonexistent_symbol(Symbol::cast(obj));
3348 
3349  { MaybeObject* maybe_obj = AllocateSymbol();
3350  if (!maybe_obj->ToObject(&obj)) return false;
3351  }
3352  Symbol::cast(obj)->set_is_private(true);
3353  set_elements_transition_symbol(Symbol::cast(obj));
3354 
3355  { MaybeObject* maybe_obj = AllocateSymbol();
3356  if (!maybe_obj->ToObject(&obj)) return false;
3357  }
3358  Symbol::cast(obj)->set_is_private(true);
3359  set_uninitialized_symbol(Symbol::cast(obj));
3360 
3361  { MaybeObject* maybe_obj = AllocateSymbol();
3362  if (!maybe_obj->ToObject(&obj)) return false;
3363  }
3364  Symbol::cast(obj)->set_is_private(true);
3365  set_megamorphic_symbol(Symbol::cast(obj));
3366 
3367  { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3368  if (!maybe_obj->ToObject(&obj)) return false;
3369  }
3370  SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3371  set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3372 
3373  { MaybeObject* maybe_obj = AllocateSymbol();
3374  if (!maybe_obj->ToObject(&obj)) return false;
3375  }
3376  Symbol::cast(obj)->set_is_private(true);
3377  set_observed_symbol(Symbol::cast(obj));
3378 
3379  { MaybeObject* maybe_obj = AllocateFixedArray(0, TENURED);
3380  if (!maybe_obj->ToObject(&obj)) return false;
3381  }
3382  set_materialized_objects(FixedArray::cast(obj));
3383 
3384  // Handling of script id generation is in Factory::NewScript.
3385  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
3386 
3387  { MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad();
3388  if (!maybe_obj->ToObject(&obj)) return false;
3389  }
3390  set_allocation_sites_scratchpad(FixedArray::cast(obj));
3391  InitializeAllocationSitesScratchpad();
3392 
3393  // Initialize keyed lookup cache.
3394  isolate_->keyed_lookup_cache()->Clear();
3395 
3396  // Initialize context slot cache.
3397  isolate_->context_slot_cache()->Clear();
3398 
3399  // Initialize descriptor cache.
3400  isolate_->descriptor_lookup_cache()->Clear();
3401 
3402  // Initialize compilation cache.
3403  isolate_->compilation_cache()->Clear();
3404 
3405  return true;
3406 }
3407 
3408 
3409 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3410  RootListIndex writable_roots[] = {
3411  kStoreBufferTopRootIndex,
3412  kStackLimitRootIndex,
3413  kNumberStringCacheRootIndex,
3414  kInstanceofCacheFunctionRootIndex,
3415  kInstanceofCacheMapRootIndex,
3416  kInstanceofCacheAnswerRootIndex,
3417  kCodeStubsRootIndex,
3418  kNonMonomorphicCacheRootIndex,
3419  kPolymorphicCodeCacheRootIndex,
3420  kLastScriptIdRootIndex,
3421  kEmptyScriptRootIndex,
3422  kRealStackLimitRootIndex,
3423  kArgumentsAdaptorDeoptPCOffsetRootIndex,
3424  kConstructStubDeoptPCOffsetRootIndex,
3425  kGetterStubDeoptPCOffsetRootIndex,
3426  kSetterStubDeoptPCOffsetRootIndex,
3427  kStringTableRootIndex,
3428  };
3429 
3430  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3431  if (root_index == writable_roots[i])
3432  return true;
3433  }
3434  return false;
3435 }
3436 
3437 
3438 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3439  return !RootCanBeWrittenAfterInitialization(root_index) &&
3440  !InNewSpace(roots_array_start()[root_index]);
3441 }
3442 
3443 
3444 Object* RegExpResultsCache::Lookup(Heap* heap,
3445  String* key_string,
3446  Object* key_pattern,
3447  ResultsCacheType type) {
3448  FixedArray* cache;
3449  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3450  if (type == STRING_SPLIT_SUBSTRINGS) {
3451  ASSERT(key_pattern->IsString());
3452  if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3453  cache = heap->string_split_cache();
3454  } else {
3455  ASSERT(type == REGEXP_MULTIPLE_INDICES);
3456  ASSERT(key_pattern->IsFixedArray());
3457  cache = heap->regexp_multiple_cache();
3458  }
3459 
3460  uint32_t hash = key_string->Hash();
3461  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3462  ~(kArrayEntriesPerCacheEntry - 1));
3463  if (cache->get(index + kStringOffset) == key_string &&
3464  cache->get(index + kPatternOffset) == key_pattern) {
3465  return cache->get(index + kArrayOffset);
3466  }
3467  index =
3468  ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3469  if (cache->get(index + kStringOffset) == key_string &&
3470  cache->get(index + kPatternOffset) == key_pattern) {
3471  return cache->get(index + kArrayOffset);
3472  }
3473  return Smi::FromInt(0);
3474 }
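// --- Editorial sketch (not part of heap.cc): how the two-probe lookup above
// picks its slots. kRegExpResultsCacheSize and kArrayEntriesPerCacheEntry are
// illustrative stand-ins for the real constants; the only assumption carried
// over from the code above is that both are powers of two, which the masking
// arithmetic requires.
#include <cstdint>

namespace example {

const uint32_t kRegExpResultsCacheSize = 0x100;  // stand-in; power of two
const uint32_t kArrayEntriesPerCacheEntry = 4;   // stand-in; string, pattern, results array

// Primary slot: hash masked into the table, rounded down to an entry boundary.
inline uint32_t PrimarySlot(uint32_t hash) {
  return (hash & (kRegExpResultsCacheSize - 1)) &
         ~(kArrayEntriesPerCacheEntry - 1);
}

// Secondary slot: the next entry, wrapping around the table. Enter() below
// falls back to this slot before evicting.
inline uint32_t SecondarySlot(uint32_t hash) {
  return (PrimarySlot(hash) + kArrayEntriesPerCacheEntry) &
         (kRegExpResultsCacheSize - 1);
}

}  // namespace example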
3475 
3476 
3477 void RegExpResultsCache::Enter(Heap* heap,
3478  String* key_string,
3479  Object* key_pattern,
3480  FixedArray* value_array,
3481  ResultsCacheType type) {
3482  FixedArray* cache;
3483  if (!key_string->IsInternalizedString()) return;
3484  if (type == STRING_SPLIT_SUBSTRINGS) {
3485  ASSERT(key_pattern->IsString());
3486  if (!key_pattern->IsInternalizedString()) return;
3487  cache = heap->string_split_cache();
3488  } else {
3489  ASSERT(type == REGEXP_MULTIPLE_INDICES);
3490  ASSERT(key_pattern->IsFixedArray());
3491  cache = heap->regexp_multiple_cache();
3492  }
3493 
3494  uint32_t hash = key_string->Hash();
3495  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3496  ~(kArrayEntriesPerCacheEntry - 1));
3497  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3498  cache->set(index + kStringOffset, key_string);
3499  cache->set(index + kPatternOffset, key_pattern);
3500  cache->set(index + kArrayOffset, value_array);
3501  } else {
3502  uint32_t index2 =
3503  ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3504  if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3505  cache->set(index2 + kStringOffset, key_string);
3506  cache->set(index2 + kPatternOffset, key_pattern);
3507  cache->set(index2 + kArrayOffset, value_array);
3508  } else {
3509  cache->set(index2 + kStringOffset, Smi::FromInt(0));
3510  cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3511  cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3512  cache->set(index + kStringOffset, key_string);
3513  cache->set(index + kPatternOffset, key_pattern);
3514  cache->set(index + kArrayOffset, value_array);
3515  }
3516  }
3517  // If the array is a reasonably short list of substrings, convert it into a
3518  // list of internalized strings.
3519  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3520  for (int i = 0; i < value_array->length(); i++) {
3521  String* str = String::cast(value_array->get(i));
3522  Object* internalized_str;
3523  MaybeObject* maybe_string = heap->InternalizeString(str);
3524  if (maybe_string->ToObject(&internalized_str)) {
3525  value_array->set(i, internalized_str);
3526  }
3527  }
3528  }
3529  // Convert backing store to a copy-on-write array.
3530  value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3531 }
3532 
3533 
3534 void RegExpResultsCache::Clear(FixedArray* cache) {
3535  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3536  cache->set(i, Smi::FromInt(0));
3537  }
3538 }
3539 
3540 
3541 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3542  MaybeObject* maybe_obj =
3543  AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3544  return maybe_obj;
3545 }
3546 
3547 
3548 int Heap::FullSizeNumberStringCacheLength() {
3549  // Compute the size of the number string cache based on the max newspace size.
3550  // The number string cache has a minimum size based on twice the initial cache
3551  // size to ensure that it is bigger after being made 'full size'.
3552  int number_string_cache_size = max_semispace_size_ / 512;
3553  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3554  Min(0x4000, number_string_cache_size));
3555  // There is a string and a number per entry so the length is twice the number
3556  // of entries.
3557  return number_string_cache_size * 2;
3558 }
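// --- Editorial sketch (not part of heap.cc): the sizing rule above written
// out with plain integers. The 16 MB semispace and 256-entry initial cache in
// main() are only example inputs, not claims about any real configuration.
#include <algorithm>
#include <cstdio>

int FullSizeNumberStringCacheLengthSketch(int max_semispace_size,
                                          int initial_cache_size) {
  // One 512th of the max semispace, clamped to [2 * initial, 0x4000] entries.
  int entries = max_semispace_size / 512;
  entries = std::max(initial_cache_size * 2, std::min(0x4000, entries));
  // Two slots per entry: the number key and the cached string.
  return entries * 2;
}

int main() {
  std::printf("%d\n",
              FullSizeNumberStringCacheLengthSketch(16 * 1024 * 1024, 256));
  return 0;
}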
3559 
3560 
3561 void Heap::AllocateFullSizeNumberStringCache() {
3562  // The idea is to have a small number string cache in the snapshot to keep
3563  // boot-time memory usage down. If we expand the number string cache already
3564  // while creating the snapshot then that didn't work out.
3565  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3566  MaybeObject* maybe_obj =
3567  AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3568  Object* new_cache;
3569  if (maybe_obj->ToObject(&new_cache)) {
3570  // We don't bother to repopulate the cache with entries from the old cache.
3571  // It will be repopulated soon enough with new strings.
3572  set_number_string_cache(FixedArray::cast(new_cache));
3573  }
3574  // If allocation fails then we just return without doing anything. It is only
3575  // a cache, so best effort is OK here.
3576 }
3577 
3578 
3579 void Heap::FlushNumberStringCache() {
3580  // Flush the number to string cache.
3581  int len = number_string_cache()->length();
3582  for (int i = 0; i < len; i++) {
3583  number_string_cache()->set_undefined(i);
3584  }
3585 }
3586 
3587 
3588 static inline int double_get_hash(double d) {
3589  DoubleRepresentation rep(d);
3590  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3591 }
3592 
3593 
3594 static inline int smi_get_hash(Smi* smi) {
3595  return smi->value();
3596 }
3597 
3598 
3599 Object* Heap::GetNumberStringCache(Object* number) {
3600  int hash;
3601  int mask = (number_string_cache()->length() >> 1) - 1;
3602  if (number->IsSmi()) {
3603  hash = smi_get_hash(Smi::cast(number)) & mask;
3604  } else {
3605  hash = double_get_hash(number->Number()) & mask;
3606  }
3607  Object* key = number_string_cache()->get(hash * 2);
3608  if (key == number) {
3609  return String::cast(number_string_cache()->get(hash * 2 + 1));
3610  } else if (key->IsHeapNumber() &&
3611  number->IsHeapNumber() &&
3612  key->Number() == number->Number()) {
3613  return String::cast(number_string_cache()->get(hash * 2 + 1));
3614  }
3615  return undefined_value();
3616 }
3617 
3618 
3619 void Heap::SetNumberStringCache(Object* number, String* string) {
3620  int hash;
3621  int mask = (number_string_cache()->length() >> 1) - 1;
3622  if (number->IsSmi()) {
3623  hash = smi_get_hash(Smi::cast(number)) & mask;
3624  } else {
3625  hash = double_get_hash(number->Number()) & mask;
3626  }
3627  if (number_string_cache()->get(hash * 2) != undefined_value() &&
3628  number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3629  // The first time we have a hash collision, we move to the full sized
3630  // number string cache.
3631  AllocateFullSizeNumberStringCache();
3632  return;
3633  }
3634  number_string_cache()->set(hash * 2, number);
3635  number_string_cache()->set(hash * 2 + 1, string);
3636 }
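// --- Editorial sketch (not part of heap.cc): the slot computation shared by
// GetNumberStringCache and SetNumberStringCache above, using plain C++ types.
// "is_smi" stands in for the IsSmi() tag check; the double hash mirrors
// double_get_hash (xor of the high and low 32 bits of the IEEE representation).
#include <cstdint>
#include <cstring>

inline int DoubleHashSketch(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<int>(bits) ^ static_cast<int>(bits >> 32);
}

// cache_length is the backing array length; each entry uses two slots, so the
// mask is (cache_length / 2) - 1 and the entry count must be a power of two.
inline int KeySlotSketch(bool is_smi, int smi_value, double number,
                         int cache_length) {
  int mask = (cache_length >> 1) - 1;
  int hash = is_smi ? (smi_value & mask) : (DoubleHashSketch(number) & mask);
  return hash * 2;  // the cached string lives at KeySlotSketch(...) + 1
}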
3637 
3638 
3639 MaybeObject* Heap::NumberToString(Object* number,
3640  bool check_number_string_cache) {
3641  isolate_->counters()->number_to_string_runtime()->Increment();
3642  if (check_number_string_cache) {
3643  Object* cached = GetNumberStringCache(number);
3644  if (cached != undefined_value()) {
3645  return cached;
3646  }
3647  }
3648 
3649  char arr[100];
3650  Vector<char> buffer(arr, ARRAY_SIZE(arr));
3651  const char* str;
3652  if (number->IsSmi()) {
3653  int num = Smi::cast(number)->value();
3654  str = IntToCString(num, buffer);
3655  } else {
3656  double num = HeapNumber::cast(number)->value();
3657  str = DoubleToCString(num, buffer);
3658  }
3659 
3660  Object* js_string;
3661 
3662  // We tenure the allocated string since it is referenced from the
3663  // number-string cache which lives in the old space.
3664  MaybeObject* maybe_js_string =
3665  AllocateStringFromOneByte(CStrVector(str), TENURED);
3666  if (maybe_js_string->ToObject(&js_string)) {
3667  SetNumberStringCache(number, String::cast(js_string));
3668  }
3669  return maybe_js_string;
3670 }
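// --- Editorial sketch (not part of heap.cc): the stack-buffer conversion that
// NumberToString performs before caching the result. std::snprintf stands in
// for V8's IntToCString/DoubleToCString helpers, whose exact formatting
// differs (they produce shortest round-trip output).
#include <cstdio>

inline const char* NumberToCStringSketch(bool is_smi, int smi_value,
                                         double number, char* buf, int size) {
  if (is_smi) {
    std::snprintf(buf, static_cast<size_t>(size), "%d", smi_value);
  } else {
    std::snprintf(buf, static_cast<size_t>(size), "%g", number);
  }
  return buf;
}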
3671 
3672 
3673 MaybeObject* Heap::Uint32ToString(uint32_t value,
3674  bool check_number_string_cache) {
3675  Object* number;
3676  MaybeObject* maybe = NumberFromUint32(value);
3677  if (!maybe->To<Object>(&number)) return maybe;
3678  return NumberToString(number, check_number_string_cache);
3679 }
3680 
3681 
3682 MaybeObject* Heap::AllocateAllocationSitesScratchpad() {
3683  MaybeObject* maybe_obj =
3684  AllocateFixedArray(kAllocationSiteScratchpadSize, TENURED);
3685  return maybe_obj;
3686 }
3687 
3688 
3689 void Heap::FlushAllocationSitesScratchpad() {
3690  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3691  allocation_sites_scratchpad()->set_undefined(i);
3692  }
3693  allocation_sites_scratchpad_length_ = 0;
3694 }
3695 
3696 
3697 void Heap::InitializeAllocationSitesScratchpad() {
3698  ASSERT(allocation_sites_scratchpad()->length() ==
3699  kAllocationSiteScratchpadSize);
3700  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3701  allocation_sites_scratchpad()->set_undefined(i);
3702  }
3703 }
3704 
3705 
3706 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3707  ScratchpadSlotMode mode) {
3708  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3709  // We cannot use the normal write-barrier because slots need to be
3710  // recorded with non-incremental marking as well. We have to explicitly
3711  // record the slot to take evacuation candidates into account.
3712  allocation_sites_scratchpad()->set(
3713  allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
3714  Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3715  allocation_sites_scratchpad_length_);
3716 
3717  if (mode == RECORD_SCRATCHPAD_SLOT) {
3718  // We need to allow slots buffer overflow here since the evacuation
3719  // candidates are not part of the global list of old space pages and
3720  // releasing an evacuation candidate due to a slots buffer overflow
3721  // results in lost pages.
3722  mark_compact_collector()->RecordSlot(
3723  slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
3724  }
3725  allocation_sites_scratchpad_length_++;
3726  }
3727 }
3728 
3729 
3730 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3731  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3732 }
3733 
3734 
3735 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3736  ExternalArrayType array_type) {
3737  switch (array_type) {
3738 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3739  case kExternal##Type##Array: \
3740  return kExternal##Type##ArrayMapRootIndex;
3741 
3742  TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3743 #undef ARRAY_TYPE_TO_ROOT_INDEX
3744 
3745  default:
3746  UNREACHABLE();
3747  return kUndefinedValueRootIndex;
3748  }
3749 }
3750 
3751 
3752 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3753  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3754 }
3755 
3756 
3757 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3758  ExternalArrayType array_type) {
3759  switch (array_type) {
3760 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3761  case kExternal##Type##Array: \
3762  return kFixed##Type##ArrayMapRootIndex;
3763 
3764  TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3765 #undef ARRAY_TYPE_TO_ROOT_INDEX
3766 
3767  default:
3768  UNREACHABLE();
3769  return kUndefinedValueRootIndex;
3770  }
3771 }
3772 
3773 
3774 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3775  ElementsKind elementsKind) {
3776  switch (elementsKind) {
3777 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3778  case EXTERNAL_##TYPE##_ELEMENTS: \
3779  return kEmptyExternal##Type##ArrayRootIndex;
3780 
3781  TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3782 #undef ELEMENT_KIND_TO_ROOT_INDEX
3783 
3784  default:
3785  UNREACHABLE();
3786  return kUndefinedValueRootIndex;
3787  }
3788 }
3789 
3790 
3791 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3792  ElementsKind elementsKind) {
3793  switch (elementsKind) {
3794 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3795  case TYPE##_ELEMENTS: \
3796  return kEmptyFixed##Type##ArrayRootIndex;
3797 
3798  TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3799 #undef ELEMENT_KIND_TO_ROOT_INDEX
3800  default:
3801  UNREACHABLE();
3802  return kUndefinedValueRootIndex;
3803  }
3804 }
3805 
3806 
3807 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3808  return ExternalArray::cast(
3809  roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3810 }
3811 
3812 
3813 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3814  return FixedTypedArrayBase::cast(
3815  roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3816 }
3817 
3818 
3819 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3820  // We need to distinguish the minus zero value and this cannot be
3821  // done after conversion to int. Doing this by comparing bit
3822  // patterns is faster than using fpclassify() et al.
3823  if (IsMinusZero(value)) {
3824  return AllocateHeapNumber(-0.0, pretenure);
3825  }
3826 
3827  int int_value = FastD2I(value);
3828  if (value == int_value && Smi::IsValid(int_value)) {
3829  return Smi::FromInt(int_value);
3830  }
3831 
3832  // Materialize the value in the heap.
3833  return AllocateHeapNumber(value, pretenure);
3834 }
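// --- Editorial sketch (not part of heap.cc): the two checks NumberFromDouble
// performs before boxing a HeapNumber. The 31-bit smi range below is an
// assumption matching the 32-bit-pointer configuration; 64-bit builds use a
// wider range.
#include <cstdint>
#include <cstring>

inline bool IsMinusZeroSketch(double value) {
  // -0.0 compares equal to 0.0, so the sign bit has to be inspected directly.
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits == 0x8000000000000000ULL;
}

inline bool FitsInSmiSketch(double value) {
  const double kSmiMin = -(1 << 30);      // assumption: 31-bit smis
  const double kSmiMax = (1 << 30) - 1;
  if (value != value) return false;       // NaN never fits
  if (value < kSmiMin || value > kSmiMax) return false;
  int32_t as_int = static_cast<int32_t>(value);
  // Must round-trip exactly; note this would accept -0.0, which is why the
  // minus-zero check above runs first.
  return static_cast<double>(as_int) == value;
}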
3835 
3836 
3837 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3838  // Statically ensure that it is safe to allocate foreigns in paged spaces.
3839  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3840  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3841  Foreign* result;
3842  MaybeObject* maybe_result = Allocate(foreign_map(), space);
3843  if (!maybe_result->To(&result)) return maybe_result;
3844  result->set_foreign_address(address);
3845  return result;
3846 }
3847 
3848 
3849 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3850  SharedFunctionInfo* share;
3851  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3852  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3853 
3854  // Set pointer fields.
3855  share->set_name(name);
3856  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3857  share->set_code(illegal);
3858  share->set_optimized_code_map(Smi::FromInt(0));
3859  share->set_scope_info(ScopeInfo::Empty(isolate_));
3860  Code* construct_stub =
3861  isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3862  share->set_construct_stub(construct_stub);
3863  share->set_instance_class_name(Object_string());
3864  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3865  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3866  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3867  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3868  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3869  share->set_ast_node_count(0);
3870  share->set_counters(0);
3871 
3872  // Set integer fields (smi or int, depending on the architecture).
3873  share->set_length(0);
3874  share->set_formal_parameter_count(0);
3875  share->set_expected_nof_properties(0);
3876  share->set_num_literals(0);
3877  share->set_start_position_and_type(0);
3878  share->set_end_position(0);
3879  share->set_function_token_position(0);
3880  // All compiler hints default to false or 0.
3881  share->set_compiler_hints(0);
3882  share->set_opt_count_and_bailout_reason(0);
3883 
3884  return share;
3885 }
3886 
3887 
3888 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3889  JSArray* arguments,
3890  int start_position,
3891  int end_position,
3892  Object* script,
3893  Object* stack_frames) {
3894  Object* result;
3895  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3896  if (!maybe_result->ToObject(&result)) return maybe_result;
3897  }
3898  JSMessageObject* message = JSMessageObject::cast(result);
3899  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3900  message->initialize_elements();
3901  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3902  message->set_type(type);
3903  message->set_arguments(arguments);
3904  message->set_start_position(start_position);
3905  message->set_end_position(end_position);
3906  message->set_script(script);
3907  message->set_stack_frames(stack_frames);
3908  return result;
3909 }
3910 
3911 
3912 MaybeObject* Heap::AllocateExternalStringFromAscii(
3913  const ExternalAsciiString::Resource* resource) {
3914  size_t length = resource->length();
3915  if (length > static_cast<size_t>(String::kMaxLength)) {
3916  return isolate()->ThrowInvalidStringLength();
3917  }
3918 
3919  Map* map = external_ascii_string_map();
3920  Object* result;
3921  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3922  if (!maybe_result->ToObject(&result)) return maybe_result;
3923  }
3924 
3925  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3926  external_string->set_length(static_cast<int>(length));
3927  external_string->set_hash_field(String::kEmptyHashField);
3928  external_string->set_resource(resource);
3929 
3930  return result;
3931 }
3932 
3933 
3934 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3935  const ExternalTwoByteString::Resource* resource) {
3936  size_t length = resource->length();
3937  if (length > static_cast<size_t>(String::kMaxLength)) {
3938  return isolate()->ThrowInvalidStringLength();
3939  }
3940 
3941  // For small strings we check whether the resource contains only
3942  // one byte characters. If yes, we use a different string map.
3943  static const size_t kOneByteCheckLengthLimit = 32;
3944  bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3945  String::IsOneByte(resource->data(), static_cast<int>(length));
3946  Map* map = is_one_byte ?
3947  external_string_with_one_byte_data_map() : external_string_map();
3948  Object* result;
3949  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3950  if (!maybe_result->ToObject(&result)) return maybe_result;
3951  }
3952 
3953  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3954  external_string->set_length(static_cast<int>(length));
3955  external_string->set_hash_field(String::kEmptyHashField);
3956  external_string->set_resource(resource);
3957 
3958  return result;
3959 }
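// --- Editorial sketch (not part of heap.cc): the kind of scan String::IsOneByte
// performs for the short-string check above. Only resources whose code units
// all fit in Latin-1 can use the one-byte-data map; the 32-unit limit in the
// code above merely bounds how much work this scan is allowed to do.
#include <cstdint>

inline bool AllOneByteSketch(const uint16_t* data, int length) {
  for (int i = 0; i < length; i++) {
    if (data[i] > 0xFF) return false;  // needs a genuine two-byte representation
  }
  return true;
}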
3960 
3961 
3962 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3963  if (code <= String::kMaxOneByteCharCode) {
3964  Object* value = single_character_string_cache()->get(code);
3965  if (value != undefined_value()) return value;
3966 
3967  uint8_t buffer[1];
3968  buffer[0] = static_cast<uint8_t>(code);
3969  Object* result;
3970  OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed());
3971  MaybeObject* maybe_result = InternalizeStringWithKey(&key);
3972 
3973  if (!maybe_result->ToObject(&result)) return maybe_result;
3974  single_character_string_cache()->set(code, result);
3975  return result;
3976  }
3977 
3978  SeqTwoByteString* result;
3979  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3980  if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
3981  }
3982  result->SeqTwoByteStringSet(0, code);
3983  return result;
3984 }
3985 
3986 
3987 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3988  if (length < 0 || length > ByteArray::kMaxLength) {
3989  v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3990  }
3991  int size = ByteArray::SizeFor(length);
3992  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3993  Object* result;
3994  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3995  if (!maybe_result->ToObject(&result)) return maybe_result;
3996  }
3997 
3998  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3999  byte_array_map());
4000  reinterpret_cast<ByteArray*>(result)->set_length(length);
4001  return result;
4002 }
4003 
4004 
4005 void Heap::CreateFillerObjectAt(Address addr, int size) {
4006  if (size == 0) return;
4007  HeapObject* filler = HeapObject::FromAddress(addr);
4008  if (size == kPointerSize) {
4009  filler->set_map_no_write_barrier(one_pointer_filler_map());
4010  } else if (size == 2 * kPointerSize) {
4011  filler->set_map_no_write_barrier(two_pointer_filler_map());
4012  } else {
4013  filler->set_map_no_write_barrier(free_space_map());
4014  FreeSpace::cast(filler)->set_size(size);
4015  }
4016 }
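// --- Editorial sketch (not part of heap.cc): the three filler shapes chosen
// above, keyed purely on the gap size. The 8-byte pointer size is an assumed
// 64-bit target; the real code uses the build's kPointerSize.
#include <cstddef>

enum FillerKindSketch {
  kNoFiller,
  kOnePointerFiller,   // a bare map word
  kTwoPointerFiller,   // map word plus one padding word
  kFreeSpaceFiller     // FreeSpace object carrying its own size field
};

inline FillerKindSketch FillerForGap(std::size_t size) {
  const std::size_t kPointerSizeSketch = 8;  // assumption: 64-bit target
  if (size == 0) return kNoFiller;
  if (size == kPointerSizeSketch) return kOnePointerFiller;
  if (size == 2 * kPointerSizeSketch) return kTwoPointerFiller;
  return kFreeSpaceFiller;
}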
4017 
4018 
4019 bool Heap::CanMoveObjectStart(HeapObject* object) {
4020  Address address = object->address();
4021  bool is_in_old_pointer_space = InOldPointerSpace(address);
4022  bool is_in_old_data_space = InOldDataSpace(address);
4023 
4024  if (lo_space()->Contains(object)) return false;
4025 
4026  // We cannot move the object start if the given old space page is
4027  // concurrently swept.
4028  return (!is_in_old_pointer_space && !is_in_old_data_space) ||
4029  Page::FromAddress(address)->parallel_sweeping() <=
4030  MemoryChunk::PARALLEL_SWEEPING_FINALIZE;
4031 }
4032 
4033 
4034 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
4035  if (incremental_marking()->IsMarking() &&
4036  Marking::IsBlack(Marking::MarkBitFrom(address))) {
4037  if (mode == FROM_GC) {
4038  MemoryChunk::IncrementLiveBytesFromGC(address, by);
4039  } else {
4040  MemoryChunk::IncrementLiveBytesFromMutator(address, by);
4041  }
4042  }
4043 }
4044 
4045 
4046 MaybeObject* Heap::AllocateExternalArray(int length,
4047  ExternalArrayType array_type,
4048  void* external_pointer,
4049  PretenureFlag pretenure) {
4050  int size = ExternalArray::kAlignedSize;
4051  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4052  Object* result;
4053  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4054  if (!maybe_result->ToObject(&result)) return maybe_result;
4055  }
4056 
4057  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4058  MapForExternalArrayType(array_type));
4059  reinterpret_cast<ExternalArray*>(result)->set_length(length);
4060  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4061  external_pointer);
4062 
4063  return result;
4064 }
4065 
4066 static void ForFixedTypedArray(ExternalArrayType array_type,
4067  int* element_size,
4068  ElementsKind* element_kind) {
4069  switch (array_type) {
4070 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
4071  case kExternal##Type##Array: \
4072  *element_size = size; \
4073  *element_kind = TYPE##_ELEMENTS; \
4074  return;
4075 
4076  TYPED_ARRAYS(TYPED_ARRAY_CASE)
4077 #undef TYPED_ARRAY_CASE
4078 
4079  default:
4080  *element_size = 0; // Bogus
4081  *element_kind = UINT8_ELEMENTS; // Bogus
4082  UNREACHABLE();
4083  }
4084 }
4085 
4086 
4087 MaybeObject* Heap::AllocateFixedTypedArray(int length,
4088  ExternalArrayType array_type,
4089  PretenureFlag pretenure) {
4090  int element_size;
4091  ElementsKind elements_kind;
4092  ForFixedTypedArray(array_type, &element_size, &elements_kind);
4093  int size = OBJECT_POINTER_ALIGN(
4094  length * element_size + FixedTypedArrayBase::kDataOffset);
4095 #ifndef V8_HOST_ARCH_64_BIT
4096  if (array_type == kExternalFloat64Array) {
4097  size += kPointerSize;
4098  }
4099 #endif
4100  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4101 
4102  HeapObject* object;
4103  MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
4104  if (!maybe_object->To(&object)) return maybe_object;
4105 
4106  if (array_type == kExternalFloat64Array) {
4107  object = EnsureDoubleAligned(this, object, size);
4108  }
4109 
4110  FixedTypedArrayBase* elements =
4111  reinterpret_cast<FixedTypedArrayBase*>(object);
4112  elements->set_map(MapForFixedTypedArray(array_type));
4113  elements->set_length(length);
4114  memset(elements->DataPtr(), 0, elements->DataSize());
4115  return elements;
4116 }
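// --- Editorial sketch (not part of heap.cc): the size computation used above.
// kDataOffsetSketch and the pointer sizes are illustrative stand-ins for
// FixedTypedArrayBase::kDataOffset and kPointerSize.
#include <cstddef>

inline std::size_t ObjectPointerAlignSketch(std::size_t size, std::size_t ptr) {
  return (size + ptr - 1) & ~(ptr - 1);  // round up to a pointer boundary
}

inline std::size_t FixedTypedArraySizeSketch(std::size_t length,
                                             std::size_t element_size,
                                             bool is_float64,
                                             bool is_32bit_host) {
  const std::size_t kDataOffsetSketch = 16;  // stand-in for the header size
  const std::size_t kPointerSizeSketch = is_32bit_host ? 4 : 8;
  std::size_t size = ObjectPointerAlignSketch(
      length * element_size + kDataOffsetSketch, kPointerSizeSketch);
  // On 32-bit hosts an extra word is reserved so the double payload can be
  // shifted onto an 8-byte boundary afterwards (see EnsureDoubleAligned above).
  if (is_float64 && is_32bit_host) size += kPointerSizeSketch;
  return size;
}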
4117 
4118 
4119 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4120  Code::Flags flags,
4121  Handle<Object> self_reference,
4122  bool immovable,
4123  bool crankshafted,
4124  int prologue_offset) {
4125  // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
4126  // do not risk leaving uninitialized Code object (and breaking the heap).
4127  ByteArray* reloc_info;
4128  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4129  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4130 
4131  ConstantPoolArray* constant_pool;
4132  if (FLAG_enable_ool_constant_pool) {
4133  MaybeObject* maybe_constant_pool = desc.origin->AllocateConstantPool(this);
4134  if (!maybe_constant_pool->To(&constant_pool)) return maybe_constant_pool;
4135  } else {
4136  constant_pool = empty_constant_pool_array();
4137  }
4138 
4139  // Compute size.
4140  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4141  int obj_size = Code::SizeFor(body_size);
4142  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4143  MaybeObject* maybe_result;
4144  // Large code objects and code objects which should stay at a fixed address
4145  // are allocated in large object space.
4146  HeapObject* result;
4147  bool force_lo_space = obj_size > code_space()->AreaSize();
4148  if (force_lo_space) {
4149  maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4150  } else {
4151  maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4152  }
4153  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4154 
4155  if (immovable && !force_lo_space &&
4156  // Objects on the first page of each space are never moved.
4157  !code_space_->FirstPage()->Contains(result->address())) {
4158  // Discard the first code allocation, which was on a page where it could be
4159  // moved.
4160  CreateFillerObjectAt(result->address(), obj_size);
4161  maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4162  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4163  }
4164 
4165  // Initialize the object
4166  result->set_map_no_write_barrier(code_map());
4167  Code* code = Code::cast(result);
4168  ASSERT(!isolate_->code_range()->exists() ||
4169  isolate_->code_range()->contains(code->address()));
4170  code->set_instruction_size(desc.instr_size);
4171  code->set_relocation_info(reloc_info);
4172  code->set_flags(flags);
4173  code->set_raw_kind_specific_flags1(0);
4174  code->set_raw_kind_specific_flags2(0);
4175  code->set_is_crankshafted(crankshafted);
4176  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4177  code->set_raw_type_feedback_info(undefined_value());
4178  code->set_next_code_link(undefined_value());
4179  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4180  code->set_gc_metadata(Smi::FromInt(0));
4181  code->set_ic_age(global_ic_age_);
4182  code->set_prologue_offset(prologue_offset);
4183  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4184  code->set_marked_for_deoptimization(false);
4185  }
4186 
4187  if (FLAG_enable_ool_constant_pool) {
4188  desc.origin->PopulateConstantPool(constant_pool);
4189  }
4190  code->set_constant_pool(constant_pool);
4191 
4192 #ifdef ENABLE_DEBUGGER_SUPPORT
4193  if (code->kind() == Code::FUNCTION) {
4194  code->set_has_debug_break_slots(
4195  isolate_->debugger()->IsDebuggerActive());
4196  }
4197 #endif
4198 
4199  // Allow self references to created code object by patching the handle to
4200  // point to the newly allocated Code object.
4201  if (!self_reference.is_null()) {
4202  *(self_reference.location()) = code;
4203  }
4204  // Migrate generated code.
4205  // The generated code can contain Object** values (typically from handles)
4206  // that are dereferenced during the copy to point directly to the actual heap
4207  // objects. These pointers can include references to the code object itself,
4208  // through the self_reference parameter.
4209  code->CopyFrom(desc);
4210 
4211 #ifdef VERIFY_HEAP
4212  if (FLAG_verify_heap) {
4213  code->Verify();
4214  }
4215 #endif
4216  return code;
4217 }
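// --- Editorial sketch (not part of heap.cc): the placement decision made at
// the top of CreateCode. code_area_size stands in for code_space()->AreaSize().
inline bool GoesToLargeObjectSpaceSketch(int obj_size, int code_area_size) {
  // Code objects larger than a regular code-space page area must live in the
  // large object space; as the code above shows, immovable objects may also
  // end up there after the first (movable) allocation is discarded.
  return obj_size > code_area_size;
}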
4218 
4219 
4220 MaybeObject* Heap::CopyCode(Code* code) {
4221  MaybeObject* maybe_result;
4222  Object* new_constant_pool;
4223  if (FLAG_enable_ool_constant_pool &&
4224  code->constant_pool() != empty_constant_pool_array()) {
4225  // Copy the constant pool, since edits to the copied code may modify
4226  // the constant pool.
4227  maybe_result = CopyConstantPoolArray(code->constant_pool());
4228  if (!maybe_result->ToObject(&new_constant_pool)) return maybe_result;
4229  } else {
4230  new_constant_pool = empty_constant_pool_array();
4231  }
4232 
4233  // Allocate an object the same size as the code object.
4234  int obj_size = code->Size();
4235  if (obj_size > code_space()->AreaSize()) {
4236  maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4237  } else {
4238  maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4239  }
4240 
4241  Object* result;
4242  if (!maybe_result->ToObject(&result)) return maybe_result;
4243 
4244  // Copy code object.
4245  Address old_addr = code->address();
4246  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4247  CopyBlock(new_addr, old_addr, obj_size);
4248  Code* new_code = Code::cast(result);
4249 
4250  // Update the constant pool.
4251  new_code->set_constant_pool(new_constant_pool);
4252 
4253  // Relocate the copy.
4254  ASSERT(!isolate_->code_range()->exists() ||
4255  isolate_->code_range()->contains(code->address()));
4256  new_code->Relocate(new_addr - old_addr);
4257  return new_code;
4258 }
4259 
4260 
4261 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4262  // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
4263  // do not risk leaving uninitialized Code object (and breaking the heap).
4264  Object* reloc_info_array;
4265  { MaybeObject* maybe_reloc_info_array =
4266  AllocateByteArray(reloc_info.length(), TENURED);
4267  if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4268  return maybe_reloc_info_array;
4269  }
4270  }
4271  Object* new_constant_pool;
4272  if (FLAG_enable_ool_constant_pool &&
4273  code->constant_pool() != empty_constant_pool_array()) {
4274  // Copy the constant pool, since edits to the copied code may modify
4275  // the constant pool.
4276  MaybeObject* maybe_constant_pool =
4277  CopyConstantPoolArray(code->constant_pool());
4278  if (!maybe_constant_pool->ToObject(&new_constant_pool))
4279  return maybe_constant_pool;
4280  } else {
4281  new_constant_pool = empty_constant_pool_array();
4282  }
4283 
4284  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4285 
4286  int new_obj_size = Code::SizeFor(new_body_size);
4287 
4288  Address old_addr = code->address();
4289 
4290  size_t relocation_offset =
4291  static_cast<size_t>(code->instruction_end() - old_addr);
4292 
4293  MaybeObject* maybe_result;
4294  if (new_obj_size > code_space()->AreaSize()) {
4295  maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4296  } else {
4297  maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
4298  }
4299 
4300  Object* result;
4301  if (!maybe_result->ToObject(&result)) return maybe_result;
4302 
4303  // Copy code object.
4304  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4305 
4306  // Copy header and instructions.
4307  CopyBytes(new_addr, old_addr, relocation_offset);
4308 
4309  Code* new_code = Code::cast(result);
4310  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4311 
4312  // Update constant pool.
4313  new_code->set_constant_pool(new_constant_pool);
4314 
4315  // Copy patched rinfo.
4316  CopyBytes(new_code->relocation_start(),
4317  reloc_info.start(),
4318  static_cast<size_t>(reloc_info.length()));
4319 
4320  // Relocate the copy.
4321  ASSERT(!isolate_->code_range()->exists() ||
4322  isolate_->code_range()->contains(code->address()));
4323  new_code->Relocate(new_addr - old_addr);
4324 
4325 #ifdef VERIFY_HEAP
4326  if (FLAG_verify_heap) {
4327  code->Verify();
4328  }
4329 #endif
4330  return new_code;
4331 }
4332 
4333 
4334 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4335  AllocationSite* allocation_site) {
4336  memento->set_map_no_write_barrier(allocation_memento_map());
4337  ASSERT(allocation_site->map() == allocation_site_map());
4338  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4339  if (FLAG_allocation_site_pretenuring) {
4340  allocation_site->IncrementMementoCreateCount();
4341  }
4342 }
4343 
4344 
4345 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space,
4346  AllocationSite* allocation_site) {
4347  ASSERT(gc_state_ == NOT_IN_GC);
4348  ASSERT(map->instance_type() != MAP_TYPE);
4349  // If allocation failures are disallowed, we may allocate in a different
4350  // space when new space is full and the object is not a large object.
4351  AllocationSpace retry_space =
4352  (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4353  int size = map->instance_size();
4354  if (allocation_site != NULL) {
4355  size += AllocationMemento::kSize;
4356  }
4357  Object* result;
4358  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4359  if (!maybe_result->ToObject(&result)) return maybe_result;
4360  // No need for write barrier since object is white and map is in old space.
4361  HeapObject::cast(result)->set_map_no_write_barrier(map);
4362  if (allocation_site != NULL) {
4363  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4364  reinterpret_cast<Address>(result) + map->instance_size());
4365  InitializeAllocationMemento(alloc_memento, allocation_site);
4366  }
4367  return result;
4368 }
4369 
4370 
4371 void Heap::InitializeFunction(JSFunction* function,
4372  SharedFunctionInfo* shared,
4373  Object* prototype) {
4374  ASSERT(!prototype->IsMap());
4375  function->initialize_properties();
4376  function->initialize_elements();
4377  function->set_shared(shared);
4378  function->set_code(shared->code());
4379  function->set_prototype_or_initial_map(prototype);
4380  function->set_context(undefined_value());
4381  function->set_literals_or_bindings(empty_fixed_array());
4382  function->set_next_function_link(undefined_value());
4383 }
4384 
4385 
4386 MaybeObject* Heap::AllocateFunction(Map* function_map,
4387  SharedFunctionInfo* shared,
4388  Object* prototype,
4389  PretenureFlag pretenure) {
4390  AllocationSpace space =
4391  (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4392  Object* result;
4393  { MaybeObject* maybe_result = Allocate(function_map, space);
4394  if (!maybe_result->ToObject(&result)) return maybe_result;
4395  }
4396  InitializeFunction(JSFunction::cast(result), shared, prototype);
4397  return result;
4398 }
4399 
4400 
4401 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4402  // To get fast allocation and map sharing for arguments objects we
4403  // allocate them based on an arguments boilerplate.
4404 
4405  JSObject* boilerplate;
4406  int arguments_object_size;
4407  bool strict_mode_callee = callee->IsJSFunction() &&
4408  JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
4409  if (strict_mode_callee) {
4410  boilerplate =
4411  isolate()->context()->native_context()->strict_arguments_boilerplate();
4412  arguments_object_size = kStrictArgumentsObjectSize;
4413  } else {
4414  boilerplate =
4415  isolate()->context()->native_context()->sloppy_arguments_boilerplate();
4416  arguments_object_size = kSloppyArgumentsObjectSize;
4417  }
4418 
4419  // Check that the size of the boilerplate matches our
4420  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4421  // on the size being a known constant.
4422  ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4423 
4424  // Do the allocation.
4425  Object* result;
4426  { MaybeObject* maybe_result =
4427  AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4428  if (!maybe_result->ToObject(&result)) return maybe_result;
4429  }
4430 
4431  // Copy the content. The arguments boilerplate doesn't have any
4432  // fields that point to new space so it's safe to skip the write
4433  // barrier here.
4434  CopyBlock(HeapObject::cast(result)->address(),
4435  boilerplate->address(),
4436  JSObject::kHeaderSize);
4437 
4438  // Set the length property.
4439  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4440  Smi::FromInt(length),
4441  SKIP_WRITE_BARRIER);
4442  // Set the callee property for sloppy mode arguments object only.
4443  if (!strict_mode_callee) {
4444  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4445  callee);
4446  }
4447 
4448  // Check the state of the object
4449  ASSERT(JSObject::cast(result)->HasFastProperties());
4450  ASSERT(JSObject::cast(result)->HasFastObjectElements());
4451 
4452  return result;
4453 }
4454 
4455 
4456 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4457  FixedArray* properties,
4458  Map* map) {
4459  obj->set_properties(properties);
4460  obj->initialize_elements();
4461  // TODO(1240798): Initialize the object's body using valid initial values
4462  // according to the object's initial map. For example, if the map's
4463  // instance type is JS_ARRAY_TYPE, the length field should be initialized
4464  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4465  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
4466  // verification code has to cope with (temporarily) invalid objects. See
4467  // for example, JSArray::JSArrayVerify).
4468  Object* filler;
4469  // We cannot always fill with one_pointer_filler_map because objects
4470  // created from API functions expect their internal fields to be initialized
4471  // with undefined_value.
4472  // Pre-allocated fields need to be initialized with undefined_value as well
4473  // so that object accesses before the constructor completes (e.g. in the
4474  // debugger) will not cause a crash.
4475  if (map->constructor()->IsJSFunction() &&
4476  JSFunction::cast(map->constructor())->shared()->
4477  IsInobjectSlackTrackingInProgress()) {
4478  // We might want to shrink the object later.
4479  ASSERT(obj->GetInternalFieldCount() == 0);
4480  filler = Heap::one_pointer_filler_map();
4481  } else {
4482  filler = Heap::undefined_value();
4483  }
4484  obj->InitializeBody(map, Heap::undefined_value(), filler);
4485 }
4486 
4487 
4488 MaybeObject* Heap::AllocateJSObjectFromMap(
4489  Map* map,
4490  PretenureFlag pretenure,
4491  bool allocate_properties,
4492  AllocationSite* allocation_site) {
4493  // JSFunctions should be allocated using AllocateFunction to be
4494  // properly initialized.
4495  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4496 
4497  // Both types of global objects should be allocated using
4498  // AllocateGlobalObject to be properly initialized.
4499  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4500  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4501 
4502  // Allocate the backing storage for the properties.
4503  FixedArray* properties;
4504  if (allocate_properties) {
4505  int prop_size = map->InitialPropertiesLength();
4506  ASSERT(prop_size >= 0);
4507  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4508  if (!maybe_properties->To(&properties)) return maybe_properties;
4509  }
4510  } else {
4511  properties = empty_fixed_array();
4512  }
4513 
4514  // Allocate the JSObject.
4515  int size = map->instance_size();
4516  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4517  Object* obj;
4518  MaybeObject* maybe_obj = Allocate(map, space, allocation_site);
4519  if (!maybe_obj->To(&obj)) return maybe_obj;
4520 
4521  // Initialize the JSObject.
4522  InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4523  ASSERT(JSObject::cast(obj)->HasFastElements() ||
4524  JSObject::cast(obj)->HasExternalArrayElements() ||
4525  JSObject::cast(obj)->HasFixedTypedArrayElements());
4526  return obj;
4527 }
4528 
4529 
4530 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4531  PretenureFlag pretenure,
4532  AllocationSite* allocation_site) {
4533  ASSERT(constructor->has_initial_map());
4534 
4535  // Allocate the object based on the constructors initial map.
4536  MaybeObject* result = AllocateJSObjectFromMap(constructor->initial_map(),
4537  pretenure,
4538  true,
4539  allocation_site);
4540 #ifdef DEBUG
4541  // Make sure result is NOT a global object if valid.
4542  Object* non_failure;
4543  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4544 #endif
4545  return result;
4546 }
4547 
4548 
4549 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4550  // Allocate a fresh map. Modules do not have a prototype.
4551  Map* map;
4552  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4553  if (!maybe_map->To(&map)) return maybe_map;
4554  // Allocate the object based on the map.
4555  JSModule* module;
4556  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4557  if (!maybe_module->To(&module)) return maybe_module;
4558  module->set_context(context);
4559  module->set_scope_info(scope_info);
4560  return module;
4561 }
4562 
4563 
4564 MaybeObject* Heap::AllocateJSArrayAndStorage(
4565  ElementsKind elements_kind,
4566  int length,
4567  int capacity,
4568  ArrayStorageAllocationMode mode,
4569  PretenureFlag pretenure) {
4570  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4571  JSArray* array;
4572  if (!maybe_array->To(&array)) return maybe_array;
4573 
4574  // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
4575  // for performance reasons.
4576  ASSERT(capacity >= length);
4577 
4578  if (capacity == 0) {
4579  array->set_length(Smi::FromInt(0));
4580  array->set_elements(empty_fixed_array());
4581  return array;
4582  }
4583 
4584  FixedArrayBase* elms;
4585  MaybeObject* maybe_elms = NULL;
4586  if (IsFastDoubleElementsKind(elements_kind)) {
4587  if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4588  maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4589  } else {
4590  ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4591  maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4592  }
4593  } else {
4594  ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4595  if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4596  maybe_elms = AllocateUninitializedFixedArray(capacity);
4597  } else {
4598  ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4599  maybe_elms = AllocateFixedArrayWithHoles(capacity);
4600  }
4601  }
4602  if (!maybe_elms->To(&elms)) return maybe_elms;
4603 
4604  array->set_elements(elms);
4605  array->set_length(Smi::FromInt(length));
4606  return array;
4607 }
4608 
4609 
4610 MaybeObject* Heap::AllocateJSArrayStorage(
4611  JSArray* array,
4612  int length,
4613  int capacity,
4614  ArrayStorageAllocationMode mode) {
4615  ASSERT(capacity >= length);
4616 
4617  if (capacity == 0) {
4618  array->set_length(Smi::FromInt(0));
4619  array->set_elements(empty_fixed_array());
4620  return array;
4621  }
4622 
4623  FixedArrayBase* elms;
4624  MaybeObject* maybe_elms = NULL;
4625  ElementsKind elements_kind = array->GetElementsKind();
4626  if (IsFastDoubleElementsKind(elements_kind)) {
4627  if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4628  maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4629  } else {
4630  ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4631  maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4632  }
4633  } else {
4634  ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4635  if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4636  maybe_elms = AllocateUninitializedFixedArray(capacity);
4637  } else {
4638  ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4639  maybe_elms = AllocateFixedArrayWithHoles(capacity);
4640  }
4641  }
4642  if (!maybe_elms->To(&elms)) return maybe_elms;
4643 
4644  array->set_elements(elms);
4645  array->set_length(Smi::FromInt(length));
4646  return array;
4647 }
4648 
4649 
4650 MaybeObject* Heap::AllocateJSArrayWithElements(
4651  FixedArrayBase* elements,
4652  ElementsKind elements_kind,
4653  int length,
4654  PretenureFlag pretenure) {
4655  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4656  JSArray* array;
4657  if (!maybe_array->To(&array)) return maybe_array;
4658 
4659  array->set_elements(elements);
4660  array->set_length(Smi::FromInt(length));
4661  array->ValidateElements();
4662  return array;
4663 }
4664 
4665 
4666 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4667  // Allocate map.
4668  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4669  // maps. Will probably depend on the identity of the handler object, too.
4670  Map* map;
4671  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4672  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4673  map->set_prototype(prototype);
4674 
4675  // Allocate the proxy object.
4676  JSProxy* result;
4677  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4678  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4679  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4680  result->set_handler(handler);
4681  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4682  return result;
4683 }
4684 
4685 
4686 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4687  Object* call_trap,
4688  Object* construct_trap,
4689  Object* prototype) {
4690  // Allocate map.
4691  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4692  // maps. Will probably depend on the identity of the handler object, too.
4693  Map* map;
4694  MaybeObject* maybe_map_obj =
4695  AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4696  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4697  map->set_prototype(prototype);
4698 
4699  // Allocate the proxy object.
4700  JSFunctionProxy* result;
4701  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4702  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4703  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4704  result->set_handler(handler);
4705  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4706  result->set_call_trap(call_trap);
4707  result->set_construct_trap(construct_trap);
4708  return result;
4709 }
4710 
4711 
4712 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4713  // Never used to copy functions. If functions need to be copied we
4714  // have to be careful to clear the literals array.
4715  SLOW_ASSERT(!source->IsJSFunction());
4716 
4717  // Make the clone.
4718  Map* map = source->map();
4719  int object_size = map->instance_size();
4720  Object* clone;
4721 
4722  ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4723 
4724  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4725 
4726  // If we're forced to always allocate, we use the general allocation
4727  // functions which may leave us with an object in old space.
4728  if (always_allocate()) {
4729  { MaybeObject* maybe_clone =
4730  AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4731  if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4732  }
4733  Address clone_address = HeapObject::cast(clone)->address();
4734  CopyBlock(clone_address,
4735  source->address(),
4736  object_size);
4737  // Update write barrier for all fields that lie beyond the header.
4738  RecordWrites(clone_address,
4739  JSObject::kHeaderSize,
4740  (object_size - JSObject::kHeaderSize) / kPointerSize);
4741  } else {
4742  wb_mode = SKIP_WRITE_BARRIER;
4743 
4744  { int adjusted_object_size = site != NULL
4745  ? object_size + AllocationMemento::kSize
4746  : object_size;
4747  MaybeObject* maybe_clone =
4748  AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4749  if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4750  }
4751  SLOW_ASSERT(InNewSpace(clone));
4752  // Since we know the clone is allocated in new space, we can copy
4753  // the contents without worrying about updating the write barrier.
4754  CopyBlock(HeapObject::cast(clone)->address(),
4755  source->address(),
4756  object_size);
4757 
4758  if (site != NULL) {
4759  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4760  reinterpret_cast<Address>(clone) + object_size);
4761  InitializeAllocationMemento(alloc_memento, site);
4762  }
4763  }
4764 
4765  SLOW_ASSERT(
4766  JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4767  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4768  FixedArray* properties = FixedArray::cast(source->properties());
4769  // Update elements if necessary.
4770  if (elements->length() > 0) {
4771  Object* elem;
4772  { MaybeObject* maybe_elem;
4773  if (elements->map() == fixed_cow_array_map()) {
4774  maybe_elem = FixedArray::cast(elements);
4775  } else if (source->HasFastDoubleElements()) {
4776  maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4777  } else {
4778  maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4779  }
4780  if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4781  }
4782  JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4783  }
4784  // Update properties if necessary.
4785  if (properties->length() > 0) {
4786  Object* prop;
4787  { MaybeObject* maybe_prop = CopyFixedArray(properties);
4788  if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4789  }
4790  JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4791  }
4792  // Return the new clone.
4793  return clone;
4794 }
4795 
4796 
4797 MaybeObject* Heap::ReinitializeJSReceiver(
4798  JSReceiver* object, InstanceType type, int size) {
4799  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4800 
4801  // Allocate fresh map.
4802  // TODO(rossberg): Once we optimize proxies, cache these maps.
4803  Map* map;
4804  MaybeObject* maybe = AllocateMap(type, size);
4805  if (!maybe->To<Map>(&map)) return maybe;
4806 
4807  // Check that the receiver has at least the size of the fresh object.
4808  int size_difference = object->map()->instance_size() - map->instance_size();
4809  ASSERT(size_difference >= 0);
4810 
4811  map->set_prototype(object->map()->prototype());
4812 
4813  // Allocate the backing storage for the properties.
4814  int prop_size = map->unused_property_fields() - map->inobject_properties();
4815  Object* properties;
4816  maybe = AllocateFixedArray(prop_size, TENURED);
4817  if (!maybe->ToObject(&properties)) return maybe;
4818 
4819  // Functions require some allocation, which might fail here.
4820  SharedFunctionInfo* shared = NULL;
4821  if (type == JS_FUNCTION_TYPE) {
4822  String* name;
4823  OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
4824  HashSeed());
4825  maybe = InternalizeStringWithKey(&key);
4826  if (!maybe->To<String>(&name)) return maybe;
4827  maybe = AllocateSharedFunctionInfo(name);
4828  if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4829  }
4830 
4831  // Because of possible retries of this function after failure,
4832  // we must NOT fail after this point, where we have changed the type!
4833 
4834  // Reset the map for the object.
4835  object->set_map(map);
4836  JSObject* jsobj = JSObject::cast(object);
4837 
4838  // Reinitialize the object from the constructor map.
4839  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4840 
4841  // Functions require some minimal initialization.
4842  if (type == JS_FUNCTION_TYPE) {
4843  map->set_function_with_prototype(true);
4844  InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4845  JSFunction::cast(object)->set_context(
4846  isolate()->context()->native_context());
4847  }
4848 
4849  // Put in filler if the new object is smaller than the old.
4850  if (size_difference > 0) {
4851  CreateFillerObjectAt(
4852  object->address() + map->instance_size(), size_difference);
4853  }
4854 
4855  return object;
4856 }
4857 
4858 
4859 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4860  JSGlobalProxy* object) {
4861  ASSERT(constructor->has_initial_map());
4862  Map* map = constructor->initial_map();
4863 
4864  // Check that the already allocated object has the same size and type as
4865  // objects allocated using the constructor.
4866  ASSERT(map->instance_size() == object->map()->instance_size());
4867  ASSERT(map->instance_type() == object->map()->instance_type());
4868 
4869  // Allocate the backing storage for the properties.
4870  int prop_size = map->unused_property_fields() - map->inobject_properties();
4871  Object* properties;
4872  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4873  if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4874  }
4875 
4876  // Reset the map for the object.
4877  object->set_map(constructor->initial_map());
4878 
4879  // Reinitialize the object from the constructor map.
4880  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4881  return object;
4882 }
4883 
4884 
4885 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4886  PretenureFlag pretenure) {
4887  int length = string.length();
4888  if (length == 1) {
4889  return Heap::LookupSingleCharacterStringFromCode(string[0]);
4890  }
4891  Object* result;
4892  { MaybeObject* maybe_result =
4893  AllocateRawOneByteString(string.length(), pretenure);
4894  if (!maybe_result->ToObject(&result)) return maybe_result;
4895  }
4896 
4897  // Copy the characters into the new object.
4898  CopyChars(SeqOneByteString::cast(result)->GetChars(),
4899  string.start(),
4900  length);
4901  return result;
4902 }
4903 
4904 
4905 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4906  int non_ascii_start,
4907  PretenureFlag pretenure) {
4908  // Continue counting the number of characters in the UTF-8 string, starting
4909  // from the first non-ascii character or word.
4910  Access<UnicodeCache::Utf8Decoder>
4911  decoder(isolate_->unicode_cache()->utf8_decoder());
4912  decoder->Reset(string.start() + non_ascii_start,
4913  string.length() - non_ascii_start);
4914  int utf16_length = decoder->Utf16Length();
4915  ASSERT(utf16_length > 0);
4916  // Allocate string.
4917  Object* result;
4918  {
4919  int chars = non_ascii_start + utf16_length;
4920  MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4921  if (!maybe_result->ToObject(&result)) return maybe_result;
4922  }
4923  // Convert and copy the characters into the new object.
4924  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4925  // Copy ascii portion.
4926  uint16_t* data = twobyte->GetChars();
4927  if (non_ascii_start != 0) {
4928  const char* ascii_data = string.start();
4929  for (int i = 0; i < non_ascii_start; i++) {
4930  *data++ = *ascii_data++;
4931  }
4932  }
4933  // Now write the remainder.
4934  decoder->WriteUtf16(data, utf16_length);
4935  return result;
4936 }
4937 
4938 
4939 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4940  PretenureFlag pretenure) {
4941  // Check if the string is an ASCII string.
4942  Object* result;
4943  int length = string.length();
4944  const uc16* start = string.start();
4945 
4946  if (String::IsOneByte(start, length)) {
4947  MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4948  if (!maybe_result->ToObject(&result)) return maybe_result;
4949  CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4950  } else { // It's not a one byte string.
4951  MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4952  if (!maybe_result->ToObject(&result)) return maybe_result;
4953  CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4954  }
4955  return result;
4956 }
4957 
4958 
4959 Map* Heap::InternalizedStringMapForString(String* string) {
4960  // If the string is in new space it cannot be used as internalized.
4961  if (InNewSpace(string)) return NULL;
4962 
4963  // Find the corresponding internalized string map for strings.
4964  switch (string->map()->instance_type()) {
4965  case STRING_TYPE: return internalized_string_map();
4966  case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4967  case CONS_STRING_TYPE: return cons_internalized_string_map();
4968  case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4969  case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4970  case EXTERNAL_ASCII_STRING_TYPE:
4971  return external_ascii_internalized_string_map();
4972  case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4973  return external_internalized_string_with_one_byte_data_map();
4974  case SHORT_EXTERNAL_STRING_TYPE:
4975  return short_external_internalized_string_map();
4976  case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4977  return short_external_ascii_internalized_string_map();
4978  case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4979  return short_external_internalized_string_with_one_byte_data_map();
4980  default: return NULL; // No match found.
4981  }
4982 }
4983 
4984 
4985 static inline void WriteOneByteData(Vector<const char> vector,
4986  uint8_t* chars,
4987  int len) {
4988  // Only works for ascii.
4989  ASSERT(vector.length() == len);
4990  OS::MemCopy(chars, vector.start(), len);
4991 }
4992 
4993 static inline void WriteTwoByteData(Vector<const char> vector,
4994  uint16_t* chars,
4995  int len) {
4996  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4997  unsigned stream_length = vector.length();
4998  while (stream_length != 0) {
4999  unsigned consumed = 0;
5000  uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5001  ASSERT(c != unibrow::Utf8::kBadChar);
5002  ASSERT(consumed <= stream_length);
5003  stream_length -= consumed;
5004  stream += consumed;
5005  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5006  len -= 2;
5007  if (len < 0) break;
5008  *chars++ = unibrow::Utf16::LeadSurrogate(c);
5009  *chars++ = unibrow::Utf16::TrailSurrogate(c);
5010  } else {
5011  len -= 1;
5012  if (len < 0) break;
5013  *chars++ = c;
5014  }
5015  }
5016  ASSERT(stream_length == 0);
5017  ASSERT(len == 0);
5018 }
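// --- Editorial sketch (not part of heap.cc): how a code point above the BMP is
// split into a UTF-16 surrogate pair, mirroring the Lead/TrailSurrogate calls
// above. The constants are the standard UTF-16 ones, not V8 internals.
#include <cstdint>

inline void EncodeUtf16Sketch(uint32_t code_point, uint16_t out[2], int* units) {
  if (code_point > 0xFFFF) {
    uint32_t v = code_point - 0x10000;
    out[0] = static_cast<uint16_t>(0xD800 + (v >> 10));    // lead surrogate
    out[1] = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // trail surrogate
    *units = 2;
  } else {
    out[0] = static_cast<uint16_t>(code_point);
    *units = 1;
  }
}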
5019 
5020 
5021 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5022  ASSERT(s->length() == len);
5023  String::WriteToFlat(s, chars, 0, len);
5024 }
5025 
5026 
5027 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5028  ASSERT(s->length() == len);
5029  String::WriteToFlat(s, chars, 0, len);
5030 }
5031 
5032 
5033 template<bool is_one_byte, typename T>
5034 MaybeObject* Heap::AllocateInternalizedStringImpl(
5035  T t, int chars, uint32_t hash_field) {
5036  ASSERT(chars >= 0);
5037  // Compute map and object size.
5038  int size;
5039  Map* map;
5040 
5041  if (chars < 0 || chars > String::kMaxLength) {
5042  return isolate()->ThrowInvalidStringLength();
5043  }
5044  if (is_one_byte) {
5045  map = ascii_internalized_string_map();
5046  size = SeqOneByteString::SizeFor(chars);
5047  } else {
5048  map = internalized_string_map();
5049  size = SeqTwoByteString::SizeFor(chars);
5050  }
5051  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
5052 
5053  // Allocate string.
5054  Object* result;
5055  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5056  if (!maybe_result->ToObject(&result)) return maybe_result;
5057  }
5058 
5059  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5060  // Set length and hash fields of the allocated string.
5061  String* answer = String::cast(result);
5062  answer->set_length(chars);
5063  answer->set_hash_field(hash_field);
5064 
5065  ASSERT_EQ(size, answer->Size());
5066 
5067  if (is_one_byte) {
5068  WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5069  } else {
5070  WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5071  }
5072  return answer;
5073 }
5074 
5075 
5076 // Need explicit instantiations.
5077 template
5078 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5079 template
5080 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5081  String*, int, uint32_t);
5082 template
5083 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5084  Vector<const char>, int, uint32_t);
5085 
5086 
5087 MaybeObject* Heap::AllocateRawOneByteString(int length,
5088  PretenureFlag pretenure) {
5089  if (length < 0 || length > String::kMaxLength) {
5090  return isolate()->ThrowInvalidStringLength();
5091  }
5092  int size = SeqOneByteString::SizeFor(length);
5093  ASSERT(size <= SeqOneByteString::kMaxSize);
5094  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5095 
5096  Object* result;
5097  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5098  if (!maybe_result->ToObject(&result)) return maybe_result;
5099  }
5100 
5101  // Partially initialize the object.
5102  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5103  String::cast(result)->set_length(length);
5104  String::cast(result)->set_hash_field(String::kEmptyHashField);
5105  ASSERT_EQ(size, HeapObject::cast(result)->Size());
5106 
5107  return result;
5108 }
5109 
5110 
5111 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5112  PretenureFlag pretenure) {
5113  if (length < 0 || length > String::kMaxLength) {
5114  return isolate()->ThrowInvalidStringLength();
5115  }
5116  int size = SeqTwoByteString::SizeFor(length);
5117  ASSERT(size <= SeqTwoByteString::kMaxSize);
5118  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5119 
5120  Object* result;
5121  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5122  if (!maybe_result->ToObject(&result)) return maybe_result;
5123  }
5124 
5125  // Partially initialize the object.
5126  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5127  String::cast(result)->set_length(length);
5128  String::cast(result)->set_hash_field(String::kEmptyHashField);
5129  ASSERT_EQ(size, HeapObject::cast(result)->Size());
5130  return result;
5131 }
5132 
5133 
5134 MaybeObject* Heap::AllocateJSArray(
5135  ElementsKind elements_kind,
5136  PretenureFlag pretenure) {
5137  Context* native_context = isolate()->context()->native_context();
5138  JSFunction* array_function = native_context->array_function();
5139  Map* map = array_function->initial_map();
5140  Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5141  if (transition_map != NULL) map = transition_map;
5142  return AllocateJSObjectFromMap(map, pretenure);
5143 }
5144 
5145 
5146 MaybeObject* Heap::AllocateEmptyFixedArray() {
5147  int size = FixedArray::SizeFor(0);
5148  Object* result;
5149  { MaybeObject* maybe_result =
5150  AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5151  if (!maybe_result->ToObject(&result)) return maybe_result;
5152  }
5153  // Initialize the object.
5154  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5155  fixed_array_map());
5156  reinterpret_cast<FixedArray*>(result)->set_length(0);
5157  return result;
5158 }
5159 
5160 
5161 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5162  return AllocateExternalArray(0, array_type, NULL, TENURED);
5163 }
5164 
5165 
5166 MaybeObject* Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
5167  if (!InNewSpace(src)) {
5168  return src;
5169  }
5170 
5171  int len = src->length();
5172  Object* obj;
5173  { MaybeObject* maybe_obj = AllocateRawFixedArray(len, TENURED);
5174  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5175  }
5176  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_array_map());
5177  FixedArray* result = FixedArray::cast(obj);
5178  result->set_length(len);
5179 
5180  // Copy the content
5181  DisallowHeapAllocation no_gc;
5182  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5183  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5184 
5185  // TODO(mvstanton): The map is set twice because of protection against calling
5186  // set() on a COW FixedArray. Issue v8:3221 created to track this, and
5187  // we might then be able to remove this whole method.
5188  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
5189  return result;
5190 }
5191 
5192 
5193 MaybeObject* Heap::AllocateEmptyFixedTypedArray(ExternalArrayType array_type) {
5194  return AllocateFixedTypedArray(0, array_type, TENURED);
5195 }
5196 
5197 
5198 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5199  int len = src->length();
5200  Object* obj;
5201  { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5202  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5203  }
5204  if (InNewSpace(obj)) {
5205  HeapObject* dst = HeapObject::cast(obj);
5206  dst->set_map_no_write_barrier(map);
5207  CopyBlock(dst->address() + kPointerSize,
5208  src->address() + kPointerSize,
5209  FixedArray::SizeFor(len) - kPointerSize);
5210  return obj;
5211  }
5212  HeapObject::cast(obj)->set_map_no_write_barrier(map);
5213  FixedArray* result = FixedArray::cast(obj);
5214  result->set_length(len);
5215 
5216  // Copy the content
5217  DisallowHeapAllocation no_gc;
5218  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5219  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5220  return result;
5221 }
5222 
5223 
5224 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5225  Map* map) {
5226  int len = src->length();
5227  Object* obj;
5228  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5229  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5230  }
5231  HeapObject* dst = HeapObject::cast(obj);
5232  dst->set_map_no_write_barrier(map);
5233  CopyBlock(
5234  dst->address() + FixedDoubleArray::kLengthOffset,
5235  src->address() + FixedDoubleArray::kLengthOffset,
5236  FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5237  return obj;
5238 }
5239 
5240 
5241 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5242  Map* map) {
5243  int int64_entries = src->count_of_int64_entries();
5244  int code_ptr_entries = src->count_of_code_ptr_entries();
5245  int heap_ptr_entries = src->count_of_heap_ptr_entries();
5246  int int32_entries = src->count_of_int32_entries();
5247  Object* obj;
5248  { MaybeObject* maybe_obj =
5249  AllocateConstantPoolArray(int64_entries, code_ptr_entries,
5250  heap_ptr_entries, int32_entries);
5251  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5252  }
5253  HeapObject* dst = HeapObject::cast(obj);
5254  dst->set_map_no_write_barrier(map);
5255  int size = ConstantPoolArray::SizeFor(
5256  int64_entries, code_ptr_entries, heap_ptr_entries, int32_entries);
5257  CopyBlock(
5258  dst->address() + ConstantPoolArray::kLengthOffset,
5259  src->address() + ConstantPoolArray::kLengthOffset,
5260  size - ConstantPoolArray::kLengthOffset);
5261  return obj;
5262 }
5263 
5264 
5265 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5266  if (length < 0 || length > FixedArray::kMaxLength) {
5267  v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
5268  }
5269  int size = FixedArray::SizeFor(length);
5270  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5271 
5272  return AllocateRaw(size, space, OLD_POINTER_SPACE);
5273 }
5274 
5275 
5276 MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5277  PretenureFlag pretenure,
5278  Object* filler) {
5279  ASSERT(length >= 0);
5280  ASSERT(empty_fixed_array()->IsFixedArray());
5281  if (length == 0) return empty_fixed_array();
5282 
5283  ASSERT(!InNewSpace(filler));
5284  Object* result;
5285  { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5286  if (!maybe_result->ToObject(&result)) return maybe_result;
5287  }
5288 
5289  HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5290  FixedArray* array = FixedArray::cast(result);
5291  array->set_length(length);
5292  MemsetPointer(array->data_start(), filler, length);
5293  return array;
5294 }
5295 
5296 
5297 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5298  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5299 }
5300 
5301 
5302 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5303  PretenureFlag pretenure) {
5304  return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5305 }
5306 
5307 
5308 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5309  if (length == 0) return empty_fixed_array();
5310 
5311  Object* obj;
5312  { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5313  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5314  }
5315 
5316  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5317  fixed_array_map());
5318  FixedArray::cast(obj)->set_length(length);
5319  return obj;
5320 }
5321 
5322 
5323 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5324  int size = FixedDoubleArray::SizeFor(0);
5325  Object* result;
5326  { MaybeObject* maybe_result =
5327  AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5328  if (!maybe_result->ToObject(&result)) return maybe_result;
5329  }
5330  // Initialize the object.
5331  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5332  fixed_double_array_map());
5333  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5334  return result;
5335 }
5336 
5337 
5338 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5339  int length,
5340  PretenureFlag pretenure) {
5341  if (length == 0) return empty_fixed_array();
5342 
5343  Object* elements_object;
5344  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5345  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5346  FixedDoubleArray* elements =
5347  reinterpret_cast<FixedDoubleArray*>(elements_object);
5348 
5349  elements->set_map_no_write_barrier(fixed_double_array_map());
5350  elements->set_length(length);
5351  return elements;
5352 }
5353 
5354 
5355 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5356  int length,
5357  PretenureFlag pretenure) {
5358  if (length == 0) return empty_fixed_array();
5359 
5360  Object* elements_object;
5361  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5362  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5363  FixedDoubleArray* elements =
5364  reinterpret_cast<FixedDoubleArray*>(elements_object);
5365 
5366  for (int i = 0; i < length; ++i) {
5367  elements->set_the_hole(i);
5368  }
5369 
5370  elements->set_map_no_write_barrier(fixed_double_array_map());
5371  elements->set_length(length);
5372  return elements;
5373 }
5374 
5375 
5376 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5377  PretenureFlag pretenure) {
5378  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5379  v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
5380  }
5381  int size = FixedDoubleArray::SizeFor(length);
5382 #ifndef V8_HOST_ARCH_64_BIT
5383  size += kPointerSize;
5384 #endif
5385  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5386 
5387  HeapObject* object;
5388  { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5389  if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5390  }
5391 
5392  return EnsureDoubleAligned(this, object, size);
5393 }
5394 
5395 
5396 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5397  int number_of_code_ptr_entries,
5398  int number_of_heap_ptr_entries,
5399  int number_of_int32_entries) {
5400  ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
5401  number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
5402  int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5403  number_of_code_ptr_entries,
5404  number_of_heap_ptr_entries,
5405  number_of_int32_entries);
5406 #ifndef V8_HOST_ARCH_64_BIT
5407  size += kPointerSize;
5408 #endif
5409  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5410 
5411  HeapObject* object;
5412  { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5413  if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5414  }
5415  object = EnsureDoubleAligned(this, object, size);
5416  HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5417 
5418  ConstantPoolArray* constant_pool =
5419  reinterpret_cast<ConstantPoolArray*>(object);
5420  constant_pool->SetEntryCounts(number_of_int64_entries,
5421  number_of_code_ptr_entries,
5422  number_of_heap_ptr_entries,
5423  number_of_int32_entries);
5424  if (number_of_code_ptr_entries > 0) {
5425  int offset =
5426  constant_pool->OffsetOfElementAt(constant_pool->first_code_ptr_index());
5427  MemsetPointer(
5428  reinterpret_cast<Address*>(HeapObject::RawField(constant_pool, offset)),
5429  isolate()->builtins()->builtin(Builtins::kIllegal)->entry(),
5430  number_of_code_ptr_entries);
5431  }
5432  if (number_of_heap_ptr_entries > 0) {
5433  int offset =
5434  constant_pool->OffsetOfElementAt(constant_pool->first_heap_ptr_index());
5435  MemsetPointer(
5436  HeapObject::RawField(constant_pool, offset),
5437  undefined_value(),
5438  number_of_heap_ptr_entries);
5439  }
5440  return constant_pool;
5441 }
5442 
5443 
5444 MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
5445  int size = ConstantPoolArray::SizeFor(0, 0, 0, 0);
5446  Object* result;
5447  { MaybeObject* maybe_result =
5448  AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5449  if (!maybe_result->ToObject(&result)) return maybe_result;
5450  }
5451  HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
5452  ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0, 0);
5453  return result;
5454 }
5455 
5456 
5457 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5458  Object* result;
5459  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5460  if (!maybe_result->ToObject(&result)) return maybe_result;
5461  }
5462  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5463  hash_table_map());
5464  ASSERT(result->IsHashTable());
5465  return result;
5466 }
5467 
5468 
5469 MaybeObject* Heap::AllocateSymbol() {
5470  // Statically ensure that it is safe to allocate symbols in paged spaces.
5471  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
5472 
5473  Object* result;
5474  MaybeObject* maybe =
5475  AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5476  if (!maybe->ToObject(&result)) return maybe;
5477 
5478  HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5479 
5480  // Generate a random hash value.
5481  int hash;
5482  int attempts = 0;
5483  do {
5484  hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5485  attempts++;
5486  } while (hash == 0 && attempts < 30);
5487  if (hash == 0) hash = 1; // never return 0
5488 
5489  Symbol::cast(result)->set_hash_field(
5490  Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5491  Symbol::cast(result)->set_name(undefined_value());
5492  Symbol::cast(result)->set_flags(Smi::FromInt(0));
5493 
5494  ASSERT(!Symbol::cast(result)->is_private());
5495  return result;
5496 }
5497 
5498 
5499 MaybeObject* Heap::AllocatePrivateSymbol() {
5500  MaybeObject* maybe = AllocateSymbol();
5501  Symbol* symbol;
5502  if (!maybe->To(&symbol)) return maybe;
5503  symbol->set_is_private(true);
5504  return symbol;
5505 }
5506 
5507 
5508 MaybeObject* Heap::AllocateNativeContext() {
5509  Object* result;
5510  { MaybeObject* maybe_result =
5511  AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5512  if (!maybe_result->ToObject(&result)) return maybe_result;
5513  }
5514  Context* context = reinterpret_cast<Context*>(result);
5515  context->set_map_no_write_barrier(native_context_map());
5516  context->set_js_array_maps(undefined_value());
5517  ASSERT(context->IsNativeContext());
5518  ASSERT(result->IsContext());
5519  return result;
5520 }
5521 
5522 
5523 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5524  ScopeInfo* scope_info) {
5525  Object* result;
5526  { MaybeObject* maybe_result =
5527  AllocateFixedArray(scope_info->ContextLength(), TENURED);
5528  if (!maybe_result->ToObject(&result)) return maybe_result;
5529  }
5530  Context* context = reinterpret_cast<Context*>(result);
5531  context->set_map_no_write_barrier(global_context_map());
5532  context->set_closure(function);
5533  context->set_previous(function->context());
5534  context->set_extension(scope_info);
5535  context->set_global_object(function->context()->global_object());
5536  ASSERT(context->IsGlobalContext());
5537  ASSERT(result->IsContext());
5538  return context;
5539 }
5540 
5541 
5542 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5543  Object* result;
5544  { MaybeObject* maybe_result =
5545  AllocateFixedArray(scope_info->ContextLength(), TENURED);
5546  if (!maybe_result->ToObject(&result)) return maybe_result;
5547  }
5548  Context* context = reinterpret_cast<Context*>(result);
5549  context->set_map_no_write_barrier(module_context_map());
5550  // Instance link will be set later.
5551  context->set_extension(Smi::FromInt(0));
5552  return context;
5553 }
5554 
5555 
5556 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5557  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5558  Object* result;
5559  { MaybeObject* maybe_result = AllocateFixedArray(length);
5560  if (!maybe_result->ToObject(&result)) return maybe_result;
5561  }
5562  Context* context = reinterpret_cast<Context*>(result);
5563  context->set_map_no_write_barrier(function_context_map());
5564  context->set_closure(function);
5565  context->set_previous(function->context());
5566  context->set_extension(Smi::FromInt(0));
5567  context->set_global_object(function->context()->global_object());
5568  return context;
5569 }
5570 
5571 
5572 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5573  Context* previous,
5574  String* name,
5575  Object* thrown_object) {
5576  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5577  Object* result;
5578  { MaybeObject* maybe_result =
5579  AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5580  if (!maybe_result->ToObject(&result)) return maybe_result;
5581  }
5582  Context* context = reinterpret_cast<Context*>(result);
5583  context->set_map_no_write_barrier(catch_context_map());
5584  context->set_closure(function);
5585  context->set_previous(previous);
5586  context->set_extension(name);
5587  context->set_global_object(previous->global_object());
5588  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5589  return context;
5590 }
5591 
5592 
5593 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5594  Context* previous,
5595  JSReceiver* extension) {
5596  Object* result;
5597  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5598  if (!maybe_result->ToObject(&result)) return maybe_result;
5599  }
5600  Context* context = reinterpret_cast<Context*>(result);
5601  context->set_map_no_write_barrier(with_context_map());
5602  context->set_closure(function);
5603  context->set_previous(previous);
5604  context->set_extension(extension);
5605  context->set_global_object(previous->global_object());
5606  return context;
5607 }
5608 
5609 
5610 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5611  Context* previous,
5612  ScopeInfo* scope_info) {
5613  Object* result;
5614  { MaybeObject* maybe_result =
5615  AllocateFixedArrayWithHoles(scope_info->ContextLength());
5616  if (!maybe_result->ToObject(&result)) return maybe_result;
5617  }
5618  Context* context = reinterpret_cast<Context*>(result);
5619  context->set_map_no_write_barrier(block_context_map());
5620  context->set_closure(function);
5621  context->set_previous(previous);
5622  context->set_extension(scope_info);
5623  context->set_global_object(previous->global_object());
5624  return context;
5625 }
5626 
5627 
5628 MaybeObject* Heap::AllocateScopeInfo(int length) {
5629  FixedArray* scope_info;
5630  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5631  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5632  scope_info->set_map_no_write_barrier(scope_info_map());
5633  return scope_info;
5634 }
5635 
5636 
5637 MaybeObject* Heap::AllocateExternal(void* value) {
5638  Foreign* foreign;
5639  { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5640  if (!maybe_result->To(&foreign)) return maybe_result;
5641  }
5642  JSObject* external;
5643  { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5644  if (!maybe_result->To(&external)) return maybe_result;
5645  }
5646  external->SetInternalField(0, foreign);
5647  return external;
5648 }
5649 
5650 
5651 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5652  Map* map;
5653  switch (type) {
5654 #define MAKE_CASE(NAME, Name, name) \
5655  case NAME##_TYPE: map = name##_map(); break;
5656 STRUCT_LIST(MAKE_CASE)
5657 #undef MAKE_CASE
5658  default:
5659  UNREACHABLE();
5660  return Failure::InternalError();
5661  }
5662  int size = map->instance_size();
5663  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5664  Object* result;
5665  { MaybeObject* maybe_result = Allocate(map, space);
5666  if (!maybe_result->ToObject(&result)) return maybe_result;
5667  }
5668  Struct::cast(result)->InitializeBody(size);
5669  return result;
5670 }
5671 
5672 
5673 bool Heap::IsHeapIterable() {
5674  return (!old_pointer_space()->was_swept_conservatively() &&
5675  !old_data_space()->was_swept_conservatively());
5676 }
5677 
5678 
5679 void Heap::EnsureHeapIsIterable() {
5680  ASSERT(AllowHeapAllocation::IsAllowed());
5681  if (!IsHeapIterable()) {
5682  CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5683  }
5684  ASSERT(IsHeapIterable());
5685 }
5686 
5687 
5688 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5689  incremental_marking()->Step(step_size,
5690  IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5691 
5692  if (incremental_marking()->IsComplete()) {
5693  bool uncommit = false;
5694  if (gc_count_at_last_idle_gc_ == gc_count_) {
5695  // No GC since the last full GC, the mutator is probably not active.
5696  isolate_->compilation_cache()->Clear();
5697  uncommit = true;
5698  }
5699  CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5700  mark_sweeps_since_idle_round_started_++;
5701  gc_count_at_last_idle_gc_ = gc_count_;
5702  if (uncommit) {
5703  new_space_.Shrink();
5704  UncommitFromSpace();
5705  }
5706  }
5707 }
5708 
5709 
5710 bool Heap::IdleNotification(int hint) {
5711  // Hints greater than this value indicate that
5712  // the embedder is requesting a lot of GC work.
5713  const int kMaxHint = 1000;
5714  const int kMinHintForIncrementalMarking = 10;
5715  // Minimal hint that allows to do full GC.
5716  const int kMinHintForFullGC = 100;
5717  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5718  // The size factor is in range [5..250]. The numbers here are chosen from
5719  // experiments. If you change them, make sure to test with
5720  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5721  intptr_t step_size =
5722  size_factor * IncrementalMarking::kAllocatedThreshold;
5723 
5724  if (contexts_disposed_ > 0) {
5725  contexts_disposed_ = 0;
5726  int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5727  if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5728  incremental_marking()->IsStopped()) {
5729  HistogramTimerScope scope(isolate_->counters()->gc_context());
5730  CollectAllGarbage(kReduceMemoryFootprintMask,
5731  "idle notification: contexts disposed");
5732  } else {
5733  AdvanceIdleIncrementalMarking(step_size);
5734  }
5735 
5736  // After context disposal there is likely a lot of garbage remaining, reset
5737  // the idle notification counters in order to trigger more incremental GCs
5738  // on subsequent idle notifications.
5739  StartIdleRound();
5740  return false;
5741  }
5742 
5743  if (!FLAG_incremental_marking || Serializer::enabled()) {
5744  return IdleGlobalGC();
5745  }
5746 
5747  // By doing small chunks of GC work in each IdleNotification,
5748  // perform a round of incremental GCs and after that wait until
5749  // the mutator creates enough garbage to justify a new round.
5750  // An incremental GC progresses as follows:
5751  // 1. many incremental marking steps,
5752  // 2. one old space mark-sweep-compact,
5753  // 3. many lazy sweep steps.
5754  // Use mark-sweep-compact events to count incremental GCs in a round.
5755 
5756  if (incremental_marking()->IsStopped()) {
5757  if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5758  !IsSweepingComplete() &&
5759  !AdvanceSweepers(static_cast<int>(step_size))) {
5760  return false;
5761  }
5762  }
5763 
5764  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5765  if (EnoughGarbageSinceLastIdleRound()) {
5766  StartIdleRound();
5767  } else {
5768  return true;
5769  }
5770  }
5771 
5772  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5773  mark_sweeps_since_idle_round_started_;
5774 
5775  if (incremental_marking()->IsStopped()) {
5776  // If there are no more than two GCs left in this idle round and we are
5777  // allowed to do a full GC, then make those GCs full in order to compact
5778  // the code space.
5779  // TODO(ulan): Once we enable code compaction for incremental marking,
5780  // we can get rid of this special case and always start incremental marking.
5781  if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5782  CollectAllGarbage(kReduceMemoryFootprintMask,
5783  "idle notification: finalize idle round");
5784  mark_sweeps_since_idle_round_started_++;
5785  } else if (hint > kMinHintForIncrementalMarking) {
5786  incremental_marking()->Start();
5787  }
5788  }
5789  if (!incremental_marking()->IsStopped() &&
5790  hint > kMinHintForIncrementalMarking) {
5791  AdvanceIdleIncrementalMarking(step_size);
5792  }
5793 
5794  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5795  FinishIdleRound();
5796  return true;
5797  }
5798 
5799  return false;
5800 }
5801 
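IdleNotification is driven from the embedder side. A hedged sketch of how an embedder of this V8 line would typically feed idle time to the heap, assuming the public v8::V8::IdleNotification(int hint) entry point that forwards here:

#include "v8.h"

// Called from the embedder's event loop when it has nothing else to do.
// A hint of 100 is large enough to permit a full GC within an idle round
// (see kMinHintForFullGC above); the call returns true once V8 considers
// the current idle round finished and further idle work wasted.
bool GiveV8IdleTime() {
  return v8::V8::IdleNotification(100);
}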
5802 
5803 bool Heap::IdleGlobalGC() {
5804  static const int kIdlesBeforeScavenge = 4;
5805  static const int kIdlesBeforeMarkSweep = 7;
5806  static const int kIdlesBeforeMarkCompact = 8;
5807  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5808  static const unsigned int kGCsBetweenCleanup = 4;
5809 
5810  if (!last_idle_notification_gc_count_init_) {
5811  last_idle_notification_gc_count_ = gc_count_;
5812  last_idle_notification_gc_count_init_ = true;
5813  }
5814 
5815  bool uncommit = true;
5816  bool finished = false;
5817 
5818  // Reset the number of idle notifications received when a number of
5819  // GCs have taken place. This allows another round of cleanup based
5820  // on idle notifications if enough work has been carried out to
5821  // provoke a number of garbage collections.
5822  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5823  number_idle_notifications_ =
5824  Min(number_idle_notifications_ + 1, kMaxIdleCount);
5825  } else {
5826  number_idle_notifications_ = 0;
5827  last_idle_notification_gc_count_ = gc_count_;
5828  }
5829 
5830  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5831  CollectGarbage(NEW_SPACE, "idle notification");
5832  new_space_.Shrink();
5833  last_idle_notification_gc_count_ = gc_count_;
5834  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5835  // Before doing the mark-sweep collections we clear the
5836  // compilation cache to avoid hanging on to source code and
5837  // generated code for cached functions.
5838  isolate_->compilation_cache()->Clear();
5839 
5840  CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5841  new_space_.Shrink();
5842  last_idle_notification_gc_count_ = gc_count_;
5843 
5844  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5845  CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5846  new_space_.Shrink();
5847  last_idle_notification_gc_count_ = gc_count_;
5848  number_idle_notifications_ = 0;
5849  finished = true;
5850  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5851  // If we have received more than kIdlesBeforeMarkCompact idle
5852  // notifications we do not perform any cleanup because we don't
5853  // expect to gain much by doing so.
5854  finished = true;
5855  }
5856 
5857  if (uncommit) UncommitFromSpace();
5858 
5859  return finished;
5860 }
5861 
5862 
5863 #ifdef DEBUG
5864 
5865 void Heap::Print() {
5866  if (!HasBeenSetUp()) return;
5867  isolate()->PrintStack(stdout);
5868  AllSpaces spaces(this);
5869  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5870  space->Print();
5871  }
5872 }
5873 
5874 
5875 void Heap::ReportCodeStatistics(const char* title) {
5876  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5877  PagedSpace::ResetCodeStatistics(isolate());
5878  // We do not look for code in new space, map space, or old space. If code
5879  // somehow ends up in those spaces, we would miss it here.
5880  code_space_->CollectCodeStatistics();
5881  lo_space_->CollectCodeStatistics();
5882  PagedSpace::ReportCodeStatistics(isolate());
5883 }
5884 
5885 
5886 // This function expects that NewSpace's allocated objects histogram is
5887 // populated (via a call to CollectStatistics or else as a side effect of a
5888 // just-completed scavenge collection).
5889 void Heap::ReportHeapStatistics(const char* title) {
5890  USE(title);
5891  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5892  title, gc_count_);
5893  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5894  old_generation_allocation_limit_);
5895 
5896  PrintF("\n");
5897  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5898  isolate_->global_handles()->PrintStats();
5899  PrintF("\n");
5900 
5901  PrintF("Heap statistics : ");
5902  isolate_->memory_allocator()->ReportStatistics();
5903  PrintF("To space : ");
5904  new_space_.ReportStatistics();
5905  PrintF("Old pointer space : ");
5906  old_pointer_space_->ReportStatistics();
5907  PrintF("Old data space : ");
5908  old_data_space_->ReportStatistics();
5909  PrintF("Code space : ");
5910  code_space_->ReportStatistics();
5911  PrintF("Map space : ");
5912  map_space_->ReportStatistics();
5913  PrintF("Cell space : ");
5914  cell_space_->ReportStatistics();
5915  PrintF("PropertyCell space : ");
5916  property_cell_space_->ReportStatistics();
5917  PrintF("Large object space : ");
5918  lo_space_->ReportStatistics();
5919  PrintF(">>>>>> ========================================= >>>>>>\n");
5920 }
5921 
5922 #endif // DEBUG
5923 
5924 bool Heap::Contains(HeapObject* value) {
5925  return Contains(value->address());
5926 }
5927 
5928 
5929 bool Heap::Contains(Address addr) {
5930  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5931  return HasBeenSetUp() &&
5932  (new_space_.ToSpaceContains(addr) ||
5933  old_pointer_space_->Contains(addr) ||
5934  old_data_space_->Contains(addr) ||
5935  code_space_->Contains(addr) ||
5936  map_space_->Contains(addr) ||
5937  cell_space_->Contains(addr) ||
5938  property_cell_space_->Contains(addr) ||
5939  lo_space_->SlowContains(addr));
5940 }
5941 
5942 
5943 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5944  return InSpace(value->address(), space);
5945 }
5946 
5947 
5948 bool Heap::InSpace(Address addr, AllocationSpace space) {
5949  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5950  if (!HasBeenSetUp()) return false;
5951 
5952  switch (space) {
5953  case NEW_SPACE:
5954  return new_space_.ToSpaceContains(addr);
5955  case OLD_POINTER_SPACE:
5956  return old_pointer_space_->Contains(addr);
5957  case OLD_DATA_SPACE:
5958  return old_data_space_->Contains(addr);
5959  case CODE_SPACE:
5960  return code_space_->Contains(addr);
5961  case MAP_SPACE:
5962  return map_space_->Contains(addr);
5963  case CELL_SPACE:
5964  return cell_space_->Contains(addr);
5965  case PROPERTY_CELL_SPACE:
5966  return property_cell_space_->Contains(addr);
5967  case LO_SPACE:
5968  return lo_space_->SlowContains(addr);
5969  }
5970 
5971  return false;
5972 }
5973 
5974 
5975 #ifdef VERIFY_HEAP
5976 void Heap::Verify() {
5977  CHECK(HasBeenSetUp());
5978 
5979  store_buffer()->Verify();
5980 
5981  VerifyPointersVisitor visitor;
5982  IterateRoots(&visitor, VISIT_ONLY_STRONG);
5983 
5984  VerifySmisVisitor smis_visitor;
5985  IterateSmiRoots(&smis_visitor);
5986 
5987  new_space_.Verify();
5988 
5989  old_pointer_space_->Verify(&visitor);
5990  map_space_->Verify(&visitor);
5991 
5992  VerifyPointersVisitor no_dirty_regions_visitor;
5993  old_data_space_->Verify(&no_dirty_regions_visitor);
5994  code_space_->Verify(&no_dirty_regions_visitor);
5995  cell_space_->Verify(&no_dirty_regions_visitor);
5996  property_cell_space_->Verify(&no_dirty_regions_visitor);
5997 
5998  lo_space_->Verify();
5999 }
6000 #endif
6001 
6002 
6003 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6004  Utf8StringKey key(string, HashSeed());
6005  return InternalizeStringWithKey(&key);
6006 }
6007 
6008 
6009 MaybeObject* Heap::InternalizeString(String* string) {
6010  if (string->IsInternalizedString()) return string;
6011  Object* result = NULL;
6012  Object* new_table;
6013  { MaybeObject* maybe_new_table =
6014  string_table()->LookupString(string, &result);
6015  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6016  }
6017  // Can't use set_string_table because StringTable::cast knows that
6018  // StringTable is a singleton and checks for identity.
6019  roots_[kStringTableRootIndex] = new_table;
6020  ASSERT(result != NULL);
6021  return result;
6022 }
6023 
6024 
6025 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6026  if (string->IsInternalizedString()) {
6027  *result = string;
6028  return true;
6029  }
6030  return string_table()->LookupStringIfExists(string, result);
6031 }
6032 
6033 
6034 MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) {
6035  Object* result = NULL;
6036  Object* new_table;
6037  { MaybeObject* maybe_new_table =
6038  string_table()->LookupKey(key, &result);
6039  if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6040  }
6041  // Can't use set_string_table because StringTable::cast knows that
6042  // StringTable is a singleton and checks for identity.
6043  roots_[kStringTableRootIndex] = new_table;
6044  ASSERT(result != NULL);
6045  return result;
6046 }
6047 
6048 
6049 void Heap::ZapFromSpace() {
6050  NewSpacePageIterator it(new_space_.FromSpaceStart(),
6051  new_space_.FromSpaceEnd());
6052  while (it.has_next()) {
6053  NewSpacePage* page = it.next();
6054  for (Address cursor = page->area_start(), limit = page->area_end();
6055  cursor < limit;
6056  cursor += kPointerSize) {
6057  Memory::Address_at(cursor) = kFromSpaceZapValue;
6058  }
6059  }
6060 }
6061 
6062 
6063 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6064  Address end,
6065  ObjectSlotCallback callback) {
6066  Address slot_address = start;
6067 
6068  // We are not collecting slots on new space objects during mutation
6069  // thus we have to scan for pointers to evacuation candidates when we
6070  // promote objects. But we should not record any slots in non-black
6071  // objects. Grey objects' slots would be rescanned.
6072  // A white object might not survive until the end of the collection, so
6073  // recording its slots would be a violation of the invariant.
6074  bool record_slots = false;
6075  if (incremental_marking()->IsCompacting()) {
6076  MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6077  record_slots = Marking::IsBlack(mark_bit);
6078  }
6079 
6080  while (slot_address < end) {
6081  Object** slot = reinterpret_cast<Object**>(slot_address);
6082  Object* object = *slot;
6083  // If the store buffer becomes overfull we mark pages as being exempt from
6084  // the store buffer. These pages are scanned to find pointers that point
6085  // to the new space. In that case we may hit newly promoted objects and
6086  // fix the pointers before the promotion queue gets to them. Thus the 'if'.
6087  if (object->IsHeapObject()) {
6088  if (Heap::InFromSpace(object)) {
6089  callback(reinterpret_cast<HeapObject**>(slot),
6090  HeapObject::cast(object));
6091  Object* new_object = *slot;
6092  if (InNewSpace(new_object)) {
6093  SLOW_ASSERT(Heap::InToSpace(new_object));
6094  SLOW_ASSERT(new_object->IsHeapObject());
6095  store_buffer_.EnterDirectlyIntoStoreBuffer(
6096  reinterpret_cast<Address>(slot));
6097  }
6098  SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6099  } else if (record_slots &&
6100  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6101  mark_compact_collector()->RecordSlot(slot, slot, object);
6102  }
6103  }
6104  slot_address += kPointerSize;
6105  }
6106 }
6107 
6108 
6109 #ifdef DEBUG
6110 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6111 
6112 
6113 bool IsAMapPointerAddress(Object** addr) {
6114  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6115  int mod = a % Map::kSize;
6116  return mod >= Map::kPointerFieldsBeginOffset &&
6117  mod < Map::kPointerFieldsEndOffset;
6118 }
6119 
6120 
6121 bool EverythingsAPointer(Object** addr) {
6122  return true;
6123 }
6124 
6125 
6126 static void CheckStoreBuffer(Heap* heap,
6127  Object** current,
6128  Object** limit,
6129  Object**** store_buffer_position,
6130  Object*** store_buffer_top,
6131  CheckStoreBufferFilter filter,
6132  Address special_garbage_start,
6133  Address special_garbage_end) {
6134  Map* free_space_map = heap->free_space_map();
6135  for ( ; current < limit; current++) {
6136  Object* o = *current;
6137  Address current_address = reinterpret_cast<Address>(current);
6138  // Skip free space.
6139  if (o == free_space_map) {
6140  Address current_address = reinterpret_cast<Address>(current);
6141  FreeSpace* free_space =
6142  FreeSpace::cast(HeapObject::FromAddress(current_address));
6143  int skip = free_space->Size();
6144  ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6145  ASSERT(skip > 0);
6146  current_address += skip - kPointerSize;
6147  current = reinterpret_cast<Object**>(current_address);
6148  continue;
6149  }
6150  // Skip the current linear allocation space between top and limit which is
6151  // unmarked with the free space map, but can contain junk.
6152  if (current_address == special_garbage_start &&
6153  special_garbage_end != special_garbage_start) {
6154  current_address = special_garbage_end - kPointerSize;
6155  current = reinterpret_cast<Object**>(current_address);
6156  continue;
6157  }
6158  if (!(*filter)(current)) continue;
6159  ASSERT(current_address < special_garbage_start ||
6160  current_address >= special_garbage_end);
6161  ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6162  // We have to check that the pointer does not point into new space
6163  // without trying to cast it to a heap object since the hash field of
6164  // a string can contain values like 1 and 3 which are tagged null
6165  // pointers.
6166  if (!heap->InNewSpace(o)) continue;
6167  while (**store_buffer_position < current &&
6168  *store_buffer_position < store_buffer_top) {
6169  (*store_buffer_position)++;
6170  }
6171  if (**store_buffer_position != current ||
6172  *store_buffer_position == store_buffer_top) {
6173  Object** obj_start = current;
6174  while (!(*obj_start)->IsMap()) obj_start--;
6175  UNREACHABLE();
6176  }
6177  }
6178 }
6179 
6180 
6181 // Check that the store buffer contains all intergenerational pointers by
6182 // scanning a page and ensuring that all pointers to young space are in the
6183 // store buffer.
6184 void Heap::OldPointerSpaceCheckStoreBuffer() {
6185  OldSpace* space = old_pointer_space();
6186  PageIterator pages(space);
6187 
6188  store_buffer()->SortUniq();
6189 
6190  while (pages.has_next()) {
6191  Page* page = pages.next();
6192  Object** current = reinterpret_cast<Object**>(page->area_start());
6193 
6194  Address end = page->area_end();
6195 
6196  Object*** store_buffer_position = store_buffer()->Start();
6197  Object*** store_buffer_top = store_buffer()->Top();
6198 
6199  Object** limit = reinterpret_cast<Object**>(end);
6200  CheckStoreBuffer(this,
6201  current,
6202  limit,
6203  &store_buffer_position,
6204  store_buffer_top,
6205  &EverythingsAPointer,
6206  space->top(),
6207  space->limit());
6208  }
6209 }
6210 
6211 
6212 void Heap::MapSpaceCheckStoreBuffer() {
6213  MapSpace* space = map_space();
6214  PageIterator pages(space);
6215 
6216  store_buffer()->SortUniq();
6217 
6218  while (pages.has_next()) {
6219  Page* page = pages.next();
6220  Object** current = reinterpret_cast<Object**>(page->area_start());
6221 
6222  Address end = page->area_end();
6223 
6224  Object*** store_buffer_position = store_buffer()->Start();
6225  Object*** store_buffer_top = store_buffer()->Top();
6226 
6227  Object** limit = reinterpret_cast<Object**>(end);
6228  CheckStoreBuffer(this,
6229  current,
6230  limit,
6231  &store_buffer_position,
6232  store_buffer_top,
6233  &IsAMapPointerAddress,
6234  space->top(),
6235  space->limit());
6236  }
6237 }
6238 
6239 
6240 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6241  LargeObjectIterator it(lo_space());
6242  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6243  // We only have code, sequential strings, or fixed arrays in large
6244  // object space, and only fixed arrays can possibly contain pointers to
6245  // the young generation.
6246  if (object->IsFixedArray()) {
6247  Object*** store_buffer_position = store_buffer()->Start();
6248  Object*** store_buffer_top = store_buffer()->Top();
6249  Object** current = reinterpret_cast<Object**>(object->address());
6250  Object** limit =
6251  reinterpret_cast<Object**>(object->address() + object->Size());
6252  CheckStoreBuffer(this,
6253  current,
6254  limit,
6255  &store_buffer_position,
6256  store_buffer_top,
6257  &EverythingsAPointer,
6258  NULL,
6259  NULL);
6260  }
6261  }
6262 }
6263 #endif
6264 
6265 
6266 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6267  IterateStrongRoots(v, mode);
6268  IterateWeakRoots(v, mode);
6269 }
6270 
6271 
6272 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6273  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6274  v->Synchronize(VisitorSynchronization::kStringTable);
6275  if (mode != VISIT_ALL_IN_SCAVENGE &&
6276  mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6277  // Scavenge collections have special processing for this.
6278  external_string_table_.Iterate(v);
6279  }
6280  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6281 }
6282 
6283 
6284 void Heap::IterateSmiRoots(ObjectVisitor* v) {
6285  // Acquire execution access since we are going to read stack limit values.
6286  ExecutionAccess access(isolate());
6287  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
6288  v->Synchronize(VisitorSynchronization::kSmiRootList);
6289 }
6290 
6291 
6292 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6293  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6294  v->Synchronize(VisitorSynchronization::kStrongRootList);
6295 
6296  v->VisitPointer(BitCast<Object**>(&hidden_string_));
6297  v->Synchronize(VisitorSynchronization::kInternalizedString);
6298 
6299  isolate_->bootstrapper()->Iterate(v);
6300  v->Synchronize(VisitorSynchronization::kBootstrapper);
6301  isolate_->Iterate(v);
6302  v->Synchronize(VisitorSynchronization::kTop);
6303  Relocatable::Iterate(isolate_, v);
6304  v->Synchronize(VisitorSynchronization::kRelocatable);
6305 
6306 #ifdef ENABLE_DEBUGGER_SUPPORT
6307  isolate_->debug()->Iterate(v);
6308  if (isolate_->deoptimizer_data() != NULL) {
6309  isolate_->deoptimizer_data()->Iterate(v);
6310  }
6311 #endif
6312  v->Synchronize(VisitorSynchronization::kDebug);
6313  isolate_->compilation_cache()->Iterate(v);
6314  v->Synchronize(VisitorSynchronization::kCompilationCache);
6315 
6316  // Iterate over local handles in handle scopes.
6317  isolate_->handle_scope_implementer()->Iterate(v);
6318  isolate_->IterateDeferredHandles(v);
6319  v->Synchronize(VisitorSynchronization::kHandleScope);
6320 
6321  // Iterate over the builtin code objects and code stubs in the
6322  // heap. Note that it is not necessary to iterate over code objects
6323  // on scavenge collections.
6324  if (mode != VISIT_ALL_IN_SCAVENGE) {
6325  isolate_->builtins()->IterateBuiltins(v);
6326  }
6327  v->Synchronize(VisitorSynchronization::kBuiltins);
6328 
6329  // Iterate over global handles.
6330  switch (mode) {
6331  case VISIT_ONLY_STRONG:
6332  isolate_->global_handles()->IterateStrongRoots(v);
6333  break;
6334  case VISIT_ALL_IN_SCAVENGE:
6335  isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6336  break;
6337  case VISIT_ALL_IN_SWEEP_NEWSPACE:
6338  case VISIT_ALL:
6339  isolate_->global_handles()->IterateAllRoots(v);
6340  break;
6341  }
6342  v->Synchronize(VisitorSynchronization::kGlobalHandles);
6343 
6344  // Iterate over eternal handles.
6345  if (mode == VISIT_ALL_IN_SCAVENGE) {
6346  isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6347  } else {
6348  isolate_->eternal_handles()->IterateAllRoots(v);
6349  }
6350  v->Synchronize(VisitorSynchronization::kEternalHandles);
6351 
6352  // Iterate over pointers being held by inactive threads.
6353  isolate_->thread_manager()->Iterate(v);
6354  v->Synchronize(VisitorSynchronization::kThreadManager);
6355 
6356  // Iterate over the pointers the Serialization/Deserialization code is
6357  // holding.
6358  // During garbage collection this keeps the partial snapshot cache alive.
6359  // During deserialization of the startup snapshot this creates the partial
6360  // snapshot cache and deserializes the objects it refers to. During
6361  // serialization this does nothing, since the partial snapshot cache is
6362  // empty. However the next thing we do is create the partial snapshot,
6363  // filling up the partial snapshot cache with objects it needs as we go.
6364  SerializerDeserializer::Iterate(isolate_, v);
6365  // We don't do a v->Synchronize call here, because in debug mode that will
6366  // output a flag to the snapshot. However at this point the serializer and
6367  // deserializer are deliberately a little unsynchronized (see above) so the
6368  // checking of the sync flag in the snapshot would fail.
6369 }
6370 
6371 
6372 // TODO(1236194): Since the heap size is configurable on the command line
6373 // and through the API, we should gracefully handle the case that the heap
6374 // size is not big enough to fit all the initial objects.
6375 bool Heap::ConfigureHeap(int max_semispace_size,
6376  intptr_t max_old_gen_size,
6377  intptr_t max_executable_size) {
6378  if (HasBeenSetUp()) return false;
6379 
6380  if (FLAG_stress_compaction) {
6381  // This will cause more frequent GCs when stressing.
6382  max_semispace_size_ = Page::kPageSize;
6383  }
6384 
6385  if (max_semispace_size > 0) {
6386  if (max_semispace_size < Page::kPageSize) {
6387  max_semispace_size = Page::kPageSize;
6388  if (FLAG_trace_gc) {
6389  PrintPID("Max semispace size cannot be less than %dkbytes\n",
6390  Page::kPageSize >> 10);
6391  }
6392  }
6393  max_semispace_size_ = max_semispace_size;
6394  }
6395 
6396  if (Snapshot::IsEnabled()) {
6397  // If we are using a snapshot we always reserve the default amount
6398  // of memory for each semispace because code in the snapshot has
6399  // write-barrier code that relies on the size and alignment of new
6400  // space. We therefore cannot use a larger max semispace size
6401  // than the default reserved semispace size.
6402  if (max_semispace_size_ > reserved_semispace_size_) {
6403  max_semispace_size_ = reserved_semispace_size_;
6404  if (FLAG_trace_gc) {
6405  PrintPID("Max semispace size cannot be more than %dkbytes\n",
6406  reserved_semispace_size_ >> 10);
6407  }
6408  }
6409  } else {
6410  // If we are not using snapshots we reserve space for the actual
6411  // max semispace size.
6412  reserved_semispace_size_ = max_semispace_size_;
6413  }
6414 
6415  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6416  if (max_executable_size > 0) {
6417  max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6418  }
6419 
6420  // The max executable size must be less than or equal to the max old
6421  // generation size.
6422  if (max_executable_size_ > max_old_generation_size_) {
6423  max_executable_size_ = max_old_generation_size_;
6424  }
6425 
6426  // The new space size must be a power of two to support single-bit testing
6427  // for containment.
6428  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6429  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6430  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6431 
6432  // The external allocation limit should be below 256 MB on all architectures
6433  // to avoid unnecessary low memory notifications, as that is the threshold
6434  // for some embedders.
6435  external_allocation_limit_ = 12 * max_semispace_size_;
6436  ASSERT(external_allocation_limit_ <= 256 * MB);
6437 
6438  // The old generation is paged and needs at least one page for each space.
6439  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6440  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6441  Page::kPageSize),
6442  RoundUp(max_old_generation_size_,
6443  Page::kPageSize));
6444 
6445  // We rely on being able to allocate new arrays in paged spaces.
6446  ASSERT(Page::kMaxRegularHeapObjectSize >=
6447  (JSArray::kSize +
6448  FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
6449  AllocationMemento::kSize));
6450 
6451  configured_ = true;
6452  return true;
6453 }
6454 
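ConfigureHeap is normally reached via ConfigureHeapDefault below, which turns the --max_new_space_size (KB), --max_old_space_size (MB) and --max_executable_size (MB) flags into byte values. A hedged sketch of a direct call using the same byte units (the helper function and the chosen sizes are illustrative, not an official entry point):

#include "v8.h"

// Sizes are in bytes, mirroring the KB/MB conversions in ConfigureHeapDefault.
bool ConfigureExampleHeap(v8::internal::Heap* heap) {
  const int max_semispace = 8 * v8::internal::MB;
  const intptr_t max_old_gen = 512 * v8::internal::MB;
  const intptr_t max_executable = 256 * v8::internal::MB;
  return heap->ConfigureHeap(max_semispace, max_old_gen, max_executable);
}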
6455 
6456 bool Heap::ConfigureHeapDefault() {
6457  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6458  static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6459  static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6460 }
6461 
6462 
6463 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6464  *stats->start_marker = HeapStats::kStartMarker;
6465  *stats->end_marker = HeapStats::kEndMarker;
6466  *stats->new_space_size = new_space_.SizeAsInt();
6467  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6468  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6469  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6470  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6471  *stats->old_data_space_capacity = old_data_space_->Capacity();
6472  *stats->code_space_size = code_space_->SizeOfObjects();
6473  *stats->code_space_capacity = code_space_->Capacity();
6474  *stats->map_space_size = map_space_->SizeOfObjects();
6475  *stats->map_space_capacity = map_space_->Capacity();
6476  *stats->cell_space_size = cell_space_->SizeOfObjects();
6477  *stats->cell_space_capacity = cell_space_->Capacity();
6478  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6479  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6480  *stats->lo_space_size = lo_space_->Size();
6481  isolate_->global_handles()->RecordStats(stats);
6482  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6483  *stats->memory_allocator_capacity =
6484  isolate()->memory_allocator()->Size() +
6485  isolate()->memory_allocator()->Available();
6486  *stats->os_error = OS::GetLastError();
6487  new_space_.CollectStatistics();
6488  if (take_snapshot) {
6489  HeapIterator iterator(this);
6490  for (HeapObject* obj = iterator.next();
6491  obj != NULL;
6492  obj = iterator.next()) {
6493  InstanceType type = obj->map()->instance_type();
6494  ASSERT(0 <= type && type <= LAST_TYPE);
6495  stats->objects_per_type[type]++;
6496  stats->size_per_type[type] += obj->Size();
6497  }
6498  }
6499 }
6500 
6501 
6502 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6503  return old_pointer_space_->SizeOfObjects()
6504  + old_data_space_->SizeOfObjects()
6505  + code_space_->SizeOfObjects()
6506  + map_space_->SizeOfObjects()
6507  + cell_space_->SizeOfObjects()
6508  + property_cell_space_->SizeOfObjects()
6509  + lo_space_->SizeOfObjects();
6510 }
6511 
6512 
6513 bool Heap::AdvanceSweepers(int step_size) {
6514  ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
6515  bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
6516  sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
6517  return sweeping_complete;
6518 }
6519 
6520 
6521 int64_t Heap::PromotedExternalMemorySize() {
6522  if (amount_of_external_allocated_memory_
6523  <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6524  return amount_of_external_allocated_memory_
6525  - amount_of_external_allocated_memory_at_last_global_gc_;
6526 }
6527 
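The external memory counters compared here are fed by embedders through the public API. A hedged sketch of the reporting side, assuming the v8::Isolate::AdjustAmountOfExternalAllocatedMemory entry point of this API generation (helper names are invented):

#include <stdint.h>
#include "v8.h"

// Tell V8 that |byte_length| bytes of off-heap memory are kept alive by JS
// objects; the heap weighs this against external_allocation_limit_ when
// deciding whether to trigger a global GC.
void ReportExternalAllocation(v8::Isolate* isolate, size_t byte_length) {
  isolate->AdjustAmountOfExternalAllocatedMemory(
      static_cast<int64_t>(byte_length));
}

// Undo the accounting when the external buffer is released.
void ReportExternalRelease(v8::Isolate* isolate, size_t byte_length) {
  isolate->AdjustAmountOfExternalAllocatedMemory(
      -static_cast<int64_t>(byte_length));
}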
6528 
6529 void Heap::EnableInlineAllocation() {
6530  if (!inline_allocation_disabled_) return;
6531  inline_allocation_disabled_ = false;
6532 
6533  // Update inline allocation limit for new space.
6534  new_space()->UpdateInlineAllocationLimit(0);
6535 }
6536 
6537 
6538 void Heap::DisableInlineAllocation() {
6539  if (inline_allocation_disabled_) return;
6540  inline_allocation_disabled_ = true;
6541 
6542  // Update inline allocation limit for new space.
6543  new_space()->UpdateInlineAllocationLimit(0);
6544 
6545  // Update inline allocation limit for old spaces.
6546  PagedSpaces spaces(this);
6547  for (PagedSpace* space = spaces.next();
6548  space != NULL;
6549  space = spaces.next()) {
6550  space->EmptyAllocationInfo();
6551  }
6552 }
6553 
6554 
6555 V8_DECLARE_ONCE(initialize_gc_once);
6556 
6557 static void InitializeGCOnce() {
6558  InitializeScavengingVisitorsTables();
6559  NewSpaceScavenger::Initialize();
6560  MarkCompactCollector::Initialize();
6561 }
6562 
6563 
6564 bool Heap::SetUp() {
6565 #ifdef DEBUG
6566  allocation_timeout_ = FLAG_gc_interval;
6567 #endif
6568 
6569  // Initialize heap spaces and initial maps and objects. Whenever something
6570  // goes wrong, just return false. The caller should check the results and
6571  // call Heap::TearDown() to release allocated memory.
6572  //
6573  // If the heap is not yet configured (e.g. through the API), configure it.
6574  // Configuration is based on the flags new-space-size (really the semispace
6575  // size) and old-space-size if set or the initial values of semispace_size_
6576  // and old_generation_size_ otherwise.
6577  if (!configured_) {
6578  if (!ConfigureHeapDefault()) return false;
6579  }
6580 
6581  CallOnce(&initialize_gc_once, &InitializeGCOnce);
6582 
6583  MarkMapPointersAsEncoded(false);
6584 
6585  // Set up memory allocator.
6586  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6587  return false;
6588 
6589  // Set up new space.
6590  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6591  return false;
6592  }
6593 
6594  // Initialize old pointer space.
6595  old_pointer_space_ =
6596  new OldSpace(this,
6597  max_old_generation_size_,
6598  OLD_POINTER_SPACE,
6599  NOT_EXECUTABLE);
6600  if (old_pointer_space_ == NULL) return false;
6601  if (!old_pointer_space_->SetUp()) return false;
6602 
6603  // Initialize old data space.
6604  old_data_space_ =
6605  new OldSpace(this,
6606  max_old_generation_size_,
6607  OLD_DATA_SPACE,
6608  NOT_EXECUTABLE);
6609  if (old_data_space_ == NULL) return false;
6610  if (!old_data_space_->SetUp()) return false;
6611 
6612  // Initialize the code space, set its maximum capacity to the old
6613  // generation size. It needs executable memory.
6614  // On 64-bit platform(s), we put all code objects in a 2 GB range of
6615  // virtual address space, so that they can call each other with near calls.
6616  if (code_range_size_ > 0) {
6617  if (!isolate_->code_range()->SetUp(code_range_size_)) {
6618  return false;
6619  }
6620  }
6621 
6622  code_space_ =
6623  new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6624  if (code_space_ == NULL) return false;
6625  if (!code_space_->SetUp()) return false;
6626 
6627  // Initialize map space.
6628  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6629  if (map_space_ == NULL) return false;
6630  if (!map_space_->SetUp()) return false;
6631 
6632  // Initialize simple cell space.
6633  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6634  if (cell_space_ == NULL) return false;
6635  if (!cell_space_->SetUp()) return false;
6636 
6637  // Initialize global property cell space.
6638  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6639  PROPERTY_CELL_SPACE);
6640  if (property_cell_space_ == NULL) return false;
6641  if (!property_cell_space_->SetUp()) return false;
6642 
6643  // The large object code space may contain code or data. We set the memory
6644  // to be non-executable here for safety, but this means we need to enable it
6645  // explicitly when allocating large code objects.
6646  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6647  if (lo_space_ == NULL) return false;
6648  if (!lo_space_->SetUp()) return false;
6649 
6650  // Set up the seed that is used to randomize the string hash function.
6651  ASSERT(hash_seed() == 0);
6652  if (FLAG_randomize_hashes) {
6653  if (FLAG_hash_seed == 0) {
6654  int rnd = isolate()->random_number_generator()->NextInt();
6655  set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
6656  } else {
6657  set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6658  }
6659  }
6660 
6661  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6662  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6663 
6664  store_buffer()->SetUp();
6665 
6666  mark_compact_collector()->SetUp();
6667 
6668  return true;
6669 }
6670 
6671 
6672 bool Heap::CreateHeapObjects() {
6673  // Create initial maps.
6674  if (!CreateInitialMaps()) return false;
6675  if (!CreateApiObjects()) return false;
6676 
6677  // Create initial objects
6678  if (!CreateInitialObjects()) return false;
6679 
6680  native_contexts_list_ = undefined_value();
6681  array_buffers_list_ = undefined_value();
6682  allocation_sites_list_ = undefined_value();
6683  weak_object_to_code_table_ = undefined_value();
6684  return true;
6685 }
6686 
6687 
6688 void Heap::SetStackLimits() {
6689  ASSERT(isolate_ != NULL);
6690  ASSERT(isolate_ == isolate());
6691  // On 64 bit machines, pointers are generally out of range of Smis. We write
6692  // something that looks like an out of range Smi to the GC.
6693 
6694  // Set up the special root array entries containing the stack limits.
6695  // These are actually addresses, but the tag makes the GC ignore it.
6696  roots_[kStackLimitRootIndex] =
6697  reinterpret_cast<Object*>(
6698  (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6699  roots_[kRealStackLimitRootIndex] =
6700  reinterpret_cast<Object*>(
6701  (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6702 }
6703 
6704 
6705 void Heap::TearDown() {
6706 #ifdef VERIFY_HEAP
6707  if (FLAG_verify_heap) {
6708  Verify();
6709  }
6710 #endif
6711 
6712  UpdateMaximumCommitted();
6713 
6714  if (FLAG_print_cumulative_gc_stat) {
6715  PrintF("\n");
6716  PrintF("gc_count=%d ", gc_count_);
6717  PrintF("mark_sweep_count=%d ", ms_count_);
6718  PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6719  PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6720  PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6721  PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6723  PrintF("total_marking_time=%.1f ", marking_time());
6724  PrintF("total_sweeping_time=%.1f ", sweeping_time());
6725  PrintF("\n\n");
6726  }
6727 
6728  if (FLAG_print_max_heap_committed) {
6729  PrintF("\n");
6730  PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
6732  PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
6733  new_space_.MaximumCommittedMemory());
6734  PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
6735  old_data_space_->MaximumCommittedMemory());
6736  PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6737  old_pointer_space_->MaximumCommittedMemory());
6738  PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6739  old_pointer_space_->MaximumCommittedMemory());
6740  PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
6741  code_space_->MaximumCommittedMemory());
6742  PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
6743  map_space_->MaximumCommittedMemory());
6744  PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
6745  cell_space_->MaximumCommittedMemory());
6746  PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
6747  property_cell_space_->MaximumCommittedMemory());
6748  PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
6749  lo_space_->MaximumCommittedMemory());
6750  PrintF("\n\n");
6751  }
6752 
6753  TearDownArrayBuffers();
6754 
6755  isolate_->global_handles()->TearDown();
6756 
6757  external_string_table_.TearDown();
6758 
6759  mark_compact_collector()->TearDown();
6760 
6761  new_space_.TearDown();
6762 
6763  if (old_pointer_space_ != NULL) {
6764  old_pointer_space_->TearDown();
6765  delete old_pointer_space_;
6766  old_pointer_space_ = NULL;
6767  }
6768 
6769  if (old_data_space_ != NULL) {
6770  old_data_space_->TearDown();
6771  delete old_data_space_;
6772  old_data_space_ = NULL;
6773  }
6774 
6775  if (code_space_ != NULL) {
6776  code_space_->TearDown();
6777  delete code_space_;
6778  code_space_ = NULL;
6779  }
6780 
6781  if (map_space_ != NULL) {
6782  map_space_->TearDown();
6783  delete map_space_;
6784  map_space_ = NULL;
6785  }
6786 
6787  if (cell_space_ != NULL) {
6788  cell_space_->TearDown();
6789  delete cell_space_;
6790  cell_space_ = NULL;
6791  }
6792 
6793  if (property_cell_space_ != NULL) {
6794  property_cell_space_->TearDown();
6795  delete property_cell_space_;
6796  property_cell_space_ = NULL;
6797  }
6798 
6799  if (lo_space_ != NULL) {
6800  lo_space_->TearDown();
6801  delete lo_space_;
6802  lo_space_ = NULL;
6803  }
6804 
6805  store_buffer()->TearDown();
6806  incremental_marking()->TearDown();
6807 
6808  isolate_->memory_allocator()->TearDown();
6809 }
6810 
6811 
6812 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
6813  GCType gc_type,
6814  bool pass_isolate) {
6815  ASSERT(callback != NULL);
6816  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
6817  ASSERT(!gc_prologue_callbacks_.Contains(pair));
6818  return gc_prologue_callbacks_.Add(pair);
6819 }
6820 
6821 
6822 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
6823  ASSERT(callback != NULL);
6824  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6825  if (gc_prologue_callbacks_[i].callback == callback) {
6826  gc_prologue_callbacks_.Remove(i);
6827  return;
6828  }
6829  }
6830  UNREACHABLE();
6831 }
6832 
6833 
6834 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
6835  GCType gc_type,
6836  bool pass_isolate) {
6837  ASSERT(callback != NULL);
6838  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
6839  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6840  return gc_epilogue_callbacks_.Add(pair);
6841 }
6842 
6843 
6844 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
6845  ASSERT(callback != NULL);
6846  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6847  if (gc_epilogue_callbacks_[i].callback == callback) {
6848  gc_epilogue_callbacks_.Remove(i);
6849  return;
6850  }
6851  }
6852  UNREACHABLE();
6853 }
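// A hedged, illustrative sketch of how these hooks are reached from the
// embedder side (not part of heap.cc; OnGCPrologue is a made-up name). The
// public v8::Isolate API registers callbacks that receive the isolate, which
// corresponds to pass_isolate being true here:
//
//   static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
//                            v8::GCCallbackFlags flags) {
//     // e.g. record that a collection of kind `type` is starting
//   }
//
//   isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
//   // ... later ...
//   isolate->RemoveGCPrologueCallback(OnGCPrologue);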
6854 
6855 
6856 MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
6857  DependentCode* dep) {
6858  ASSERT(!InNewSpace(obj));
6859  ASSERT(!InNewSpace(dep));
6860  MaybeObject* maybe_obj =
6861  WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
6862  WeakHashTable* table;
6863  if (!maybe_obj->To(&table)) return maybe_obj;
6864  if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
6865  WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
6866  }
6867  set_weak_object_to_code_table(table);
6868  ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
6869  return weak_object_to_code_table_;
6870 }
6871 
6872 
6873 DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
6874  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
6875  if (dep->IsDependentCode()) return DependentCode::cast(dep);
6876  return DependentCode::cast(empty_fixed_array());
6877 }
6878 
6879 
6880 void Heap::EnsureWeakObjectToCodeTable() {
6881  if (!weak_object_to_code_table()->IsHashTable()) {
6882  set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
6883  }
6884 }
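// In short: weak_object_to_code_table_ is a WeakHashTable from heap objects
// to DependentCode. AddWeakObjectToCodeDependency records that some
// (optimized) code embeds the object weakly, LookupWeakObjectToCodeDependency
// returns that dependent code so it can be deoptimized once the object goes
// away, and EnsureWeakObjectToCodeTable creates the table lazily with an
// initial capacity of 16.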
6885 
6886 
6887 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
6888  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
6889 }
6890 
6891 #ifdef DEBUG
6892 
6893 class PrintHandleVisitor: public ObjectVisitor {
6894  public:
6895  void VisitPointers(Object** start, Object** end) {
6896  for (Object** p = start; p < end; p++)
6897  PrintF(" handle %p to %p\n",
6898  reinterpret_cast<void*>(p),
6899  reinterpret_cast<void*>(*p));
6900  }
6901 };
6902 
6903 
6904 void Heap::PrintHandles() {
6905  PrintF("Handles:\n");
6906  PrintHandleVisitor v;
6907  isolate_->handle_scope_implementer()->Iterate(&v);
6908 }
6909 
6910 #endif
6911 
6912 
6913 Space* AllSpaces::next() {
6914  switch (counter_++) {
6915  case NEW_SPACE:
6916  return heap_->new_space();
6917  case OLD_POINTER_SPACE:
6918  return heap_->old_pointer_space();
6919  case OLD_DATA_SPACE:
6920  return heap_->old_data_space();
6921  case CODE_SPACE:
6922  return heap_->code_space();
6923  case MAP_SPACE:
6924  return heap_->map_space();
6925  case CELL_SPACE:
6926  return heap_->cell_space();
6927  case PROPERTY_CELL_SPACE:
6928  return heap_->property_cell_space();
6929  case LO_SPACE:
6930  return heap_->lo_space();
6931  default:
6932  return NULL;
6933  }
6934 }
6935 
6936 
6937 PagedSpace* PagedSpaces::next() {
6938  switch (counter_++) {
6939  case OLD_POINTER_SPACE:
6940  return heap_->old_pointer_space();
6941  case OLD_DATA_SPACE:
6942  return heap_->old_data_space();
6943  case CODE_SPACE:
6944  return heap_->code_space();
6945  case MAP_SPACE:
6946  return heap_->map_space();
6947  case CELL_SPACE:
6948  return heap_->cell_space();
6949  case PROPERTY_CELL_SPACE:
6950  return heap_->property_cell_space();
6951  default:
6952  return NULL;
6953  }
6954 }
6955 
6956 
6957 
6958 OldSpace* OldSpaces::next() {
6959  switch (counter_++) {
6960  case OLD_POINTER_SPACE:
6961  return heap_->old_pointer_space();
6962  case OLD_DATA_SPACE:
6963  return heap_->old_data_space();
6964  case CODE_SPACE:
6965  return heap_->code_space();
6966  default:
6967  return NULL;
6968  }
6969 }
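// AllSpaces, PagedSpaces and OldSpaces above are simple cursor-style
// iterators over subsets of the heap's spaces. The usual idiom, as used by
// CountTotalHolesSize() further down in this file, is:
//
//   OldSpaces spaces(heap);
//   for (OldSpace* space = spaces.next();
//        space != NULL;
//        space = spaces.next()) {
//     // per-space work
//   }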
6970 
6971 
6972 SpaceIterator::SpaceIterator(Heap* heap)
6973  : heap_(heap),
6974  current_space_(FIRST_SPACE),
6975  iterator_(NULL),
6976  size_func_(NULL) {
6977 }
6978 
6979 
6980 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6981  : heap_(heap),
6982  current_space_(FIRST_SPACE),
6983  iterator_(NULL),
6984  size_func_(size_func) {
6985 }
6986 
6987 
6988 SpaceIterator::~SpaceIterator() {
6989  // Delete active iterator if any.
6990  delete iterator_;
6991 }
6992 
6993 
6994 bool SpaceIterator::has_next() {
6995  // Iterate until no more spaces.
6996  return current_space_ != LAST_SPACE;
6997 }
6998 
6999 
7000 ObjectIterator* SpaceIterator::next() {
7001  if (iterator_ != NULL) {
7002  delete iterator_;
7003  iterator_ = NULL;
7004  // Move to the next space
7005  current_space_++;
7006  if (current_space_ > LAST_SPACE) {
7007  return NULL;
7008  }
7009  }
7010 
7011  // Return iterator for the new current space.
7012  return CreateIterator();
7013 }
7014 
7015 
7016 // Create an iterator for the space to iterate.
7017 ObjectIterator* SpaceIterator::CreateIterator() {
7018  ASSERT(iterator_ == NULL);
7019 
7020  switch (current_space_) {
7021  case NEW_SPACE:
7022  iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7023  break;
7024  case OLD_POINTER_SPACE:
7025  iterator_ =
7026  new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7027  break;
7028  case OLD_DATA_SPACE:
7029  iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7030  break;
7031  case CODE_SPACE:
7032  iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7033  break;
7034  case MAP_SPACE:
7035  iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7036  break;
7037  case CELL_SPACE:
7038  iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7039  break;
7040  case PROPERTY_CELL_SPACE:
7041  iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7042  size_func_);
7043  break;
7044  case LO_SPACE:
7045  iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7046  break;
7047  }
7048 
7049  // Return the newly allocated iterator;
7050  ASSERT(iterator_ != NULL);
7051  return iterator_;
7052 }
7053 
7054 
7055 class HeapObjectsFilter {
7056  public:
7057  virtual ~HeapObjectsFilter() {}
7058  virtual bool SkipObject(HeapObject* object) = 0;
7059 };
7060 
7061 
7062 class UnreachableObjectsFilter : public HeapObjectsFilter {
7063  public:
7064  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
7065  MarkReachableObjects();
7066  }
7067 
7068  ~UnreachableObjectsFilter() {
7069  heap_->mark_compact_collector()->ClearMarkbits();
7070  }
7071 
7072  bool SkipObject(HeapObject* object) {
7073  MarkBit mark_bit = Marking::MarkBitFrom(object);
7074  return !mark_bit.Get();
7075  }
7076 
7077  private:
7078  class MarkingVisitor : public ObjectVisitor {
7079  public:
7080  MarkingVisitor() : marking_stack_(10) {}
7081 
7082  void VisitPointers(Object** start, Object** end) {
7083  for (Object** p = start; p < end; p++) {
7084  if (!(*p)->IsHeapObject()) continue;
7085  HeapObject* obj = HeapObject::cast(*p);
7086  MarkBit mark_bit = Marking::MarkBitFrom(obj);
7087  if (!mark_bit.Get()) {
7088  mark_bit.Set();
7089  marking_stack_.Add(obj);
7090  }
7091  }
7092  }
7093 
7094  void TransitiveClosure() {
7095  while (!marking_stack_.is_empty()) {
7096  HeapObject* obj = marking_stack_.RemoveLast();
7097  obj->Iterate(this);
7098  }
7099  }
7100 
7101  private:
7102  List<HeapObject*> marking_stack_;
7103  };
7104 
7105  void MarkReachableObjects() {
7106  MarkingVisitor visitor;
7107  heap_->IterateRoots(&visitor, VISIT_ALL);
7108  visitor.TransitiveClosure();
7109  }
7110 
7111  Heap* heap_;
7112  DisallowHeapAllocation no_allocation_;
7113 };
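// In short: MarkReachableObjects() walks everything transitively reachable
// from the roots and sets its mark bits; SkipObject() then reports any object
// whose mark bit is still clear, i.e. the unreachable ones. The mark bits are
// borrowed from the mark-compact collector and cleared again in the
// destructor, and the DisallowHeapAllocation member forbids allocation (and
// thus GC) for the lifetime of the filter.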
7114 
7115 
7116 HeapIterator::HeapIterator(Heap* heap)
7117  : heap_(heap),
7118  filtering_(HeapIterator::kNoFiltering),
7119  filter_(NULL) {
7120  Init();
7121 }
7122 
7123 
7124 HeapIterator::HeapIterator(Heap* heap,
7125  HeapIterator::HeapObjectsFiltering filtering)
7126  : heap_(heap),
7127  filtering_(filtering),
7128  filter_(NULL) {
7129  Init();
7130 }
7131 
7132 
7133 HeapIterator::~HeapIterator() {
7134  Shutdown();
7135 }
7136 
7137 
7138 void HeapIterator::Init() {
7139  // Start the iteration.
7140  space_iterator_ = new SpaceIterator(heap_);
7141  switch (filtering_) {
7142  case kFilterUnreachable:
7143  filter_ = new UnreachableObjectsFilter(heap_);
7144  break;
7145  default:
7146  break;
7147  }
7148  object_iterator_ = space_iterator_->next();
7149 }
7150 
7151 
7152 void HeapIterator::Shutdown() {
7153 #ifdef DEBUG
7154  // Assert that in filtering mode we have iterated through all
7155  // objects. Otherwise, heap will be left in an inconsistent state.
7156  if (filtering_ != kNoFiltering) {
7157  ASSERT(object_iterator_ == NULL);
7158  }
7159 #endif
7160  // Make sure the last iterator is deallocated.
7161  delete space_iterator_;
7162  space_iterator_ = NULL;
7163  object_iterator_ = NULL;
7164  delete filter_;
7165  filter_ = NULL;
7166 }
7167 
7168 
7169 HeapObject* HeapIterator::next() {
7170  if (filter_ == NULL) return NextObject();
7171 
7172  HeapObject* obj = NextObject();
7173  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7174  return obj;
7175 }
7176 
7177 
7178 HeapObject* HeapIterator::NextObject() {
7179  // No iterator means we are done.
7180  if (object_iterator_ == NULL) return NULL;
7181 
7182  if (HeapObject* obj = object_iterator_->next_object()) {
7183  // If the current iterator has more objects we are fine.
7184  return obj;
7185  } else {
7186  // Go through the spaces looking for one that has objects.
7187  while (space_iterator_->has_next()) {
7188  object_iterator_ = space_iterator_->next();
7189  if (HeapObject* obj = object_iterator_->next_object()) {
7190  return obj;
7191  }
7192  }
7193  }
7194  // Done with the last space.
7195  object_iterator_ = NULL;
7196  return NULL;
7197 }
7198 
7199 
7200 void HeapIterator::reset() {
7201  // Restart the iterator.
7202  Shutdown();
7203  Init();
7204 }
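// A hedged usage sketch from the caller's side (illustrative only):
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // visits every object in every space, skipping unreachable ones
//   }
//
// Without a filter, next() simply forwards to NextObject(), which walks each
// space's object iterator in turn; with kFilterUnreachable it additionally
// drops objects the UnreachableObjectsFilter reports as unreachable.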
7205 
7206 
7207 #ifdef DEBUG
7208 
7209 Object* const PathTracer::kAnyGlobalObject = NULL;
7210 
7211 class PathTracer::MarkVisitor: public ObjectVisitor {
7212  public:
7213  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7214  void VisitPointers(Object** start, Object** end) {
7215  // Scan all HeapObject pointers in [start, end)
7216  for (Object** p = start; !tracer_->found() && (p < end); p++) {
7217  if ((*p)->IsHeapObject())
7218  tracer_->MarkRecursively(p, this);
7219  }
7220  }
7221 
7222  private:
7223  PathTracer* tracer_;
7224 };
7225 
7226 
7227 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7228  public:
7229  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7230  void VisitPointers(Object** start, Object** end) {
7231  // Scan all HeapObject pointers in [start, end)
7232  for (Object** p = start; p < end; p++) {
7233  if ((*p)->IsHeapObject())
7234  tracer_->UnmarkRecursively(p, this);
7235  }
7236  }
7237 
7238  private:
7239  PathTracer* tracer_;
7240 };
7241 
7242 
7243 void PathTracer::VisitPointers(Object** start, Object** end) {
7244  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7245  // Visit all HeapObject pointers in [start, end)
7246  for (Object** p = start; !done && (p < end); p++) {
7247  if ((*p)->IsHeapObject()) {
7248  TracePathFrom(p);
7249  done = ((what_to_find_ == FIND_FIRST) && found_target_);
7250  }
7251  }
7252 }
7253 
7254 
7255 void PathTracer::Reset() {
7256  found_target_ = false;
7257  object_stack_.Clear();
7258 }
7259 
7260 
7261 void PathTracer::TracePathFrom(Object** root) {
7262  ASSERT((search_target_ == kAnyGlobalObject) ||
7263  search_target_->IsHeapObject());
7264  found_target_in_trace_ = false;
7265  Reset();
7266 
7267  MarkVisitor mark_visitor(this);
7268  MarkRecursively(root, &mark_visitor);
7269 
7270  UnmarkVisitor unmark_visitor(this);
7271  UnmarkRecursively(root, &unmark_visitor);
7272 
7273  ProcessResults();
7274 }
7275 
7276 
7277 static bool SafeIsNativeContext(HeapObject* obj) {
7278  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7279 }
7280 
7281 
7282 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7283  if (!(*p)->IsHeapObject()) return;
7284 
7285  HeapObject* obj = HeapObject::cast(*p);
7286 
7287  Object* map = obj->map();
7288 
7289  if (!map->IsHeapObject()) return; // visited before
7290 
7291  if (found_target_in_trace_) return; // stop if target found
7292  object_stack_.Add(obj);
7293  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7294  (obj == search_target_)) {
7295  found_target_in_trace_ = true;
7296  found_target_ = true;
7297  return;
7298  }
7299 
7300  bool is_native_context = SafeIsNativeContext(obj);
7301 
7302  // not visited yet
7303  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7304 
7305  Address map_addr = map_p->address();
7306 
7307  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7308 
7309  // Scan the object body.
7310  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7311  // This is specialized to scan Contexts properly.
7312  Object** start = reinterpret_cast<Object**>(obj->address() +
7313  Context::kHeaderSize);
7314  Object** end = reinterpret_cast<Object**>(obj->address() +
7315  Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7316  mark_visitor->VisitPointers(start, end);
7317  } else {
7318  obj->IterateBody(map_p->instance_type(),
7319  obj->SizeFromMap(map_p),
7320  mark_visitor);
7321  }
7322 
7323  // Scan the map after the body because the body is a lot more interesting
7324  // when doing leak detection.
7325  MarkRecursively(&map, mark_visitor);
7326 
7327  if (!found_target_in_trace_) // don't pop if found the target
7328  object_stack_.RemoveLast();
7329 }
7330 
7331 
7332 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7333  if (!(*p)->IsHeapObject()) return;
7334 
7335  HeapObject* obj = HeapObject::cast(*p);
7336 
7337  Object* map = obj->map();
7338 
7339  if (map->IsHeapObject()) return; // unmarked already
7340 
7341  Address map_addr = reinterpret_cast<Address>(map);
7342 
7343  map_addr -= kMarkTag;
7344 
7345  ASSERT_TAG_ALIGNED(map_addr);
7346 
7347  HeapObject* map_p = HeapObject::FromAddress(map_addr);
7348 
7349  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7350 
7351  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7352 
7353  obj->IterateBody(Map::cast(map_p)->instance_type(),
7354  obj->SizeFromMap(Map::cast(map_p)),
7355  unmark_visitor);
7356 }
7357 
7358 
7359 void PathTracer::ProcessResults() {
7360  if (found_target_) {
7361  PrintF("=====================================\n");
7362  PrintF("==== Path to object ====\n");
7363  PrintF("=====================================\n\n");
7364 
7365  ASSERT(!object_stack_.is_empty());
7366  for (int i = 0; i < object_stack_.length(); i++) {
7367  if (i > 0) PrintF("\n |\n |\n V\n\n");
7368  Object* obj = object_stack_[i];
7369  obj->Print();
7370  }
7371  PrintF("=====================================\n");
7372  }
7373 }
7374 
7375 
7376 // Triggers a depth-first traversal of reachable objects from one
7377 // given root object and finds a path to a specific heap object and
7378 // prints it.
7379 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7380  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7381  tracer.VisitPointer(&root);
7382 }
7383 
7384 
7385 // Triggers a depth-first traversal of reachable objects from roots
7386 // and finds a path to a specific heap object and prints it.
7387 void Heap::TracePathToObject(Object* target) {
7388  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7389  IterateRoots(&tracer, VISIT_ONLY_STRONG);
7390 }
7391 
7392 
7393 // Triggers a depth-first traversal of reachable objects from roots
7394 // and finds a path to any global object and prints it. Useful for
7395 // determining the source for leaks of global objects.
7396 void Heap::TracePathToGlobal() {
7397  PathTracer tracer(PathTracer::kAnyGlobalObject,
7398  PathTracer::FIND_ALL,
7399  VISIT_ALL);
7400  IterateRoots(&tracer, VISIT_ONLY_STRONG);
7401 }
7402 #endif
7403 
7404 
7405 static intptr_t CountTotalHolesSize(Heap* heap) {
7406  intptr_t holes_size = 0;
7407  OldSpaces spaces(heap);
7408  for (OldSpace* space = spaces.next();
7409  space != NULL;
7410  space = spaces.next()) {
7411  holes_size += space->Waste() + space->Available();
7412  }
7413  return holes_size;
7414 }
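// Note: a "hole" here is free-list or otherwise wasted space inside the old
// spaces (Waste() + Available() per space). The totals taken before and after
// a collection are reported by the --trace_gc_nvp output below as
// holes_size_before and holes_size_after.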
7415 
7416 
7417 GCTracer::GCTracer(Heap* heap,
7418  const char* gc_reason,
7419  const char* collector_reason)
7420  : start_time_(0.0),
7421  start_object_size_(0),
7422  start_memory_size_(0),
7423  gc_count_(0),
7424  full_gc_count_(0),
7425  allocated_since_last_gc_(0),
7426  spent_in_mutator_(0),
7427  promoted_objects_size_(0),
7428  nodes_died_in_new_space_(0),
7429  nodes_copied_in_new_space_(0),
7430  nodes_promoted_(0),
7431  heap_(heap),
7432  gc_reason_(gc_reason),
7433  collector_reason_(collector_reason) {
7434  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7435  start_time_ = OS::TimeCurrentMillis();
7436  start_object_size_ = heap_->SizeOfObjects();
7437  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7438 
7439  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7440  scopes_[i] = 0;
7441  }
7442 
7443  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7444 
7445  allocated_since_last_gc_ =
7446  heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7447 
7448  if (heap_->last_gc_end_timestamp_ > 0) {
7449  spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7450  }
7451 
7452  steps_count_ = heap_->incremental_marking()->steps_count();
7453  steps_took_ = heap_->incremental_marking()->steps_took();
7454  longest_step_ = heap_->incremental_marking()->longest_step();
7455  steps_count_since_last_gc_ =
7456  heap_->incremental_marking()->steps_count_since_last_gc();
7457  steps_took_since_last_gc_ =
7458  heap_->incremental_marking()->steps_took_since_last_gc();
7459 }
7460 
7461 
7462 GCTracer::~GCTracer() {
7463  // Printf ONE line iff flag is set.
7464  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7465 
7466  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7467 
7468  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7469  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7470 
7471  double time = heap_->last_gc_end_timestamp_ - start_time_;
7472 
7473  // Update cumulative GC statistics if required.
7474  if (FLAG_print_cumulative_gc_stat) {
7475  heap_->total_gc_time_ms_ += time;
7476  heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7477  heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7478  heap_->alive_after_last_gc_);
7479  if (!first_gc) {
7480  heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7481  spent_in_mutator_);
7482  }
7483  } else if (FLAG_trace_gc_verbose) {
7484  heap_->total_gc_time_ms_ += time;
7485  }
7486 
7487  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7488 
7489  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7490 
7491  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7492  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7493 
7494  if (!FLAG_trace_gc_nvp) {
7495  int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7496 
7497  double end_memory_size_mb =
7498  static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7499 
7500  PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7501  CollectorString(),
7502  static_cast<double>(start_object_size_) / MB,
7503  static_cast<double>(start_memory_size_) / MB,
7504  SizeOfHeapObjects(),
7505  end_memory_size_mb);
7506 
7507  if (external_time > 0) PrintF("%d / ", external_time);
7508  PrintF("%.1f ms", time);
7509  if (steps_count_ > 0) {
7510  if (collector_ == SCAVENGER) {
7511  PrintF(" (+ %.1f ms in %d steps since last GC)",
7512  steps_took_since_last_gc_,
7513  steps_count_since_last_gc_);
7514  } else {
7515  PrintF(" (+ %.1f ms in %d steps since start of marking, "
7516  "biggest step %.1f ms)",
7517  steps_took_,
7518  steps_count_,
7519  longest_step_);
7520  }
7521  }
7522 
7523  if (gc_reason_ != NULL) {
7524  PrintF(" [%s]", gc_reason_);
7525  }
7526 
7527  if (collector_reason_ != NULL) {
7528  PrintF(" [%s]", collector_reason_);
7529  }
7530 
7531  PrintF(".\n");
7532  } else {
7533  PrintF("pause=%.1f ", time);
7534  PrintF("mutator=%.1f ", spent_in_mutator_);
7535  PrintF("gc=");
7536  switch (collector_) {
7537  case SCAVENGER:
7538  PrintF("s");
7539  break;
7540  case MARK_COMPACTOR:
7541  PrintF("ms");
7542  break;
7543  default:
7544  UNREACHABLE();
7545  }
7546  PrintF(" ");
7547 
7548  PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7549  PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7550  PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
7551  PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7552  PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
7553  PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7554  PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7555  PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7556  PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7557  PrintF("compaction_ptrs=%.1f ",
7558  scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7559  PrintF("intracompaction_ptrs=%.1f ",
7560  scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7561  PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7562  PrintF("weakcollection_process=%.1f ",
7563  scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7564  PrintF("weakcollection_clear=%.1f ",
7565  scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7566 
7567  PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7568  PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7569  PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7570  in_free_list_or_wasted_before_gc_);
7571  PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7572 
7573  PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7574  PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7575  PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7576  PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7577  PrintF("nodes_promoted=%d ", nodes_promoted_);
7578 
7579  if (collector_ == SCAVENGER) {
7580  PrintF("stepscount=%d ", steps_count_since_last_gc_);
7581  PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7582  } else {
7583  PrintF("stepscount=%d ", steps_count_);
7584  PrintF("stepstook=%.1f ", steps_took_);
7585  PrintF("longeststep=%.1f ", longest_step_);
7586  }
7587 
7588  PrintF("\n");
7589  }
7590 
7591  heap_->PrintShortHeapStatistics();
7592 }
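// Illustrative only, with made-up numbers: with --trace_gc (and without
// --trace_gc_nvp) the PrintF calls above emit roughly one line per GC of the
// form
//
//   [1234]    567 ms: Scavenge 2.3 (34.1) -> 1.9 (35.1) MB, 1.0 ms [allocation failure].
//
// i.e. collector name, object size (committed memory) in MB before and after,
// the pause length, plus the gc/collector reasons when they are set.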
7593 
7594 
7595 const char* GCTracer::CollectorString() {
7596  switch (collector_) {
7597  case SCAVENGER:
7598  return "Scavenge";
7599  case MARK_COMPACTOR:
7600  return "Mark-sweep";
7601  }
7602  return "Unknown GC";
7603 }
7604 
7605 
7606 int KeyedLookupCache::Hash(Map* map, Name* name) {
7607  // Uses only lower 32 bits if pointers are larger.
7608  uintptr_t addr_hash =
7609  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7610  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7611 }
7612 
7613 
7614 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7615  int index = (Hash(map, name) & kHashMask);
7616  for (int i = 0; i < kEntriesPerBucket; i++) {
7617  Key& key = keys_[index + i];
7618  if ((key.map == map) && key.name->Equals(name)) {
7619  return field_offsets_[index + i];
7620  }
7621  }
7622  return kNotFound;
7623 }
7624 
7625 
7626 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7627  if (!name->IsUniqueName()) {
7628  String* internalized_string;
7629  if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
7630  String::cast(name), &internalized_string)) {
7631  return;
7632  }
7633  name = internalized_string;
7634  }
7635  // This cache is cleared only between mark compact passes, so we expect the
7636  // cache to only contain old space names.
7637  ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
7638 
7639  int index = (Hash(map, name) & kHashMask);
7640  // After a GC there will be free slots, so we use them in order (this may
7641  // help to get the most frequently used one in position 0).
7642  for (int i = 0; i < kEntriesPerBucket; i++) {
7643  Key& key = keys_[index + i];
7644  Object* free_entry_indicator = NULL;
7645  if (key.map == free_entry_indicator) {
7646  key.map = map;
7647  key.name = name;
7648  field_offsets_[index + i] = field_offset;
7649  return;
7650  }
7651  }
7652  // No free entry found in this bucket, so we move them all down one and
7653  // put the new entry at position zero.
7654  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7655  Key& key = keys_[index + i];
7656  Key& key2 = keys_[index + i - 1];
7657  key = key2;
7658  field_offsets_[index + i] = field_offsets_[index + i - 1];
7659  }
7660 
7661  // Write the new first entry.
7662  Key& key = keys_[index];
7663  key.map = map;
7664  key.name = name;
7665  field_offsets_[index] = field_offset;
7666 }
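// In short: the cache is a set of buckets of kEntriesPerBucket entries
// selected by Hash(map, name). Update() first fills a free slot in the bucket
// if one exists; otherwise it shifts the existing entries down by one and
// installs the new (map, name) -> field_offset pair at position zero, so the
// most recently inserted entry is the first one Lookup() probes.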
7667 
7668 
7669 void KeyedLookupCache::Clear() {
7670  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7671 }
7672 
7673 
7674 void DescriptorLookupCache::Clear() {
7675  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7676 }
7677 
7678 
7679 #ifdef DEBUG
7680 void Heap::GarbageCollectionGreedyCheck() {
7681  ASSERT(FLAG_gc_greedy);
7682  if (isolate_->bootstrapper()->IsActive()) return;
7683  if (!AllowAllocationFailure::IsAllowed(isolate_)) return;
7684  CollectGarbage(NEW_SPACE);
7685 }
7686 #endif
7687 
7688 
7689 void ExternalStringTable::CleanUp() {
7690  int last = 0;
7691  for (int i = 0; i < new_space_strings_.length(); ++i) {
7692  if (new_space_strings_[i] == heap_->the_hole_value()) {
7693  continue;
7694  }
7695  ASSERT(new_space_strings_[i]->IsExternalString());
7696  if (heap_->InNewSpace(new_space_strings_[i])) {
7697  new_space_strings_[last++] = new_space_strings_[i];
7698  } else {
7699  old_space_strings_.Add(new_space_strings_[i]);
7700  }
7701  }
7702  new_space_strings_.Rewind(last);
7703  new_space_strings_.Trim();
7704 
7705  last = 0;
7706  for (int i = 0; i < old_space_strings_.length(); ++i) {
7707  if (old_space_strings_[i] == heap_->the_hole_value()) {
7708  continue;
7709  }
7710  ASSERT(old_space_strings_[i]->IsExternalString());
7711  ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7712  old_space_strings_[last++] = old_space_strings_[i];
7713  }
7714  old_space_strings_.Rewind(last);
7715  old_space_strings_.Trim();
7716 #ifdef VERIFY_HEAP
7717  if (FLAG_verify_heap) {
7718  Verify();
7719  }
7720 #endif
7721 }
7722 
7723 
7724 void ExternalStringTable::TearDown() {
7725  for (int i = 0; i < new_space_strings_.length(); ++i) {
7726  heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
7727  }
7728  new_space_strings_.Free();
7729  for (int i = 0; i < old_space_strings_.length(); ++i) {
7730  heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
7731  }
7732  old_space_strings_.Free();
7733 }
7734 
7735 
7736 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7737  chunk->set_next_chunk(chunks_queued_for_free_);
7738  chunks_queued_for_free_ = chunk;
7739 }
7740 
7741 
7742 void Heap::FreeQueuedChunks() {
7743  if (chunks_queued_for_free_ == NULL) return;
7744  MemoryChunk* next;
7745  MemoryChunk* chunk;
7746  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7747  next = chunk->next_chunk();
7748  chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7749 
7750  if (chunk->owner()->identity() == LO_SPACE) {
7751  // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7752  // If FromAnyPointerAddress encounters a slot that belongs to a large
7753  // chunk queued for deletion it will fail to find the chunk because
7754  // it tries to perform a search in the list of pages owned by the large
7755  // object space, but queued chunks were detached from that list.
7756  // To work around this we split the large chunk into normal kPageSize aligned
7757  // pieces and initialize the size, owner and flags fields of every piece.
7758  // If FromAnyPointerAddress encounters a slot that belongs to one of
7759  // these smaller pieces it will treat it as a slot on a normal Page.
7760  Address chunk_end = chunk->address() + chunk->size();
7761  MemoryChunk* inner = MemoryChunk::FromAddress(
7762  chunk->address() + Page::kPageSize);
7763  MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7764  while (inner <= inner_last) {
7765  // Size of a large chunk is always a multiple of
7766  // OS::AllocateAlignment() so there is always
7767  // enough space for a fake MemoryChunk header.
7768  Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7769  // Guard against overflow.
7770  if (area_end < inner->address()) area_end = chunk_end;
7771  inner->SetArea(inner->address(), area_end);
7772  inner->set_size(Page::kPageSize);
7773  inner->set_owner(lo_space());
7774  inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7775  inner = MemoryChunk::FromAddress(
7776  inner->address() + Page::kPageSize);
7777  }
7778  }
7779  }
7780  isolate_->heap()->store_buffer()->Compact();
7781  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7782  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7783  next = chunk->next_chunk();
7784  isolate_->memory_allocator()->Free(chunk);
7785  }
7786  chunks_queued_for_free_ = NULL;
7787 }
7788 
7789 
7790 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7791  uintptr_t p = reinterpret_cast<uintptr_t>(page);
7792  // Tag the page pointer to make it findable in the dump file.
7793  if (compacted) {
7794  p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
7795  } else {
7796  p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
7797  }
7798  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7799  reinterpret_cast<Address>(p);
7800  remembered_unmapped_pages_index_++;
7801  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7802 }
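// Note: the XOR constants are hex-speak markers ("c1ead" ~ cleared, "1d1ed" ~
// I died) applied only to the low, in-page bits, so the tagged addresses in a
// crash dump still identify which pages were recently unmapped and whether
// they had been compacted. The addresses live in a small ring buffer of
// kRememberedUnmappedPages entries.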
7803 
7804 
7805 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7806  memset(object_counts_, 0, sizeof(object_counts_));
7807  memset(object_sizes_, 0, sizeof(object_sizes_));
7808  if (clear_last_time_stats) {
7809  memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7810  memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7811  }
7812 }
7813 
7814 
7815 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7816 
7817 
7818 void Heap::CheckpointObjectStats() {
7819  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
7820  Counters* counters = isolate()->counters();
7821 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7822  counters->count_of_##name()->Increment( \
7823  static_cast<int>(object_counts_[name])); \
7824  counters->count_of_##name()->Decrement( \
7825  static_cast<int>(object_counts_last_time_[name])); \
7826  counters->size_of_##name()->Increment( \
7827  static_cast<int>(object_sizes_[name])); \
7828  counters->size_of_##name()->Decrement( \
7829  static_cast<int>(object_sizes_last_time_[name]));
7830  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7831 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7832  int index;
7833 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7834  index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
7835  counters->count_of_CODE_TYPE_##name()->Increment( \
7836  static_cast<int>(object_counts_[index])); \
7837  counters->count_of_CODE_TYPE_##name()->Decrement( \
7838  static_cast<int>(object_counts_last_time_[index])); \
7839  counters->size_of_CODE_TYPE_##name()->Increment( \
7840  static_cast<int>(object_sizes_[index])); \
7841  counters->size_of_CODE_TYPE_##name()->Decrement( \
7842  static_cast<int>(object_sizes_last_time_[index]));
7843  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7844 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7845 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7846  index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
7847  counters->count_of_FIXED_ARRAY_##name()->Increment( \
7848  static_cast<int>(object_counts_[index])); \
7849  counters->count_of_FIXED_ARRAY_##name()->Decrement( \
7850  static_cast<int>(object_counts_last_time_[index])); \
7851  counters->size_of_FIXED_ARRAY_##name()->Increment( \
7852  static_cast<int>(object_sizes_[index])); \
7853  counters->size_of_FIXED_ARRAY_##name()->Decrement( \
7854  static_cast<int>(object_sizes_last_time_[index]));
7855  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7856 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7857 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
7858  index = \
7859  FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
7860  counters->count_of_CODE_AGE_##name()->Increment( \
7861  static_cast<int>(object_counts_[index])); \
7862  counters->count_of_CODE_AGE_##name()->Decrement( \
7863  static_cast<int>(object_counts_last_time_[index])); \
7864  counters->size_of_CODE_AGE_##name()->Increment( \
7865  static_cast<int>(object_sizes_[index])); \
7866  counters->size_of_CODE_AGE_##name()->Decrement( \
7867  static_cast<int>(object_sizes_last_time_[index]));
7868  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
7869 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7870 
7871  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7872  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7873  ClearObjectStats();
7874 }
7875 
7876 } } // namespace v8::internal
intptr_t MaximumCommittedMemory()
Definition: spaces.h:2829
#define V8_INFINITY
Definition: globals.h:44
FixedTypedArrayBase * EmptyFixedTypedArrayForMap(Map *map)
Definition: heap.cc:3813
static MUST_USE_RESULT MaybeObject * InitializeIntrinsicFunctionNames(Heap *heap, Object *dictionary)
Definition: runtime.cc:15114
void set_closure(JSFunction *closure)
Definition: contexts.h:368
kInstanceClassNameOffset flag
Definition: objects-inl.h:5115
StackGuard * stack_guard()
Definition: isolate.h:874
void Free(MemoryChunk *chunk)
Definition: spaces.cc:751
void set_size(int value)
static const int kSize
Definition: objects.h:10077
intptr_t * lo_space_size
Definition: heap.h:2574
void set_opt_count_and_bailout_reason(int value)
MUST_USE_RESULT MaybeObject * CopyFixedArrayWithMap(FixedArray *src, Map *map)
Definition: heap.cc:5198
void Zap(Object *value)
Definition: objects.h:4296
uint8_t byte
Definition: globals.h:185
#define UPDATE_COUNTERS_FOR_SPACE(space)
static bool IsOneByte(const uc16 *chars, int length)
Definition: objects.h:8985
void EnsureWeakObjectToCodeTable()
Definition: heap.cc:6880
GlobalObject * global_object()
Definition: contexts.h:388
int NotifyContextDisposed()
Definition: heap.cc:872
Object * InObjectPropertyAtPut(int index, Object *value, WriteBarrierMode mode=UPDATE_WRITE_BARRIER)
Definition: objects-inl.h:2002
static Struct * cast(Object *that)
MUST_USE_RESULT MaybeObject * AllocateHeapNumber(double value, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:2969
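The MUST_USE_RESULT MaybeObject* allocators listed here all share one calling convention: the caller must unpack the result and propagate an allocation failure to its own caller. A minimal sketch of that convention, assuming the internal headers heap.cc itself uses; AllocateBoxedNumber is a hypothetical helper, not a heap.cc function.

// Sketch only: hypothetical helper illustrating the MaybeObject unpacking idiom.
#include "v8.h"   // the internal src/v8.h included at the top of heap.cc

namespace i = v8::internal;

i::MaybeObject* AllocateBoxedNumber(i::Heap* heap, double value) {
  i::Object* result;
  { i::MaybeObject* maybe = heap->AllocateHeapNumber(value, i::NOT_TENURED);
    // On failure the allocator returns a retry-after-GC marker; pass it back unchanged.
    if (!maybe->ToObject(&result)) return maybe;
  }
  return result;  // the new HeapNumber, forwarded through the MaybeObject* channel
}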
void InitializeBody(int object_size, Object *value)
Definition: objects-inl.h:5705
void RepairFreeListsAfterBoot()
Definition: heap.cc:497
MUST_USE_RESULT MaybeObject * NumberToString(Object *number, bool check_number_string_cache=true)
Definition: heap.cc:3639
static const int kWeakFirstViewOffset
Definition: objects.h:9881
UnicodeCache * unicode_cache()
Definition: isolate.h:908
String *(* ExternalStringTableUpdaterCallback)(Heap *heap, Object **pointer)
Definition: heap.h:388
double sweeping_time() const
Definition: heap.h:1765
uintptr_t real_jslimit()
Definition: execution.h:247
static const int kEndMarker
Definition: heap.h:2559
bool IdleNotification(int hint)
Definition: heap.cc:5710
virtual size_t length() const =0
#define UNREACHABLE()
Definition: checks.h:52
void SetEntryCounts(int number_of_int64_entries, int number_of_code_ptr_entries, int number_of_heap_ptr_entries, int number_of_int32_entries)
Definition: objects-inl.h:2263
friend class MarkCompactCollector
Definition: heap.h:2544
void(* GCEpilogueCallback)(Isolate *isolate, GCType type, GCCallbackFlags flags)
Definition: v8.h:4348
void EnsureHeapIsIterable()
Definition: heap.cc:5679
DEFINE_string(expose_natives_as, …)
Optimize object size
Definition: flags.cc:211
T * start() const
Definition: utils.h:426
MUST_USE_RESULT MaybeObject * AllocateUninitializedFixedDoubleArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:5338
bool InOldPointerSpace(Address address)
Definition: heap-inl.h:331
intptr_t * property_cell_space_capacity
Definition: heap.h:2587
int(* HeapObjectCallback)(HeapObject *obj)
Definition: v8globals.h:248
static Object * WeakNext(Context *context)
Definition: heap.cc:1844
bool always_allocate()
Definition: heap.h:668
void set_global_object(GlobalObject *object)
Definition: contexts.h:393
static SeededNumberDictionary * cast(Object *obj)
Definition: objects.h:4104
#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)
static const int kMaxLength
Definition: objects.h:4626
const char * IntToCString(int n, Vector< char > buffer)
Definition: conversions.cc:136
bool is_tracking_object_moves() const
Definition: heap-profiler.h:84
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)
intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size)
Definition: heap.h:1596
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
bool AdvanceSweeper(intptr_t bytes_to_sweep)
Definition: spaces.cc:2583
void Register(StaticVisitorBase::VisitorId id, Callback callback)
intptr_t CommittedMemory()
Definition: spaces.h:1742
static bool IsMarked(HeapObject *object)
Definition: heap.h:3059
static Cell * cast(Object *obj)
static int NumberOfHandles(Isolate *isolate)
Definition: handles.cc:48
Print usage, including flags
Definition: flags.cc:665
bool Contains(Address a)
Definition: spaces-inl.h:179
void IteratePointersToNewSpace(ObjectSlotCallback callback)
static void MemCopy(void *dest, const void *src, size_t size)
Definition: platform.h:399
Address ToSpaceEnd()
Definition: spaces.h:2565
void SetFlag(int flag)
Definition: spaces.h:440
intptr_t CommittedMemory()
Definition: spaces.h:2461
void set_expected_nof_properties(int value)
MUST_USE_RESULT MaybeObject * AllocateJSArrayWithElements(FixedArrayBase *array_base, ElementsKind elements_kind, int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4650
Address ToSpaceStart()
Definition: spaces.h:2564
void set_instruction_size(int value)
Context * native_context()
Definition: contexts.cc:67
void InitializeBody(int object_size)
Definition: objects-inl.h:2064
void LowerInlineAllocationLimit(intptr_t step)
Definition: spaces.h:2549
static const int kStoreBufferSize
Definition: store-buffer.h:93
static const uchar kMaxNonSurrogateCharCode
Definition: unicode.h:121
static bool IsValid(intptr_t value)
Definition: objects-inl.h:1278
void set_resource(const Resource *buffer)
Definition: objects-inl.h:3258
#define MAKE_CASE(NAME, Name, name)
void ClearAllICsByKind(Code::Kind kind)
Definition: heap.cc:483
MUST_USE_RESULT MaybeObject * AllocateWithContext(JSFunction *function, Context *previous, JSReceiver *extension)
Definition: heap.cc:5593
void CollectAllAvailableGarbage(const char *gc_reason=NULL)
Definition: heap.cc:743
bool ConfigureHeapDefault()
Definition: heap.cc:6456
PagedSpace * paged_space(int idx)
Definition: heap.h:647
void set_aliased_context_slot(int count)
ElementsKind GetElementsKind()
Definition: objects-inl.h:5999
static const int kNoGCFlags
Definition: heap.h:1257
PropertyCellSpace * property_cell_space()
Definition: heap.h:643
MUST_USE_RESULT MaybeObject * AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:5302
const int kPointerSize
Definition: globals.h:268
virtual intptr_t Size()
Definition: spaces.h:2438
void set_prologue_offset(int offset)
MemoryAllocator * memory_allocator()
Definition: isolate.h:884
EternalHandles * eternal_handles()
Definition: isolate.h:920
static Oddball * cast(Object *obj)
static Address & Address_at(Address addr)
Definition: v8memory.h:79
static uint16_t LeadSurrogate(uint32_t char_code)
Definition: unicode.h:131
MUST_USE_RESULT MaybeObject * AllocateForeign(Address address, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3837
const char * DoubleToCString(double v, Vector< char > buffer)
Definition: conversions.cc:81
intptr_t OffsetFrom(T x)
Definition: utils.h:120
static UnseededNumberDictionary * cast(Object *obj)
Definition: objects.h:4157
void QueueMemoryChunkForFree(MemoryChunk *chunk)
Definition: heap.cc:7736
static void VisitLiveObject(Heap *, JSFunction *, WeakObjectRetainer *, bool)
Definition: heap.cc:1804
void CheckpointObjectStats()
Definition: heap.cc:7818
MUST_USE_RESULT MaybeObject * AllocateExternalArray(int length, ExternalArrayType array_type, void *external_pointer, PretenureFlag pretenure)
Definition: heap.cc:4046
intptr_t * cell_space_capacity
Definition: heap.h:2573
bool IsAligned(T value, U alignment)
Definition: utils.h:211
intptr_t * memory_allocator_size
Definition: heap.h:2580
T Remove(int i)
Definition: list-inl.h:125
static DependentCode * cast(Object *object)
void Start(CompactionFlag flag=ALLOW_COMPACTION)
static void VisitLiveObject(Heap *heap, Context *context, WeakObjectRetainer *retainer, bool record_slots)
Definition: heap.cc:1848
void set_inobject_properties(int value)
Definition: objects-inl.h:3998
LazyStaticInstance< Mutex, DefaultConstructTrait< Mutex >, ThreadSafeInitOnceTrait >::type LazyMutex
Definition: mutex.h:128
friend class Page
Definition: heap.h:2542
void Iterate(ObjectVisitor *v)
Definition: isolate.cc:301
virtual intptr_t SizeOfObjects()
Definition: spaces.cc:2568
static void VisitLiveObject(Heap *heap, AllocationSite *site, WeakObjectRetainer *retainer, bool record_slots)
Definition: heap.cc:2012
size_t CommittedPhysicalMemory()
Definition: spaces.cc:984
GlobalHandles * global_handles()
Definition: isolate.h:918
intptr_t Available()
Definition: spaces.h:2476
void IncrementYoungSurvivorsCounter(int survived)
Definition: heap.h:1685
void set_allocation_sites_list(Object *object)
Definition: heap.h:1361
MUST_USE_RESULT MaybeObject * AllocatePolymorphicCodeCache()
Definition: heap.cc:2688
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:820
intptr_t * code_space_capacity
Definition: heap.h:2569
DEFINE_bool(code_comments, …)
void VisitPointer(Object **p)
Definition: heap.cc:1313
static MUST_USE_RESULT MaybeObject * Allocate(Heap *heap, int at_least_space_for, PretenureFlag pretenure=NOT_TENURED)
MUST_USE_RESULT MaybeObject * InternalizeString(String *str)
Definition: heap.cc:6009
static void VisitPhantomObject(Heap *heap, JSArrayBuffer *phantom)
Definition: heap.cc:1971
static void Enter(Heap *heap, String *key_string, Object *key_pattern, FixedArray *value_array, ResultsCacheType type)
Definition: heap.cc:3477
const uint32_t kShortcutTypeMask
Definition: objects.h:652
static void MemMove(void *dest, const void *src, size_t size)
Definition: platform.h:402
void ReserveSpace(int *sizes, Address *addresses)
Definition: heap.cc:941
static const int kIsNotArrayIndexMask
Definition: objects.h:8638
static void DeoptimizeAll(Isolate *isolate)
Definition: deoptimizer.cc:450
void set_end_position(int value)
ExternalArray * EmptyExternalArrayForMap(Map *map)
Definition: heap.cc:3807
int length() const
Definition: utils.h:420
OldSpace * old_pointer_space()
Definition: heap.h:638
static const byte kUninitialized
Definition: objects.h:9508
Map * InternalizedStringMapForString(String *str)
Definition: heap.cc:4959
bool ConfigureHeap(int max_semispace_size, intptr_t max_old_gen_size, intptr_t max_executable_size)
Definition: heap.cc:6375
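ConfigureHeap fixes the semispace, old-generation and executable-code limits before the heap is set up, and ConfigureHeapDefault falls back to the flag-derived defaults. A minimal sketch, under the assumption that the three arguments are byte counts; the sizes below are illustrative only.

// Sketch only: illustrative limits, assuming byte-count parameters.
#include "v8.h"

namespace i = v8::internal;

bool ConfigureSmallHeap(i::Heap* heap) {
  return heap->ConfigureHeap(1 * i::MB,                          // max_semispace_size
                             static_cast<intptr_t>(64) * i::MB,  // max_old_gen_size
                             static_cast<intptr_t>(32) * i::MB); // max_executable_size
}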
T RoundUp(T x, intptr_t m)
Definition: utils.h:144
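RoundUp, IsAligned, RoundUpToPowerOf2 and OBJECT_POINTER_ALIGN all rely on the same power-of-two alignment arithmetic. A self-contained illustration (plain C++, no V8 headers) of what RoundUp(x, m) computes for a power-of-two m:

#include <cassert>
#include <cstdint>

// Round x up to the next multiple of the power-of-two m: add m-1, clear the low bits.
static intptr_t RoundUpTo(intptr_t x, intptr_t m) {
  return (x + m - 1) & ~(m - 1);
}

int main() {
  assert(RoundUpTo(13, 8) == 16);  // object sizes get padded to word alignment
  assert(RoundUpTo(16, 8) == 16);  // already-aligned values are unchanged
  return 0;
}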
intptr_t * map_space_size
Definition: heap.h:2570
Object * Lookup(Object *key)
Definition: objects.cc:15823
static Object * WeakNext(Code *code)
Definition: heap.cc:1819
static double TimeCurrentMillis()
static FixedDoubleArray * cast(Object *obj)
MUST_USE_RESULT MaybeObject * AllocateTypeFeedbackInfo()
Definition: heap.cc:2705
PretenureFlag GetPretenureMode()
Definition: objects.cc:12650
bool CreateApiObjects()
Definition: heap.cc:3056
size_t size() const
Definition: spaces.h:595
GCType
Definition: v8.h:4067
HeapState gc_state()
Definition: heap.h:1508
static NameDictionary * cast(Object *obj)
Definition: objects.h:4049
static const int kSize
Definition: objects.h:6440
MaybeObject * AllocateConstantPool(Heap *heap)
MUST_USE_RESULT MaybeObject * AllocateByteArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3987
static void SetWeakNext(AllocationSite *obj, Object *next)
Definition: heap.cc:2004
void set_age_mark(Address mark)
Definition: spaces.h:2515
void IterateAllRoots(ObjectVisitor *v)
bool contains(Address address)
Definition: spaces.h:960
OldSpace * code_space()
Definition: heap.h:640
static const int kMakeHeapIterableMask
Definition: heap.h:1264
MUST_USE_RESULT MaybeObject * AllocateJSObjectFromMap(Map *map, PretenureFlag pretenure=NOT_TENURED, bool alloc_props=true, AllocationSite *allocation_site=NULL)
Definition: heap.cc:4488
static ExternalString * cast(Object *obj)
MUST_USE_RESULT MaybeObject * AllocateJSArrayAndStorage(ElementsKind elements_kind, int length, int capacity, ArrayStorageAllocationMode mode=DONT_INITIALIZE_ARRAY_ELEMENTS, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4564
void EnsureSpace(intptr_t space_needed)
void Iterate(ObjectVisitor *v)
Definition: heap-inl.h:707
void set_kind(byte kind)
Definition: objects-inl.h:1827
#define V8_PTR_PREFIX
Definition: globals.h:220
static const int kNextFunctionLinkOffset
Definition: objects.h:7526
bool InToSpace(Object *object)
Definition: heap-inl.h:326
void CopyFrom(const CodeDesc &desc)
Definition: objects.cc:10327
static int SizeFor(int length)
Definition: objects.h:3152
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
Definition: heap.cc:6887
void set_start_position_and_type(int value)
void set_resource(const Resource *buffer)
Definition: objects-inl.h:3291
static void Iterate(Isolate *isolate, ObjectVisitor *visitor)
Definition: serialize.cc:1350
static PropertyCell * cast(Object *obj)
void Iterate(ObjectVisitor *v)
Definition: v8threads.cc:370
RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind)
Definition: heap.cc:3791
void Iterate(ObjectVisitor *v)
static const int kSize
Definition: objects.h:8710
bool HasBeenSetUp()
Definition: heap.cc:260
byte * relocation_start()
Definition: objects-inl.h:5877
#define STATIC_ASCII_VECTOR(x)
Definition: utils.h:570
LargeObjectSpace * lo_space()
Definition: heap.h:646
bool RootCanBeTreatedAsConstant(RootListIndex root_index)
Definition: heap.cc:3438
Assembler * origin
Definition: v8globals.h:242
const Address kFromSpaceZapValue
Definition: v8globals.h:85
bool ToSpaceContains(Address address)
Definition: spaces.h:2567
DeoptimizerData * deoptimizer_data()
Definition: isolate.h:878
Callback GetVisitorById(StaticVisitorBase::VisitorId id)
MUST_USE_RESULT MaybeObject * AllocateExternalStringFromTwoByte(const ExternalTwoByteString::Resource *resource)
Definition: heap.cc:3934
MUST_USE_RESULT MaybeObject * AllocatePartialMap(InstanceType instance_type, int instance_size)
Definition: heap.cc:2619
static Object * Lookup(Heap *heap, String *key_string, Object *key_pattern, ResultsCacheType type)
Definition: heap.cc:3444
DescriptorLookupCache * descriptor_lookup_cache()
Definition: isolate.h:896
void set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1352
static JSMessageObject * cast(Object *obj)
Definition: objects-inl.h:5791
static const int kAbortIncrementalMarkingMask
Definition: heap.h:1260
static const int kNonWeakFieldsEndOffset
Definition: objects.h:7525
Vector< const char > CStrVector(const char *data)
Definition: utils.h:574
bool CollectGarbage(AllocationSpace space, const char *gc_reason=NULL, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition: heap-inl.h:554
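CollectGarbage picks the collector from its space argument (NEW_SPACE normally requests a scavenge, any other space a mark-compact), while CollectAllAvailableGarbage keeps collecting until no further memory is released. A sketch of how internal code requests both; the reason strings are illustrative.

// Sketch only: internal API, hypothetical reason strings.
#include "v8.h"

namespace i = v8::internal;

void RequestCollections(i::Heap* heap) {
  heap->CollectGarbage(i::NEW_SPACE, "example: scavenge");         // young-generation GC
  heap->CollectGarbage(i::OLD_POINTER_SPACE, "example: full GC");  // mark-compact
  heap->CollectAllAvailableGarbage("example: last resort");        // repeat until nothing is freed
}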
void IterateNewSpaceRoots(ObjectVisitor *visitor)
void FreeQueuedChunks()
Definition: heap.cc:7742
CellSpace * cell_space()
Definition: heap.h:642
static Local< Context > ToLocal(v8::internal::Handle< v8::internal::Context > obj)
static int OffsetOfElementAt(int index)
Definition: objects.h:3070
static const int kNextCodeLinkOffset
Definition: objects.h:5588
intptr_t CommittedMemory()
Definition: heap.cc:202
Object * GetNumberStringCache(Object *number)
Definition: heap.cc:3599
intptr_t SizeOfObjects()
Definition: heap.cc:473
static int SizeFor(int length)
Definition: objects.h:3067
#define T(name, string, precedence)
Definition: token.cc:48
void SetArea(Address area_start, Address area_end)
Definition: spaces.h:601
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor *v)
static uchar ValueOf(const byte *str, unsigned length, unsigned *cursor)
Definition: unicode-inl.h:152
Context * context()
Definition: isolate.h:557
bool IsFastSmiOrObjectElementsKind(ElementsKind kind)
friend class GCCallbacksScope
Definition: heap.h:2550
static SeqTwoByteString * cast(Object *obj)
static const int kSize
Definition: objects.h:9499
static JSFunctionResultCache * cast(Object *obj)
void Iterate(ObjectVisitor *v)
intptr_t get_max_alive_after_gc()
Definition: heap.h:1746
void UpdateReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1708
MUST_USE_RESULT MaybeObject * AllocateJSProxy(Object *handler, Object *prototype)
Definition: heap.cc:4666
void ProcessWeakReferences(WeakObjectRetainer *retainer)
Definition: heap.cc:1896
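ProcessWeakReferences walks the heap's weak object lists and asks the supplied WeakObjectRetainer whether each element should survive. A sketch of the retainer contract, assuming the usual convention that returning NULL drops the entry from its list.

// Sketch only: assumes WeakObjectRetainer exposes the single RetainAs hook listed above.
#include "v8.h"

namespace i = v8::internal;

class KeepEverythingRetainer : public i::WeakObjectRetainer {
 public:
  virtual i::Object* RetainAs(i::Object* object) {
    return object;  // keep every weakly linked object exactly as it is
  }
};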
void ClearNormalizedMapCaches()
Definition: heap.cc:1018
Definition: v8.h:2107
static const int kHeaderSize
Definition: objects.h:3016
static void VisitPointer(Heap *heap, Object **p)
Definition: heap.cc:2098
MUST_USE_RESULT MaybeObject * NumberFromDouble(double value, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3819
Vector< const uint8_t > OneByteVector(const char *data, int length)
Definition: utils.h:578
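CStrVector and OneByteVector build the Vector arguments taken by the one-byte string allocators listed earlier. A sketch with a hypothetical helper name:

// Sketch only: internal API; MakeGreeting is hypothetical.
#include "v8.h"

namespace i = v8::internal;

i::MaybeObject* MakeGreeting(i::Heap* heap) {
  i::Vector<const uint8_t> chars = i::OneByteVector("hello", 5);
  return heap->AllocateStringFromOneByte(chars, i::NOT_TENURED);
}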
bool SlowContains(Address addr)
Definition: spaces.h:2875
static const int kLength
Definition: heap.h:2757
static void VisitLiveObject(Heap *, Code *, WeakObjectRetainer *, bool)
Definition: heap.cc:1827
MUST_USE_RESULT MaybeObject * CreateCode(const CodeDesc &desc, Code::Flags flags, Handle< Object > self_reference, bool immovable=false, bool crankshafted=false, int prologue_offset=Code::kPrologueOffsetNotSet)
Definition: heap.cc:4119
void Update(Map *map, Name *name, int field_offset)
Definition: heap.cc:7626
intptr_t * old_data_space_capacity
Definition: heap.h:2567
static int SizeFor(int length)
Definition: objects.h:4591
HeapProfiler * heap_profiler() const
Definition: isolate.h:985
intptr_t Available()
Definition: heap.cc:247
Space * owner() const
Definition: spaces.h:332
MUST_USE_RESULT MaybeObject * AllocateArgumentsObject(Object *callee, int length)
Definition: heap.cc:4401
void set_instance_type(InstanceType value)
Definition: objects-inl.h:4017
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V)
Definition: objects.h:863
bool PostGarbageCollectionProcessing(GarbageCollector collector, GCTracer *tracer)
static HeapNumber * cast(Object *obj)
void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback)
Definition: heap.cc:6844
static void WriteToFlat(String *source, sinkchar *sink, int from, int to)
Definition: objects.cc:8635
static MUST_USE_RESULT MaybeObject * Allocate(Heap *heap, int at_least_space_for, MinimumCapacity capacity_option=USE_DEFAULT_MINIMUM_CAPACITY, PretenureFlag pretenure=NOT_TENURED)
intptr_t Capacity()
Definition: spaces.h:2455
void set_value(double value)
Definition: objects-inl.h:1406
MUST_USE_RESULT MaybeObject * CopyFixedArray(FixedArray *src)
Definition: heap-inl.h:202
virtual size_t length() const =0
void IterateRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:6266
static const int kLengthOffset
Definition: objects.h:3015
static double nan_value()
static const int kSize
Definition: objects.h:1979
MUST_USE_RESULT MaybeObject * ReinitializeJSReceiver(JSReceiver *object, InstanceType type, int size)
Definition: heap.cc:4797
MUST_USE_RESULT MaybeObject * AllocateAccessorPair()
Definition: heap.cc:2693
void set_raw_kind_specific_flags1(int value)
Definition: objects-inl.h:4332
MUST_USE_RESULT MaybeObject * AllocateConstantPoolArray(int number_of_int64_entries, int number_of_code_ptr_entries, int number_of_heap_ptr_entries, int number_of_int32_entries)
Definition: heap.cc:5396
MUST_USE_RESULT MaybeObject * AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field)
MUST_USE_RESULT MaybeObject * AllocateCatchContext(JSFunction *function, Context *previous, String *name, Object *thrown_object)
Definition: heap.cc:5572
const uint32_t kFreeListZapValue
Definition: v8globals.h:88
#define STRUCT_LIST(V)
Definition: objects.h:590
number of stress runs when picking a function to watch for shared function info
Definition: flags.cc:317
static const int kArgumentsLengthIndex
Definition: heap.h:1104
void set_bit_field3(uint32_t bits)
Definition: objects-inl.h:4754
#define CODE_KIND_LIST(V)
Definition: objects.h:5204
#define OBJECT_POINTER_ALIGN(value)
Definition: v8globals.h:386
void CheckNewSpaceExpansionCriteria()
Definition: heap.cc:1372
const intptr_t kObjectAlignment
Definition: v8globals.h:44
CpuProfiler * cpu_profiler() const
Definition: isolate.h:984
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:2099
void SetInternalField(int index, Object *value)
Definition: objects-inl.h:1933
void RecordStats(HeapStats *stats)
MUST_USE_RESULT MaybeObject * AllocateScopeInfo(int length)
Definition: heap.cc:5628
Object * native_contexts_list()
Definition: heap.h:1354
double get_min_in_mutator()
Definition: heap.h:1749
MUST_USE_RESULT MaybeObject * NumberFromUint32(uint32_t value, PretenureFlag pretenure=NOT_TENURED)
Definition: heap-inl.h:280
IncrementalMarking * incremental_marking()
Definition: heap.h:1781
double get_max_gc_pause()
Definition: heap.h:1743
bool Contains(Address addr)
Definition: heap.cc:5929
MUST_USE_RESULT MaybeObject * Put(Object *key, Object *value)
Definition: objects.cc:15831
void EnableInlineAllocation()
Definition: heap.cc:6529
size_t CommittedPhysicalMemory()
Definition: heap.cc:216
Map * get_initial_js_array_map(ElementsKind kind)
Definition: isolate.cc:2235
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)
uint16_t uc16
Definition: globals.h:309
MUST_USE_RESULT MaybeObject * AllocateUninitializedFixedArray(int length)
Definition: heap.cc:5308
void set_extension(Object *object)
Definition: contexts.h:379
MUST_USE_RESULT MaybeObject * AllocateAllocationSite()
Definition: heap.cc:3031
void MoveElements(FixedArray *array, int dst_index, int src_index, int len)
Definition: heap.cc:883
static const int kStartMarker
Definition: heap.h:2558
void set_bit_field(byte value)
Definition: objects-inl.h:4037
static int SizeFor(int length)
Definition: objects.h:9118
void CopyBytes(uint8_t *target, uint8_t *source)
Definition: runtime.cc:1309
void Iterate(v8::internal::ObjectVisitor *v)
Definition: api.cc:7553
NewSpacePage * next_page() const
Definition: spaces.h:2053
#define TYPED_ARRAYS(V)
Definition: objects.h:4663
void MemsetPointer(T **dest, U *value, int counter)
Definition: v8utils.h:198
void set_owner(Space *space)
Definition: spaces.h:342
LoggingAndProfiling
Definition: heap.cc:2176
static int SizeFor(int number_of_int64_entries, int number_of_code_ptr_entries, int number_of_heap_ptr_entries, int number_of_int32_entries)
Definition: objects.h:3232
void RememberUnmappedPage(Address page, bool compacted)
Definition: heap.cc:7790
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:925
static const int kNotFound
Definition: heap.h:2762
static const int kRegExpResultsCacheSize
Definition: heap.h:3028
void PrintPID(const char *format,...)
Definition: v8utils.cc:56
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
void IterateAllRoots(ObjectVisitor *visitor)
static const byte kNull
Definition: objects.h:9505
static const int kBodyOffset
Definition: spaces.h:585
#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)
MUST_USE_RESULT MaybeObject * LookupSingleCharacterStringFromCode(uint16_t code)
Definition: heap.cc:3962
InstanceType instance_type()
Definition: objects-inl.h:4012
static void CopyBlock(Address dst, Address src, int byte_size)
Definition: heap-inl.h:462
#define CONSTANT_STRING_ELEMENT(name, contents)
static bool ShouldZapGarbage()
Definition: heap.h:1486
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1369
static const int kSize
Definition: objects.h:9599
static const uchar kBadChar
Definition: unicode.h:162
void USE(T)
Definition: globals.h:341
void set_size(size_t size)
Definition: spaces.h:597
MUST_USE_RESULT MaybeObject * AllocateFixedDoubleArrayWithHoles(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:5355
Counters * counters()
Definition: isolate.h:859
ScavengeVisitor(Heap *heap)
Definition: heap.cc:1311
MUST_USE_RESULT MaybeObject * AllocateFixedArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:5297
static const int kArgumentsCalleeIndex
Definition: heap.h:1106
const int kSmiTag
Definition: v8.h:5478
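kSmiTag is 0, so a tagged word whose low bit is clear encodes a small integer, while heap object pointers carry a 1 in the low bit. A self-contained illustration of that tag test (the 32-bit encoding is used for the example value):

#include <cassert>
#include <cstdint>

// Low-bit tag check: Smis have the low bit clear, heap object pointers have it set.
static bool IsSmiWord(intptr_t word) { return (word & 1) == 0; }

int main() {
  assert(IsSmiWord(42 << 1));   // 32-bit-style Smi encoding of 42
  assert(!IsSmiWord(0x10001));  // an odd word reads as a heap object pointer
  return 0;
}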
MUST_USE_RESULT MaybeObject * AllocateHashTable(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:5457
PerThreadAssertScopeDebugOnly< HEAP_ALLOCATION_ASSERT, false > DisallowHeapAllocation
Definition: assert-scope.h:214
#define ALLOCATE_MAP(instance_type, size, field_name)
static FixedArray * cast(Object *obj)
static const int kWeakNextOffset
Definition: objects.h:9918
MUST_USE_RESULT MaybeObject * InternalizeUtf8String(const char *str)
Definition: heap.h:1222
static const int kHeaderSize
Definition: objects.h:2757
void Print(const v8::FunctionCallbackInfo< v8::Value > &args)
void DisableInlineAllocation()
Definition: heap.cc:6538
void UpdateInlineAllocationLimit(int size_in_bytes)
Definition: spaces.cc:1371
OptimizingCompilerThread * optimizing_compiler_thread()
Definition: isolate.h:1076
ElementsKind elements_kind()
Definition: objects.h:5945
MapSpace * map_space()
Definition: heap.h:641
void set_previous(Context *context)
Definition: contexts.h:375
intptr_t PromotedSpaceSizeOfObjects()
Definition: heap.cc:6502
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor *v)
intptr_t * old_pointer_space_capacity
Definition: heap.h:2565
bool OldGenerationAllocationLimitReached()
Definition: heap-inl.h:351
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:39
Logger * logger()
Definition: isolate.h:868
StaticResource< Utf8Decoder > * utf8_decoder()
Definition: scanner.h:137
int OffsetOfElementAt(int index)
Definition: objects.h:3244
static const uint32_t kHashBitMask
Definition: objects.h:8646
void set_instance_size(int value)
Definition: objects-inl.h:3990
Object * get(int index)
Definition: objects-inl.h:2127
static VisitorId GetVisitorId(int instance_type, int instance_size)
void ClearJSFunctionResultCaches()
Definition: heap.cc:995
GCCallbackFlags
Definition: v8.h:4073
void RecordStats(HeapStats *stats, bool take_snapshot=false)
Definition: heap.cc:6463
void set_formal_parameter_count(int value)
static const int kWeakNextOffset
Definition: objects.h:8418
static const int kMaxLength
Definition: objects.h:3174
HeapObject * obj
static WeakHashTable * cast(Object *obj)
Definition: objects.h:4282
void set_bit_field2(byte value)
Definition: objects-inl.h:4047
void CopyFrom(VisitorDispatchTable *other)
void CreateFillerObjectAt(Address addr, int size)
Definition: heap.cc:4005
void set_marked_for_deoptimization(bool flag)
Definition: objects-inl.h:4560
static const int kHashShift
Definition: objects.h:8642
static int GetLastError()
Object * array_buffers_list()
Definition: heap.h:1359
MUST_USE_RESULT MaybeObject * AllocateSharedFunctionInfo(Object *name)
Definition: heap.cc:3849
bool AdvanceSweepers(int step_size)
Definition: heap.cc:6513
static JSArrayBuffer * cast(Object *obj)
static NormalizedMapCache * cast(Object *obj)
static const int kMaxLength
Definition: objects.h:8922
MUST_USE_RESULT MaybeObject * Allocate(Map *map, AllocationSpace space, AllocationSite *allocation_site=NULL)
Definition: heap.cc:4345
intptr_t * map_space_capacity
Definition: heap.h:2571
static int SizeFor(int body_size)
Definition: objects.h:5492
static intptr_t MaxVirtualMemory()
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name)
static const intptr_t kAllocatedThreshold
static const int kCapacityMask
Definition: heap.h:2758
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size)
static const byte kFalse
Definition: objects.h:9501
static void ScavengeObject(HeapObject **p, HeapObject *object)
Definition: heap-inl.h:527
Definition: objects.h:8475
void remove(HeapObject **target, int *size)
Definition: heap.h:460
static void SetWeakNext(JSFunction *function, Object *next)
Definition: heap.cc:1792
void set_visitor_id(int visitor_id)
Definition: objects-inl.h:3917
bool IsSweepingComplete()
Definition: heap.h:1785
void set_length(int value)
MUST_USE_RESULT MaybeObject * CopyConstantPoolArrayWithMap(ConstantPoolArray *src, Map *map)
Definition: heap.cc:5241
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
Definition: spaces.cc:1220
void IterateBuiltins(ObjectVisitor *v)
Definition: builtins.cc:1682
void CopyChars(sinkchar *dest, const sourcechar *src, int chars)
Definition: v8utils.h:279
static VisitorDispatchTable< ScavengingCallback > * GetTable()
Definition: heap.cc:2264
static void DeoptimizeMarkedCode(Isolate *isolate)
Definition: deoptimizer.cc:467
bool CanMoveObjectStart(HeapObject *object)
Definition: heap.cc:4019
static int SizeFor(int length)
Definition: objects.h:9078
static const int kNoScriptId
Definition: v8.h:1031
T Min(T a, T b)
Definition: utils.h:234
intptr_t * memory_allocator_capacity
Definition: heap.h:2581
static const int kEmptyHashField
Definition: objects.h:8678
bool EnsureSweeperProgress(intptr_t size_in_bytes)
Definition: spaces.cc:2632
MUST_USE_RESULT MaybeObject * AllocateJSArrayStorage(JSArray *array, int length, int capacity, ArrayStorageAllocationMode mode=DONT_INITIALIZE_ARRAY_ELEMENTS)
Definition: heap.cc:4610
static ConsString * cast(Object *obj)
virtual intptr_t SizeOfObjects()
Definition: spaces.h:2825
Definition: code-stubs.h:1528
double marking_time() const
Definition: heap.h:1756
static const int kSloppyArgumentsObjectSize
Definition: heap.h:1098
static FixedArrayBase * cast(Object *object)
Definition: objects-inl.h:2121
void set_array_buffers_list(Object *object)
Definition: heap.h:1356
void set_flags(Flags flags)
Definition: objects-inl.h:4297
static const int kInitialMaxFastElementArray
Definition: objects.h:2744
static bool CanTrack(InstanceType type)
Definition: objects-inl.h:1500
intptr_t Capacity()
Definition: heap.cc:189
MUST_USE_RESULT MaybeObject * AllocateStruct(InstanceType type)
Definition: heap.cc:5651
void EnterDirectlyIntoStoreBuffer(Address addr)
void(* GCPrologueCallback)(Isolate *isolate, GCType type, GCCallbackFlags flags)
Definition: v8.h:4345
intptr_t * old_data_space_size
Definition: heap.h:2566
ParallelSweepingState parallel_sweeping()
Definition: spaces.h:485
static void VisitLiveObject(Heap *heap, JSArrayBuffer *array_buffer, WeakObjectRetainer *retainer, bool record_slots)
Definition: heap.cc:1954
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2935
print statistics of the maximum memory committed for the heap
Definition: flags.cc:505
MUST_USE_RESULT MaybeObject * Initialize(Heap *heap, const char *to_string, Object *to_number, byte kind)
Definition: objects.cc:9886
GCTracer * tracer()
Definition: heap.h:1724
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
NewSpace * new_space()
Definition: heap.h:637
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)
SpaceIterator(Heap *heap)
Definition: heap.cc:6972
MUST_USE_RESULT MaybeObject * AllocateMap(InstanceType instance_type, int instance_size, ElementsKind elements_kind=TERMINAL_FAST_ELEMENTS_KIND)
Definition: heap.cc:2643
#define ARRAY_SIZE(a)
Definition: globals.h:333
void UpdateMaximumCommitted()
Definition: heap.cc:237
const intptr_t kDoubleAlignment
Definition: v8globals.h:52
intptr_t MaxExecutableSize()
Definition: heap.h:600
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8914
static const byte kTrue
Definition: objects.h:9502
MUST_USE_RESULT MaybeObject * AllocateRaw(int size_in_bytes, AllocationSpace space, AllocationSpace retry_space)
Definition: heap-inl.h:217
static const int kSize
Definition: objects.h:10010
Object * allocation_sites_list()
Definition: heap.h:1364
void set_hash_field(uint32_t value)
Definition: objects-inl.h:2946
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:324
void PrintShortHeapStatistics()
Definition: heap.cc:354
static JSObject * cast(Object *obj)
void ClearInlineCaches()
Definition: objects.cc:10563
static const int kStrictArgumentsObjectSize
Definition: heap.h:1101
void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, GCType gc_type_filter, bool pass_isolate=true)
Definition: heap.cc:6812
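Heap::AddGCPrologueCallback is the internal registration point behind the public v8::Isolate::AddGCPrologueCallback; the pass_isolate flag selects between the two callback typedefs listed above (v8.h:4080 and v8.h:4345). A sketch using the public embedder API; OnFullGCStart is a hypothetical embedder hook.

// Sketch only: public v8.h API; the hook body is left to the embedder.
#include "v8.h"

static void OnFullGCStart(v8::Isolate* isolate, v8::GCType type,
                          v8::GCCallbackFlags flags) {
  // e.g. record a timestamp so the matching epilogue callback can report the pause.
  (void)isolate; (void)type; (void)flags;
}

void InstallGCHook(v8::Isolate* isolate) {
  // Filter so the hook fires only for mark-sweep/compact (full) collections.
  isolate->AddGCPrologueCallback(OnFullGCStart, v8::kGCTypeMarkSweepCompact);
}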
static const int kHashMask
Definition: heap.h:2760
int signbit(double x)
static AllocationSpace TargetSpaceId(InstanceType type)
Definition: heap-inl.h:393
uint32_t RoundUpToPowerOf2(uint32_t x)
Definition: utils.h:191
OldSpace * old_data_space()
Definition: heap.h:639
MUST_USE_RESULT MaybeObject * AllocateRawTwoByteString(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:5111
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:2242
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1769
void set_raw_kind_specific_flags2(int value)
Definition: objects-inl.h:4337
MUST_USE_RESULT MaybeObject * AllocatePrivateSymbol()
Definition: heap.cc:5499
MUST_USE_RESULT MaybeObject * AllocateFunction(Map *function_map, SharedFunctionInfo *shared, Object *prototype, PretenureFlag pretenure=TENURED)
Definition: heap.cc:4386
int FastD2I(double x)
Definition: conversions.h:74
void UpdateNewSpaceReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1671
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)
bool InternalizeStringIfExists(String *str, String **result)
Definition: heap.cc:6025
static const int kAlignedSize
Definition: objects.h:4705
bool CommitFromSpaceIfNeeded()
Definition: spaces.h:2610
bool IsFastDoubleElementsKind(ElementsKind kind)
AllocationSpace identity()
Definition: spaces.h:906
static void FreeArrayBuffer(Isolate *isolate, JSArrayBuffer *phantom_array_buffer)
Definition: runtime.cc:748
void set_unused_property_fields(int value)
Definition: objects-inl.h:4027
void Free()
Definition: list.h:64
void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags)
Definition: heap.cc:1221
static const int kIsExtensible
Definition: objects.h:6477
MUST_USE_RESULT MaybeObject * AllocateStringFromTwoByte(Vector< const uc16 > str, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4939
static const int kEntriesPerBucket
Definition: heap.h:2761
static const int kPointerFieldsBeginOffset
Definition: objects.h:6444
MUST_USE_RESULT MaybeObject * CopyAndTenureFixedCOWArray(FixedArray *src)
Definition: heap.cc:5166
DependentCode * LookupWeakObjectToCodeDependency(Object *obj)
Definition: heap.cc:6873
void InitializeBody(Map *map, Object *pre_allocated_value, Object *filler_value)
Definition: objects-inl.h:2014
MUST_USE_RESULT MaybeObject * AllocateAliasedArgumentsEntry(int slot)
Definition: heap.cc:2716
intptr_t * property_cell_space_size
Definition: heap.h:2586
MemoryChunk * next_chunk() const
Definition: spaces.h:316
const int MB
Definition: globals.h:246
static JSFunction * cast(Object *obj)