v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
serialize.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "natives.h"
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
#include "snapshot.h"
#include "stub-cache.h"
#include "v8threads.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Coding of external references.

// The encoding of an external reference. The type is in the high word.
// The id is in the low word.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return static_cast<uint32_t>(type) << 16 | id;
}
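

// A minimal sketch of the inverse of EncodeExternal. DecodeExternalType and
// DecodeExternalId are illustrative helpers, not part of the original file:
// with the type in the high 16 bits and the id in the low 16 bits, the
// packing round-trips losslessly for any id that fits in uint16_t.
static inline TypeCode DecodeExternalType(uint32_t encoded) {
  return static_cast<TypeCode>(encoded >> 16);  // high word holds the type
}

static inline uint16_t DecodeExternalId(uint32_t encoded) {
  return static_cast<uint16_t>(encoded & 0xffff);  // low word holds the id
}
// Example: EncodeExternal(BUILTIN, 7) yields (BUILTIN << 16) | 7, from which
// the two helpers above recover BUILTIN and 7.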


static int* GetInternalPointer(StatsCounter* counter) {
  // All counters refer to dummy_counter, if deserializing happens without
  // setting up counters.
  static int dummy_counter = 0;
  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
}


ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
  ExternalReferenceTable* external_reference_table =
      isolate->external_reference_table();
  if (external_reference_table == NULL) {
    external_reference_table = new ExternalReferenceTable(isolate);
    isolate->set_external_reference_table(external_reference_table);
  }
  return external_reference_table;
}


void ExternalReferenceTable::AddFromId(TypeCode type,
                                       uint16_t id,
                                       const char* name,
                                       Isolate* isolate) {
  Address address;
  switch (type) {
    case C_BUILTIN: {
      ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case BUILTIN: {
      ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
      address = ref.address();
      break;
    }
    case RUNTIME_FUNCTION: {
      ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case IC_UTILITY: {
      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
                            isolate);
      address = ref.address();
      break;
    }
    default:
      UNREACHABLE();
      return;
  }
  Add(address, type, id, name);
}


void ExternalReferenceTable::Add(Address address,
                                 TypeCode type,
                                 uint16_t id,
                                 const char* name) {
  ASSERT_NE(NULL, address);
  ExternalReferenceEntry entry;
  entry.address = address;
  entry.code = EncodeExternal(type, id);
  entry.name = name;
  ASSERT_NE(0, entry.code);
  refs_.Add(entry);
  if (id > max_id_[type]) max_id_[type] = id;
}


void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different type of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code. It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat. Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name, ignored) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name, ignored) \
  { BUILTIN, \
    Builtins::k##name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

#define RUNTIME_HIDDEN_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::kHidden##name, \
    "Runtime::Hidden" #name },

  RUNTIME_HIDDEN_FUNCTION_LIST(RUNTIME_HIDDEN_ENTRY)
#undef RUNTIME_HIDDEN_ENTRY

#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::kInlineOptimized##name, \
    "Runtime::" #name },

  INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_OPTIMIZED_ENTRY)
#undef INLINE_OPTIMIZED_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type,
              ref_table[i].id,
              ref_table[i].name,
              isolate);
  }
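
  // Illustration of the X-macro pattern used above (hypothetical list, not
  // part of the real tables): given
  //   #define DEMO_LIST(V) V(Foo) V(Bar)
  // expanding DEMO_LIST(DEF_ENTRY_C) with the first DEF_ENTRY_C yields
  //   { C_BUILTIN, Builtins::c_Foo, "Builtins::Foo" },
  //   { C_BUILTIN, Builtins::c_Bar, "Builtins::Bar" },
  // so each list macro (BUILTIN_LIST_C, RUNTIME_FUNCTION_LIST, IC_UTIL_LIST,
  // ...) stamps out one ref_table row per element with no per-entry code.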

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_slot_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_debug_break_slot_address << kDebugIdShift,
      "Debug::debug_break_slot_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  Add(Debug_Address(Debug::k_restarter_frame_function_pointer).address(isolate),
      DEBUG_ADDRESS,
      Debug::k_restarter_frame_function_pointer << kDebugIdShift,
      "Debug::restarter_frame_function_pointer_address()");
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* (Counters::*counter)();
    uint16_t id;
    const char* name;
  };

  const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  Counters* counters = isolate->counters();
  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(GetInternalPointer(
            (counters->*(stats_ref_table[i].counter))())),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses

  const char* AddressNames[] = {
#define BUILD_NAME_LITERAL(CamelName, hacker_name) \
    "Isolate::" #hacker_name "_address",
    FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
    NULL
#undef BUILD_NAME_LITERAL
  };

  for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
    Add(isolate->get_address_from_id((Isolate::AddressId)i),
        TOP_ADDRESS, i, AddressNames[i]);
  }

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  StubCache* stub_cache = isolate->stub_cache();

  // Stub cache tables
  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::primary_->map");
  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->key");
  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      5,
      "StubCache::secondary_->value");
  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      6,
      "StubCache::secondary_->map");

  // Runtime entries
  Add(ExternalReference::perform_gc_function(isolate).address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::out_of_memory_function(isolate).address(),
      RUNTIME_ENTRY,
      2,
      "Runtime::OutOfMemory");
  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
      RUNTIME_ENTRY,
      4,
      "HandleScope::DeleteExtensions");
  Add(ExternalReference::
          incremental_marking_record_write_function(isolate).address(),
      RUNTIME_ENTRY,
      5,
      "IncrementalMarking::RecordWrite");
  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
      RUNTIME_ENTRY,
      6,
      "StoreBuffer::StoreBufferOverflow");

  // Miscellaneous
  Add(ExternalReference::roots_array_start(isolate).address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_array_start()");
  Add(ExternalReference::address_of_stack_limit(isolate).address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
      UNCLASSIFIED,
      5,
      "StackGuard::address_of_real_jslimit()");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
      UNCLASSIFIED,
      6,
      "RegExpStack::limit_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_address(
          isolate).address(),
      UNCLASSIFIED,
      7,
      "RegExpStack::memory_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
      UNCLASSIFIED,
      8,
      "RegExpStack::memory_size()");
  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
      UNCLASSIFIED,
      9,
      "OffsetsVector::static_offsets_vector");
#endif  // V8_INTERPRETED_REGEXP
  Add(ExternalReference::new_space_start(isolate).address(),
      UNCLASSIFIED,
      10,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask(isolate).address(),
      UNCLASSIFIED,
      11,
      "Heap::NewSpaceMask()");
  Add(ExternalReference::heap_always_allocate_scope_depth(isolate).address(),
      UNCLASSIFIED,
      12,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
      UNCLASSIFIED,
      14,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
      UNCLASSIFIED,
      15,
      "Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
  Add(ExternalReference::debug_break(isolate).address(),
      UNCLASSIFIED,
      16,
      "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
      UNCLASSIFIED,
      17,
      "Debug::step_in_fp_addr()");
#endif
  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
      UNCLASSIFIED,
      22,
      "mod_two_doubles");
#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
      UNCLASSIFIED,
      24,
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
      UNCLASSIFIED,
      25,
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack(isolate).address(),
      UNCLASSIFIED,
      26,
      "NativeRegExpMacroAssembler::GrowStack()");
  Add(ExternalReference::re_word_character_map().address(),
      UNCLASSIFIED,
      27,
      "NativeRegExpMacroAssembler::word_character_map");
#endif  // V8_INTERPRETED_REGEXP
  // Keyed lookup cache.
  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
      UNCLASSIFIED,
      28,
      "KeyedLookupCache::keys()");
  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
      UNCLASSIFIED,
      29,
      "KeyedLookupCache::field_offsets()");
  Add(ExternalReference::handle_scope_next_address(isolate).address(),
      UNCLASSIFIED,
      31,
      "HandleScope::next");
  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
      UNCLASSIFIED,
      32,
      "HandleScope::limit");
  Add(ExternalReference::handle_scope_level_address(isolate).address(),
      UNCLASSIFIED,
      33,
      "HandleScope::level");
  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
      UNCLASSIFIED,
      34,
      "Deoptimizer::New()");
  Add(ExternalReference::compute_output_frames_function(isolate).address(),
      UNCLASSIFIED,
      35,
      "Deoptimizer::ComputeOutputFrames()");
  Add(ExternalReference::address_of_min_int().address(),
      UNCLASSIFIED,
      36,
      "LDoubleConstant::min_int");
  Add(ExternalReference::address_of_one_half().address(),
      UNCLASSIFIED,
      37,
      "LDoubleConstant::one_half");
  Add(ExternalReference::isolate_address(isolate).address(),
      UNCLASSIFIED,
      38,
      "isolate");
  Add(ExternalReference::address_of_minus_zero().address(),
      UNCLASSIFIED,
      39,
      "LDoubleConstant::minus_zero");
  Add(ExternalReference::address_of_negative_infinity().address(),
      UNCLASSIFIED,
      40,
      "LDoubleConstant::negative_infinity");
  Add(ExternalReference::power_double_double_function(isolate).address(),
      UNCLASSIFIED,
      41,
      "power_double_double_function");
  Add(ExternalReference::power_double_int_function(isolate).address(),
      UNCLASSIFIED,
      42,
      "power_double_int_function");
  Add(ExternalReference::store_buffer_top(isolate).address(),
      UNCLASSIFIED,
      43,
      "store_buffer_top");
  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
      UNCLASSIFIED,
      44,
      "canonical_nan");
  Add(ExternalReference::address_of_the_hole_nan().address(),
      UNCLASSIFIED,
      45,
      "the_hole_nan");
  Add(ExternalReference::get_date_field_function(isolate).address(),
      UNCLASSIFIED,
      46,
      "JSDate::GetField");
  Add(ExternalReference::date_cache_stamp(isolate).address(),
      UNCLASSIFIED,
      47,
      "date_cache_stamp");
  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
      UNCLASSIFIED,
      48,
      "address_of_pending_message_obj");
  Add(ExternalReference::address_of_has_pending_message(isolate).address(),
      UNCLASSIFIED,
      49,
      "address_of_has_pending_message");
  Add(ExternalReference::address_of_pending_message_script(isolate).address(),
      UNCLASSIFIED,
      50,
      "pending_message_script");
  Add(ExternalReference::get_make_code_young_function(isolate).address(),
      UNCLASSIFIED,
      51,
      "Code::MakeCodeYoung");
  Add(ExternalReference::cpu_features().address(),
      UNCLASSIFIED,
      52,
      "cpu_features");
  Add(ExternalReference(Runtime::kHiddenAllocateInNewSpace, isolate).address(),
      UNCLASSIFIED,
      53,
      "Runtime::AllocateInNewSpace");
  Add(ExternalReference(
          Runtime::kHiddenAllocateInTargetSpace, isolate).address(),
      UNCLASSIFIED,
      54,
      "Runtime::AllocateInTargetSpace");
  Add(ExternalReference::old_pointer_space_allocation_top_address(
          isolate).address(),
      UNCLASSIFIED,
      55,
      "Heap::OldPointerSpaceAllocationTopAddress");
  Add(ExternalReference::old_pointer_space_allocation_limit_address(
          isolate).address(),
      UNCLASSIFIED,
      56,
      "Heap::OldPointerSpaceAllocationLimitAddress");
  Add(ExternalReference::old_data_space_allocation_top_address(
          isolate).address(),
      UNCLASSIFIED,
      57,
      "Heap::OldDataSpaceAllocationTopAddress");
  Add(ExternalReference::old_data_space_allocation_limit_address(
          isolate).address(),
      UNCLASSIFIED,
      58,
      "Heap::OldDataSpaceAllocationLimitAddress");
  Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
          address(),
      UNCLASSIFIED,
      59,
      "Heap::NewSpaceAllocationLimitAddress");
  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
      UNCLASSIFIED,
      60,
      "Heap::allocation_sites_list_address()");
  Add(ExternalReference::address_of_uint32_bias().address(),
      UNCLASSIFIED,
      61,
      "uint32_bias");
  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
      UNCLASSIFIED,
      62,
      "Code::MarkCodeAsExecuted");

  // Add a small set of deopt entry addresses to encoder without generating the
  // deopt table code, which isn't possible at deserialization time.
  HandleScope scope(isolate);
  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
    Address address = Deoptimizer::GetDeoptimizationEntry(
        isolate,
        entry,
        Deoptimizer::LAZY,
        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
    Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
  }
}


ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
    : encodings_(Match),
      isolate_(isolate) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->address(i), i);
  }
}


uint32_t ExternalReferenceEncoder::Encode(Address key) const {
  int index = IndexOf(key);
  ASSERT(key == NULL || index >= 0);
  return index >= 0 ?
         ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}


const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
  int index = IndexOf(key);
  return index >= 0 ?
         ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
}


int ExternalReferenceEncoder::IndexOf(Address key) const {
  if (key == NULL) return -1;
  HashMap::Entry* entry =
      const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
  return entry == NULL
      ? -1
      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
}


void ExternalReferenceEncoder::Put(Address key, int index) {
  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
  entry->value = reinterpret_cast<void*>(index);
}


ExternalReferenceDecoder::ExternalReferenceDecoder(Isolate* isolate)
    : encodings_(NewArray<Address*>(kTypeCodeCount)),
      isolate_(isolate) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    int max = external_references->max_id(type) + 1;
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->code(i), external_references->address(i));
  }
}


ExternalReferenceDecoder::~ExternalReferenceDecoder() {
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    DeleteArray(encodings_[type]);
  }
  DeleteArray(encodings_);
}


bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;


class CodeAddressMap: public CodeEventLogger {
 public:
  explicit CodeAddressMap(Isolate* isolate)
      : isolate_(isolate) {
    isolate->logger()->addCodeEventListener(this);
  }

  virtual ~CodeAddressMap() {
    isolate_->logger()->removeCodeEventListener(this);
  }

  virtual void CodeMoveEvent(Address from, Address to) {
    address_to_name_map_.Move(from, to);
  }

  virtual void CodeDeleteEvent(Address from) {
    address_to_name_map_.Remove(from);
  }

  const char* Lookup(Address address) {
    return address_to_name_map_.Lookup(address);
  }

 private:
  class NameMap {
   public:
    NameMap() : impl_(&PointerEquals) {}

    ~NameMap() {
      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
        DeleteArray(static_cast<const char*>(p->value));
      }
    }

    void Insert(Address code_address, const char* name, int name_size) {
      HashMap::Entry* entry = FindOrCreateEntry(code_address);
      if (entry->value == NULL) {
        entry->value = CopyName(name, name_size);
      }
    }

    const char* Lookup(Address code_address) {
      HashMap::Entry* entry = FindEntry(code_address);
      return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
    }

    void Remove(Address code_address) {
      HashMap::Entry* entry = FindEntry(code_address);
      if (entry != NULL) {
        DeleteArray(static_cast<char*>(entry->value));
        RemoveEntry(entry);
      }
    }

    void Move(Address from, Address to) {
      if (from == to) return;
      HashMap::Entry* from_entry = FindEntry(from);
      ASSERT(from_entry != NULL);
      void* value = from_entry->value;
      RemoveEntry(from_entry);
      HashMap::Entry* to_entry = FindOrCreateEntry(to);
      ASSERT(to_entry->value == NULL);
      to_entry->value = value;
    }

   private:
    static bool PointerEquals(void* lhs, void* rhs) {
      return lhs == rhs;
    }

    static char* CopyName(const char* name, int name_size) {
      char* result = NewArray<char>(name_size + 1);
      for (int i = 0; i < name_size; ++i) {
        char c = name[i];
        if (c == '\0') c = ' ';
        result[i] = c;
      }
      result[name_size] = '\0';
      return result;
    }

    HashMap::Entry* FindOrCreateEntry(Address code_address) {
      return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
    }

    HashMap::Entry* FindEntry(Address code_address) {
      return impl_.Lookup(code_address,
                          ComputePointerHash(code_address),
                          false);
    }

    void RemoveEntry(HashMap::Entry* entry) {
      impl_.Remove(entry->key, entry->hash);
    }

    HashMap impl_;

    DISALLOW_COPY_AND_ASSIGN(NameMap);
  };

  virtual void LogRecordedBuffer(Code* code,
                                 SharedFunctionInfo*,
                                 const char* name,
                                 int length) {
    address_to_name_map_.Insert(code->address(), name, length);
  }

  NameMap address_to_name_map_;
  Isolate* isolate_;
};


CodeAddressMap* Serializer::code_address_map_ = NULL;


void Serializer::Enable(Isolate* isolate) {
  if (!serialization_enabled_) {
    ASSERT(!too_late_to_enable_now_);
  }
  if (serialization_enabled_) return;
  serialization_enabled_ = true;
  isolate->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate);
}


void Serializer::Disable() {
  if (!serialization_enabled_) return;
  serialization_enabled_ = false;
  delete code_address_map_;
  code_address_map_ = NULL;
}


Deserializer::Deserializer(SnapshotByteSource* source)
    : isolate_(NULL),
      source_(source),
      external_reference_decoder_(NULL) {
  for (int i = 0; i < LAST_SPACE + 1; i++) {
    reservations_[i] = kUninitializedReservation;
  }
}


void Deserializer::FlushICacheForNewCodeObjects() {
  PageIterator it(isolate_->heap()->code_space());
  while (it.has_next()) {
    Page* p = it.next();
    CPU::FlushICache(p->area_start(), p->area_end() - p->area_start());
  }
}


void Deserializer::Deserialize(Isolate* isolate) {
  isolate_ = isolate;
  ASSERT(isolate_ != NULL);
  isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
  // No active threads.
  ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
  ASSERT_EQ(NULL, external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
  isolate_->heap()->IterateSmiRoots(this);
  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
  isolate_->heap()->RepairFreeListsAfterBoot();
  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  isolate_->heap()->set_array_buffers_list(
      isolate_->heap()->undefined_value());

  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  isolate_->heap()->InitializeWeakObjectToCodeTable();

  // Update data pointers to the external strings containing natives sources.
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = isolate_->heap()->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalAsciiString::cast(source)->update_data_cache();
    }
  }

  FlushICacheForNewCodeObjects();

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}


void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
  isolate_ = isolate;
  for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
    ASSERT(reservations_[i] != kUninitializedReservation);
  }
  isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
  if (external_reference_decoder_ == NULL) {
    external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
  }

  // Keep track of the code space start and end pointers in case new
  // code objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  VisitPointer(root);

  // There's no code deserialized here. If this assert fires then that's
  // changed and logging should be added to notify the profiler et al of
  // the new code.
  CHECK_EQ(start_address, code_space->top());
}


Deserializer::~Deserializer() {
  ASSERT(source_->AtEOF());
  if (external_reference_decoder_) {
    delete external_reference_decoder_;
    external_reference_decoder_ = NULL;
  }
}


// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to try
  // to update the remembered set using NULL as the address.
  ReadChunk(start, end, NEW_SPACE, NULL);
}


void Deserializer::RelinkAllocationSite(AllocationSite* site) {
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    site->set_weak_next(isolate_->heap()->undefined_value());
  } else {
    site->set_weak_next(isolate_->heap()->allocation_sites_list());
  }
  isolate_->heap()->set_allocation_sites_list(site);
}


// This routine writes the new object into the pointer provided before its
// fields are deserialized. The reason for this strange interface is that
// otherwise the object is written very late, which means the FreeSpace map
// is not set up by the time we need to use it to mark the space at the end
// of a page free.
void Deserializer::ReadObject(int space_number,
                              Object** write_back) {
  int size = source_->GetInt() << kObjectAlignmentBits;
  Address address = Allocate(space_number, size);
  HeapObject* obj = HeapObject::FromAddress(address);
  *write_back = obj;
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
  }
  ReadChunk(current, limit, space_number, address);

  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
  // as a (weak) root. If this root is relocated correctly,
  // RelinkAllocationSite() isn't necessary.
  if (obj->IsAllocationSite()) {
    RelinkAllocationSite(AllocationSite::cast(obj));
  }

#ifdef DEBUG
  bool is_codespace = (space_number == CODE_SPACE);
  ASSERT(obj->IsCode() == is_codespace);
#endif
}

void Deserializer::ReadChunk(Object** current,
                             Object** limit,
                             int source_space,
                             Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time. In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed = (current_object_address != NULL &&
                               source_space != NEW_SPACE &&
                               source_space != CELL_SPACE &&
                               source_space != PROPERTY_CELL_SPACE &&
                               source_space != CODE_SPACE &&
                               source_space != OLD_DATA_SPACE);
  while (current < limit) {
    int data = source_->Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
      case where + how + within + space_number: \
        ASSERT((where & ~kPointedToMask) == 0); \
        ASSERT((how & ~kHowToCodeMask) == 0); \
        ASSERT((within & ~kWhereToPointMask) == 0); \
        ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any) \
      { \
        bool emit_write_barrier = false; \
        bool current_was_incremented = false; \
        int space_number = space_number_if_any == kAnyOldSpace ? \
                           (data & kSpaceMask) : space_number_if_any; \
        if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
          ReadObject(space_number, current); \
          emit_write_barrier = (space_number == NEW_SPACE); \
        } else { \
          Object* new_object = NULL;  /* May not be a real Object pointer. */ \
          if (where == kNewObject) { \
            ReadObject(space_number, &new_object); \
          } else if (where == kRootArray) { \
            int root_id = source_->GetInt(); \
            new_object = isolate->heap()->roots_array_start()[root_id]; \
            emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
          } else if (where == kPartialSnapshotCache) { \
            int cache_index = source_->GetInt(); \
            new_object = isolate->serialize_partial_snapshot_cache() \
                [cache_index]; \
            emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
          } else if (where == kExternalReference) { \
            int skip = source_->GetInt(); \
            current = reinterpret_cast<Object**>(reinterpret_cast<Address>( \
                current) + skip); \
            int reference_id = source_->GetInt(); \
            Address address = external_reference_decoder_-> \
                Decode(reference_id); \
            new_object = reinterpret_cast<Object*>(address); \
          } else if (where == kBackref) { \
            emit_write_barrier = (space_number == NEW_SPACE); \
            new_object = GetAddressFromEnd(data & kSpaceMask); \
          } else { \
            ASSERT(where == kBackrefWithSkip); \
            int skip = source_->GetInt(); \
            current = reinterpret_cast<Object**>( \
                reinterpret_cast<Address>(current) + skip); \
            emit_write_barrier = (space_number == NEW_SPACE); \
            new_object = GetAddressFromEnd(data & kSpaceMask); \
          } \
          if (within == kInnerPointer) { \
            if (space_number != CODE_SPACE || new_object->IsCode()) { \
              Code* new_code_object = reinterpret_cast<Code*>(new_object); \
              new_object = reinterpret_cast<Object*>( \
                  new_code_object->instruction_start()); \
            } else { \
              ASSERT(space_number == CODE_SPACE); \
              Cell* cell = Cell::cast(new_object); \
              new_object = reinterpret_cast<Object*>( \
                  cell->ValueAddress()); \
            } \
          } \
          if (how == kFromCode) { \
            Address location_of_branch_data = \
                reinterpret_cast<Address>(current); \
            Assembler::deserialization_set_special_target_at( \
                location_of_branch_data, \
                Code::cast(HeapObject::FromAddress(current_object_address)), \
                reinterpret_cast<Address>(new_object)); \
            location_of_branch_data += Assembler::kSpecialTargetSize; \
            current = reinterpret_cast<Object**>(location_of_branch_data); \
            current_was_incremented = true; \
          } else { \
            *current = new_object; \
          } \
        } \
        if (emit_write_barrier && write_barrier_needed) { \
          Address current_address = reinterpret_cast<Address>(current); \
          isolate->heap()->RecordWrite( \
              current_object_address, \
              static_cast<int>(current_address - current_object_address)); \
        } \
        if (!current_was_incremented) { \
          current++; \
        } \
        break; \
      } \

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 8 fall-through
// cases and one body.
#define ALL_SPACES(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, CELL_SPACE) \
  CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code: \
  case byte_code + 1: \
  case byte_code + 2: \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code) \
  FOUR_CASES(byte_code + 4) \
  FOUR_CASES(byte_code + 8) \
  FOUR_CASES(byte_code + 12)

#define COMMON_RAW_LENGTHS(f) \
  f(1)  \
  f(2)  \
  f(3)  \
  f(4)  \
  f(5)  \
  f(6)  \
  f(7)  \
  f(8)  \
  f(9)  \
  f(10) \
  f(11) \
  f(12) \
  f(13) \
  f(14) \
  f(15) \
  f(16) \
  f(17) \
  f(18) \
  f(19) \
  f(20) \
  f(21) \
  f(22) \
  f(23) \
  f(24) \
  f(25) \
  f(26) \
  f(27) \
  f(28) \
  f(29) \
  f(30) \
  f(31)

      // We generate 31 cases and bodies that process special tags that combine
      // the raw data tag and the length into one byte.
#define RAW_CASE(index) \
      case kRawData + index: { \
        byte* raw_data_out = reinterpret_cast<byte*>(current); \
        source_->CopyRaw(raw_data_out, index * kPointerSize); \
        current = \
            reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
        break; \
      }
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
        int size = source_->GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_->CopyRaw(raw_data_out, size);
        break;
      }

      SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
      SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
        int root_id = RootArrayConstantFromByteCode(data);
        Object* object = isolate->heap()->roots_array_start()[root_id];
        ASSERT(!isolate->heap()->InNewSpace(object));
        *current++ = object;
        break;
      }

      SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
      SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
        int root_id = RootArrayConstantFromByteCode(data);
        int skip = source_->GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        Object* object = isolate->heap()->roots_array_start()[root_id];
        ASSERT(!isolate->heap()->InNewSpace(object));
        *current++ = object;
        break;
      }

      case kRepeat: {
        int repeats = source_->GetInt();
        Object* object = current[-1];
        ASSERT(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) current[i] = object;
        current += repeats;
        break;
      }

      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
                    Heap::kOldSpaceRoots);
      STATIC_ASSERT(kMaxRepeats == 13);
      case kConstantRepeat:
      FOUR_CASES(kConstantRepeat + 1)
      FOUR_CASES(kConstantRepeat + 5)
      FOUR_CASES(kConstantRepeat + 9) {
        int repeats = RepeatsForCode(data);
        Object* object = current[-1];
        ASSERT(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) current[i] = object;
        current += repeats;
        break;
      }

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions. It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
      // Deserialize a new object from pointer found in code and write
      // a pointer to it to the current object. Required only for MIPS or ARM
      // with ool constant pool, and omitted on the other architectures because
      // it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS or ARM with ool constant pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      ALL_SPACES(kBackref, kPlain, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
      // Find an object in the partial snapshots cache and write a pointer to it
      // to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
                0)
      // Find a code entry in the partial snapshots cache and
      // write a pointer to it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kInnerPointer,
                0)
      // Find an external reference and write a pointer to it to the current
      // object.
      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kPlain,
                kStartOfObject,
                0)
      // Find an external reference and write a pointer to it in the current
      // code object.
      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kFromCode,
                kStartOfObject,
                0)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      case kSkip: {
        int size = source_->GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
        break;
      }

      case kNativesStringResource: {
        int index = source_->Get();
        Vector<const char> source_vector = Natives::GetRawScriptSource(index);
        NativesExternalStringResource* resource =
            new NativesExternalStringResource(isolate->bootstrapper(),
                                              source_vector.start(),
                                              source_vector.length());
        *current++ = reinterpret_cast<Object*>(resource);
        break;
      }

      case kSynchronize: {
        // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
        UNREACHABLE();
      }

      default:
        UNREACHABLE();
    }
  }
  ASSERT_EQ(limit, current);
}
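

// A sketch of how the one-byte dispatch codes above are composed, using
// hypothetical field values (the real constants and masks live in
// serialize.h). Each case label is literally the sum of its four fields, so
// serializer and deserializer agree byte-for-byte and the only runtime
// decoding needed is masking out the space bits.
enum DemoWhere { kDemoNewObject = 0x00, kDemoBackref = 0x08 };  // bits 3-4
enum DemoHow { kDemoPlain = 0x00, kDemoFromCode = 0x20 };       // bit 5
enum DemoWithin { kDemoStartOfObject = 0x00, kDemoInnerPointer = 0x40 };
static const int kDemoSpaceMask = 0x07;                         // bits 0-2

static inline int DemoByteCode(DemoWhere w, DemoHow h, DemoWithin p,
                               int space) {
  // The fields occupy disjoint bits, so addition behaves like bitwise or.
  return w + h + p + (space & kDemoSpaceMask);
}
// e.g. DemoByteCode(kDemoBackref, kDemoPlain, kDemoStartOfObject, 2) == 0x0a,
// and (0x0a & kDemoSpaceMask) == 2 recovers the space, just as CASE_BODY
// does with (data & kSpaceMask).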


void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
  ASSERT(integer < 1 << 22);
  integer <<= 2;
  int bytes = 1;
  if (integer > 0xff) bytes = 2;
  if (integer > 0xffff) bytes = 3;
  integer |= bytes;
  Put(static_cast<int>(integer & 0xff), "IntPart1");
  if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
  if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
}
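

// A sketch of the matching decode for PutInt, assuming only the format
// visible above: the value is stored shifted left by two, with the total
// byte count (1-3) in the low two bits of the first (least significant)
// byte. The real counterpart is SnapshotByteSource::GetInt; DemoGetInt is
// an illustrative stand-in.
static inline uintptr_t DemoGetInt(const byte* p) {
  uintptr_t answer = p[0];
  int bytes = answer & 3;  // low two bits of IntPart1 hold the byte count
  if (bytes > 1) answer |= static_cast<uintptr_t>(p[1]) << 8;
  if (bytes > 2) answer |= static_cast<uintptr_t>(p[2]) << 16;
  return answer >> 2;  // drop the length tag
}
// Example: PutInt(0x1234, ...) writes 0x1234 << 2 | 2 == 0x48d2 as the bytes
// 0xd2, 0x48, and DemoGetInt reads them back as 0x1234.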


Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
    : isolate_(isolate),
      sink_(sink),
      external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
      root_index_wave_front_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  for (int i = 0; i <= LAST_SPACE; i++) {
    fullness_[i] = 0;
  }
}


Serializer::~Serializer() {
  delete external_reference_encoder_;
}


void StartupSerializer::SerializeStrongReferences() {
  Isolate* isolate = this->isolate();
  // No active threads.
  CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse());
  // No active or weak handles.
  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
  CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
  // We don't support serializing installed extensions.
  CHECK(!isolate->has_installed_extensions());
  isolate->heap()->IterateSmiRoots(this);
  isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}


void PartialSerializer::Serialize(Object** object) {
  this->VisitPointer(object);
  Pad();
}


bool Serializer::ShouldBeSkipped(Object** current) {
  Object** roots = isolate()->heap()->roots_array_start();
  return current == &roots[Heap::kStoreBufferTopRootIndex]
      || current == &roots[Heap::kStackLimitRootIndex]
      || current == &roots[Heap::kRealStackLimitRootIndex];
}


void Serializer::VisitPointers(Object** start, Object** end) {
  Isolate* isolate = this->isolate();

  for (Object** current = start; current < end; current++) {
    if (start == isolate->heap()->roots_array_start()) {
      root_index_wave_front_ =
          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
    }
    if (ShouldBeSkipped(current)) {
      sink_->Put(kSkip, "Skip");
      sink_->PutInt(kPointerSize, "SkipOneWord");
    } else if ((*current)->IsSmi()) {
      sink_->Put(kRawData + 1, "Smi");
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
      SerializeObject(*current, kPlain, kStartOfObject, 0);
    }
  }
}


// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement. When it is called during serialization of the startup
// snapshot nothing happens. When the partial (context) snapshot is created,
// this array is populated with the pointers that the partial snapshot will
// need. As that happens we emit serialized objects to the startup snapshot
// that correspond to the elements of this cache array. On deserialization we
// therefore need to visit the cache array. This fills it up with pointers to
// deserialized objects.
void SerializerDeserializer::Iterate(Isolate* isolate,
                                     ObjectVisitor* visitor) {
  if (Serializer::enabled()) return;
  for (int i = 0; ; i++) {
    if (isolate->serialize_partial_snapshot_cache_length() <= i) {
      // Extend the array ready to get a value from the visitor when
      // deserializing.
      isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
    }
    Object** cache = isolate->serialize_partial_snapshot_cache();
    visitor->VisitPointers(&cache[i], &cache[i + 1]);
    // Sentinel is the undefined object, which is a root so it will not normally
    // be found in the cache.
    if (cache[i] == isolate->heap()->undefined_value()) {
      break;
    }
  }
}


int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
  Isolate* isolate = this->isolate();

  for (int i = 0;
       i < isolate->serialize_partial_snapshot_cache_length();
       i++) {
    Object* entry = isolate->serialize_partial_snapshot_cache()[i];
    if (entry == heap_object) return i;
  }

  // We didn't find the object in the cache. So we add it to the cache and
  // then visit the pointer so that it becomes part of the startup snapshot
  // and we can refer to it from the partial snapshot.
  int length = isolate->serialize_partial_snapshot_cache_length();
  isolate->PushToPartialSnapshotCache(heap_object);
  startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
  // We don't recurse from the startup snapshot generator into the partial
  // snapshot generator.
  ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1);
  return length;
}


int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
  Heap* heap = isolate()->heap();
  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
  for (int i = 0; i < root_index_wave_front_; i++) {
    Object* root = heap->roots_array_start()[i];
    if (!root->IsSmi() && root == heap_object) {
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
      if (from == kFromCode) {
        // In order to avoid code bloat in the deserializer we don't have
        // support for the encoding that specifies a particular root should
        // be written from within code.
        return kInvalidRootIndex;
      }
#endif
      return i;
    }
  }
  return kInvalidRootIndex;
}


// Encode the location of an already deserialized object in order to write its
// location into a later object. We can encode the location as an offset from
// the start of the deserialized objects or as an offset backwards from the
// current allocation pointer.
void Serializer::SerializeReferenceToPreviousObject(
    int space,
    int address,
    HowToCode how_to_code,
    WhereToPoint where_to_point,
    int skip) {
  int offset = CurrentAllocationAddress(space) - address;
  // Shift out the bits that are always 0.
  offset >>= kObjectAlignmentBits;
  if (skip == 0) {
    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
  } else {
    sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
               "BackRefSerWithSkip");
    sink_->PutInt(skip, "BackRefSkipDistance");
  }
  sink_->PutInt(offset, "offset");
}
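

// Worked example with hypothetical numbers: if CurrentAllocationAddress for
// OLD_POINTER_SPACE is 0x5000 and the referenced object was laid out at
// 0x4fe0, the offset is 0x20 bytes, emitted as 0x20 >> kObjectAlignmentBits
// (4 words on a 64-bit build). The stream then holds one dispatch byte,
// kBackref + how_to_code + where_to_point + space, followed by PutInt(4);
// with a nonzero skip, kBackrefWithSkip is used and the skip distance is
// emitted first.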


void StartupSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point,
    int skip) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  int root_index;
  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
    return;
  }

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point,
                                       skip);
  } else {
    if (skip != 0) {
      sink_->Put(kSkip, "FlushPendingSkip");
      sink_->PutInt(skip, "SkipDistance");
    }

    // Object has not yet been serialized. Serialize it here.
    ObjectSerializer object_serializer(this,
                                       heap_object,
                                       sink_,
                                       how_to_code,
                                       where_to_point);
    object_serializer.Serialize();
  }
}


void StartupSerializer::SerializeWeakReferences() {
  // This phase comes right after the partial serialization (of the snapshot).
  // After we have done the partial serialization the partial snapshot cache
  // will contain some references needed to decode the partial snapshot. We
  // add one entry with 'undefined' which is the sentinel that the deserializer
  // uses to know it is done deserializing the array.
  Object* undefined = isolate()->heap()->undefined_value();
  VisitPointer(&undefined);
  isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
  Pad();
}


void Serializer::PutRoot(int root_index,
                         HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
  if (how_to_code == kPlain &&
      where_to_point == kStartOfObject &&
      root_index < kRootArrayNumberOfConstantEncodings &&
      !isolate()->heap()->InNewSpace(object)) {
    if (skip == 0) {
      sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
                 "RootConstant");
    } else {
      sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
                 "RootConstant");
      sink_->PutInt(skip, "SkipInPutRoot");
    }
  } else {
    if (skip != 0) {
      sink_->Put(kSkip, "SkipFromPutRoot");
      sink_->PutInt(skip, "SkipFromPutRootDistance");
    }
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}


void PartialSerializer::SerializeObject(
    Object* o,
    HowToCode how_to_code,
    WhereToPoint where_to_point,
    int skip) {
  CHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);

  if (heap_object->IsMap()) {
    // The code-caches link to context-specific code objects, which
    // the startup and context serializers cannot currently handle.
    ASSERT(Map::cast(heap_object)->code_cache() ==
           heap_object->GetHeap()->empty_fixed_array());
  }

  int root_index;
  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
    PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
    return;
  }

  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
    if (skip != 0) {
      sink_->Put(kSkip, "SkipFromSerializeObject");
      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
    }

    int cache_index = PartialSnapshotCacheIndex(heap_object);
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
  // All the internalized strings that the partial snapshot needs should be
  // either in the root table or in the partial snapshot cache.
  ASSERT(!heap_object->IsInternalizedString());

  if (address_mapper_.IsMapped(heap_object)) {
    int space = SpaceOfObject(heap_object);
    int address = address_mapper_.MappedTo(heap_object);
    SerializeReferenceToPreviousObject(space,
                                       address,
                                       how_to_code,
                                       where_to_point,
                                       skip);
  } else {
    if (skip != 0) {
      sink_->Put(kSkip, "SkipFromSerializeObject");
      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
    }
    // Object has not yet been serialized. Serialize it here.
    ObjectSerializer serializer(this,
                                heap_object,
                                sink_,
                                how_to_code,
                                where_to_point);
    serializer.Serialize();
  }
}


void Serializer::ObjectSerializer::Serialize() {
  int space = Serializer::SpaceOfObject(object_);
  int size = object_->Size();

  sink_->Put(kNewObject + reference_representation_ + space,
             "ObjectSerialization");
  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");

  ASSERT(code_address_map_);
  const char* code_name = code_address_map_->Lookup(object_->address());
  LOG(serializer_->isolate_,
      CodeNameEvent(object_->address(), sink_->Position(), code_name));
  LOG(serializer_->isolate_,
      SnapshotPositionEvent(object_->address(), sink_->Position()));

  // Mark this object as already serialized.
  int offset = serializer_->Allocate(space, size);
  serializer_->address_mapper()->AddMapping(object_, offset);

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;
  object_->IterateBody(object_->map()->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}


void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->RootIndex(current_contents, kPlain);
      // Repeats are not subject to the write barrier so there are only some
      // objects that can be used in a repeat encoding. These are the early
      // ones in the root array that are never in new space.
      if (current != start &&
          root_index != kInvalidRootIndex &&
          root_index < kRootArrayNumberOfConstantEncodings &&
          current_contents == current[-1]) {
        ASSERT(!serializer_->isolate()->heap()->InNewSpace(current_contents));
        int repeat_count = 1;
        while (current < end - 1 && current[repeat_count] == current_contents) {
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
        if (repeat_count > kMaxRepeats) {
          sink_->Put(kRepeat, "SerializeRepeats");
          sink_->PutInt(repeat_count, "SerializeRepeats");
        } else {
          sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
        }
      } else {
        serializer_->SerializeObject(
            current_contents, kPlain, kStartOfObject, 0);
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
    }
  }
}
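

// Worked example of the repeat encoding above (hypothetical object): a
// FixedArray whose slots 3..10 all hold the undefined root serializes slot 3
// normally, then covers the remaining seven copies with a single byte,
// CodeForRepeats(7), since 7 <= kMaxRepeats (13). A run longer than 13 falls
// back to the kRepeat byte followed by PutInt(repeat_count). Only roots that
// can never be in new space qualify, because the deserializer writes the
// copies without a write barrier.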


void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;

  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
  serializer_->SerializeObject(object, how_to_code, kStartOfObject, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += kPointerSize;
}


void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_reference();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}


void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;

  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}
1713 
1714 
1715 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
1716  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
1717  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1718  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1719  bytes_processed_so_far_ += kPointerSize;
1720 }
1721 
1722 
1723 void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
1724  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1725  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1726 
1727  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
1728  Cell* object = Cell::cast(rinfo->target_cell());
1729  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1730 }
1731 
1732 
1733 void Serializer::ObjectSerializer::VisitExternalAsciiString(
1734  v8::String::ExternalAsciiStringResource** resource_pointer) {
1735  Address references_start = reinterpret_cast<Address>(resource_pointer);
1736  OutputRawData(references_start);
1737  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
1738  Object* source =
1739  serializer_->isolate()->heap()->natives_source_cache()->get(i);
1740  if (!source->IsUndefined()) {
1741  ExternalAsciiString* string = ExternalAsciiString::cast(source);
1742  typedef v8::String::ExternalAsciiStringResource Resource;
1743  const Resource* resource = string->resource();
1744  if (resource == *resource_pointer) {
1745  sink_->Put(kNativesStringResource, "NativesStringResource");
1746  sink_->PutSection(i, "NativesStringResourceEnd");
1747  bytes_processed_so_far_ += sizeof(resource);
1748  return;
1749  }
1750  }
1751  }
1752  // One of the strings in the natives cache should match the resource. We
1753  // can't serialize any other kinds of external strings.
1754  UNREACHABLE();
1755 }
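External ASCII strings are serialized by identity rather than by content: the visitor scans the natives source cache for the string whose resource pointer matches, and emits only the cache index, which the deserializer can use to re-attach the same resource. A table-lookup sketch of that idea (the types and the opcode value are stand-ins):

// Sketch: serialize an external resource as an index into a known table.
#include <cstddef>
#include <cstdint>
#include <vector>

static const uint8_t kNativesStringResource = 0xA0;  // hypothetical opcode

// Returns false if the resource is not one we know how to re-create.
bool PutNativesResource(std::vector<uint8_t>* sink,
                        const std::vector<const void*>& natives_resources,
                        const void* resource) {
  for (size_t i = 0; i < natives_resources.size(); i++) {
    if (natives_resources[i] == resource) {
      sink->push_back(kNativesStringResource);
      sink->push_back(static_cast<uint8_t>(i));  // real code uses PutSection
      return true;
    }
  }
  return false;  // the real visitor hits UNREACHABLE() in this case
}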
1756 
1757 
1758 static Code* CloneCodeObject(HeapObject* code) {
1759  Address copy = new byte[code->Size()];
1760  OS::MemCopy(copy, code->address(), code->Size());
1761  return Code::cast(HeapObject::FromAddress(copy));
1762 }
1763 
1764 
1765 static void WipeOutRelocations(Code* code) {
1766  int mode_mask =
1767  RelocInfo::kCodeTargetMask |
1768  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
1769  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
1770  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
1771  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
1772  if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
1773  it.rinfo()->WipeOut();
1774  }
1775  }
1776 }
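Because code objects embed absolute addresses that differ from run to run, the serializer clones the code into a scratch buffer and wipes every relocated field before emitting bytes; only the clone is mutated, so the live heap is untouched. A standalone sketch of the clone-then-wipe pattern (the blob type and its recorded offsets are hypothetical):

// Illustrative clone-and-wipe; the types and offsets are made up for the sketch.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct FakeCodeBlob {
  const uint8_t* bytes;
  size_t size;
  std::vector<size_t> reloc_offsets;  // offsets of pointer-sized fields to wipe
};

std::vector<uint8_t> CloneAndWipe(const FakeCodeBlob& code) {
  std::vector<uint8_t> copy(code.bytes, code.bytes + code.size);  // clone first
  for (size_t offset : code.reloc_offsets) {
    // Zero each embedded pointer so two runs produce identical bytes.
    std::memset(copy.data() + offset, 0, sizeof(void*));
  }
  return copy;  // emit this buffer, never the original
}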
1777 
1778 
1779 int Serializer::ObjectSerializer::OutputRawData(
1780  Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
1781  Address object_start = object_->address();
1782  int base = bytes_processed_so_far_;
1783  int up_to_offset = static_cast<int>(up_to - object_start);
1784  int to_skip = up_to_offset - bytes_processed_so_far_;
1785  int bytes_to_output = to_skip;
1786  bytes_processed_so_far_ += to_skip;
1787  // This assert will fail if the reloc info gives us the target_address_address
1788  // locations in non-ascending order. Luckily that doesn't happen.
1789  ASSERT(to_skip >= 0);
1790  bool outputting_code = false;
1791  if (to_skip != 0 && code_object_ && !code_has_been_output_) {
1792  // Output the code all at once and fix later.
1793  bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
1794  outputting_code = true;
1795  code_has_been_output_ = true;
1796  }
1797  if (bytes_to_output != 0 &&
1798  (!code_object_ || outputting_code)) {
1799 #define RAW_CASE(index) \
1800  if (!outputting_code && bytes_to_output == index * kPointerSize && \
1801  index * kPointerSize == to_skip) { \
1802  sink_->PutSection(kRawData + index, "RawDataFixed"); \
1803  to_skip = 0; /* This insn already skips. */ \
1804  } else /* NOLINT */
1805  COMMON_RAW_LENGTHS(RAW_CASE)
1806 #undef RAW_CASE
1807  { /* NOLINT */
1808  // We always end up here if we are outputting the code of a code object.
1809  sink_->Put(kRawData, "RawData");
1810  sink_->PutInt(bytes_to_output, "length");
1811  }
1812 
1813  // To make snapshots reproducible, we need to wipe out all pointers in code.
1814  if (code_object_) {
1815  Code* code = CloneCodeObject(object_);
1816  WipeOutRelocations(code);
1817  // We need to wipe out the header fields *after* wiping out the
1818  // relocations, because some of these fields are needed for the latter.
1819  code->WipeOutHeader();
1820  object_start = code->address();
1821  }
1822 
1823  const char* description = code_object_ ? "Code" : "Byte";
1824  for (int i = 0; i < bytes_to_output; i++) {
1825  sink_->PutSection(object_start[base + i], description);
1826  }
1827  if (code_object_) delete[] object_start;
1828  }
1829  if (to_skip != 0 && return_skip == kIgnoringReturn) {
1830  sink_->Put(kSkip, "Skip");
1831  sink_->PutInt(to_skip, "SkipDistance");
1832  to_skip = 0;
1833  }
1834  return to_skip;
1835 }
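OutputRawData interleaves two encodings: short, pointer-aligned spans get a single fixed-length opcode (the RAW_CASE fast path), anything else gets kRawData plus an explicit length, and a residual skip is either emitted as kSkip or handed back to the caller to fold into its own opcode. A compact sketch of that decision, assuming hypothetical opcode values and limits:

// Sketch of the raw-data/skip decision; opcodes and limits are assumptions.
#include <cstdint>
#include <vector>

static const uint8_t kRawData = 0x30;  // hypothetical
static const uint8_t kSkip = 0x0F;     // hypothetical
static const int kPointerSize = 8;
static const int kMaxFixedRawWords = 4;  // RAW_CASE covers a few small sizes

int OutputRawData(std::vector<uint8_t>* sink, const uint8_t* data,
                  int bytes_to_output, int to_skip, bool can_return_skip) {
  if (bytes_to_output == to_skip && to_skip % kPointerSize == 0 &&
      to_skip <= kMaxFixedRawWords * kPointerSize) {
    // Fixed-length form: one opcode encodes both the length and the skip.
    sink->push_back(kRawData + to_skip / kPointerSize);
    to_skip = 0;
  } else {
    sink->push_back(kRawData);
    sink->push_back(static_cast<uint8_t>(bytes_to_output));  // real code: PutInt
  }
  sink->insert(sink->end(), data, data + bytes_to_output);
  if (to_skip != 0 && !can_return_skip) {
    sink->push_back(kSkip);
    sink->push_back(static_cast<uint8_t>(to_skip));  // real code: PutInt
    to_skip = 0;
  }
  return to_skip;  // nonzero only when the caller agreed to consume the skip
}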
1836 
1837 
1838 int Serializer::SpaceOfObject(HeapObject* object) {
1839  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
1840  AllocationSpace s = static_cast<AllocationSpace>(i);
1841  if (object->GetHeap()->InSpace(object, s)) {
1842  ASSERT(i < kNumberOfSpaces);
1843  return i;
1844  }
1845  }
1846  UNREACHABLE();
1847  return 0;
1848 }
1849 
1850 
1851 int Serializer::Allocate(int space, int size) {
1852  CHECK(space >= 0 && space < kNumberOfSpaces);
1853  int allocation_address = fullness_[space];
1854  fullness_[space] = allocation_address + size;
1855  return allocation_address;
1856 }
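Allocate performs no real allocation: it only simulates the deserializer's bump-pointer allocation so back-references can be encoded as (space, address) pairs. The equivalent standalone bookkeeping, assuming a fixed space count for the sketch:

// Standalone version of the bump-pointer bookkeeping in Serializer::Allocate.
#include <cassert>

static const int kNumberOfSpaces = 8;  // assumption for the sketch

struct AllocationSimulator {
  int fullness[kNumberOfSpaces] = {0};  // bytes handed out so far, per space

  int Allocate(int space, int size) {
    assert(space >= 0 && space < kNumberOfSpaces);
    int allocation_address = fullness[space];  // address = current fill level
    fullness[space] = allocation_address + size;
    return allocation_address;
  }
};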
1857 
1858 
1859 int Serializer::SpaceAreaSize(int space) {
1860  if (space == CODE_SPACE) {
1861  return isolate_->memory_allocator()->CodePageAreaSize();
1862  } else {
1863  return Page::kPageSize - Page::kObjectStartOffset;
1864  }
1865 }
1866 
1867 
1868 void Serializer::Pad() {
1869  // The non-branching GetInt will read up to 3 bytes too far, so we need
1870  // to pad the snapshot to make sure we don't read over the end.
1871  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
1872  sink_->Put(kNop, "Padding");
1873  }
1874 }
1875 
1876 
1877 bool SnapshotByteSource::AtEOF() {
1878  if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
1879  for (int x = position_; x < length_; x++) {
1880  if (data_[x] != SerializerDeserializer::nop()) return false;
1881  }
1882  return true;
1883 }
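Pad and AtEOF cooperate: the serializer appends sizeof(int32_t) - 1 nop bytes so the deserializer's branch-free GetInt can safely over-read, and AtEOF treats a tail consisting only of nops (within two words of the end) as end-of-stream. A sketch of the pair over a plain byte buffer, with a hypothetical nop value:

// Sketch of the padding contract; kNop's value is a stand-in.
#include <cstddef>
#include <cstdint>
#include <vector>

static const uint8_t kNop = 0x00;  // hypothetical nop byte code

void Pad(std::vector<uint8_t>* snapshot) {
  // GetInt may read up to 3 bytes past the last opcode, so pad by 3.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) snapshot->push_back(kNop);
}

bool AtEOF(const std::vector<uint8_t>& data, size_t position) {
  if (data.size() - position > 2 * sizeof(uint32_t)) return false;
  for (size_t x = position; x < data.size(); x++) {
    if (data[x] != kNop) return false;  // real payload remains
  }
  return true;  // only padding left: end of stream
}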
1884 
1885 } } // namespace v8::internal