V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
profile-generator.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "profile-generator-inl.h"

#include "compiler.h"
#include "debug.h"
#include "sampler.h"
#include "global-handles.h"
#include "scopeinfo.h"
#include "unicode.h"
#include "zone-inl.h"

namespace v8 {
namespace internal {


bool StringsStorage::StringsMatch(void* key1, void* key2) {
  return strcmp(reinterpret_cast<char*>(key1),
                reinterpret_cast<char*>(key2)) == 0;
}


StringsStorage::StringsStorage(Heap* heap)
    : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}


StringsStorage::~StringsStorage() {
  for (HashMap::Entry* p = names_.Start();
       p != NULL;
       p = names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }
}


const char* StringsStorage::GetCopy(const char* src) {
  int len = static_cast<int>(strlen(src));
  HashMap::Entry* entry = GetEntry(src, len);
  if (entry->value == NULL) {
    Vector<char> dst = Vector<char>::New(len + 1);
    OS::StrNCpy(dst, src, len);
    dst[len] = '\0';
    entry->key = dst.start();
    entry->value = entry->key;
  }
  return reinterpret_cast<const char*>(entry->value);
}


const char* StringsStorage::GetFormatted(const char* format, ...) {
  va_list args;
  va_start(args, format);
  const char* result = GetVFormatted(format, args);
  va_end(args);
  return result;
}


const char* StringsStorage::AddOrDisposeString(char* str, int len) {
  HashMap::Entry* entry = GetEntry(str, len);
  if (entry->value == NULL) {
    // New entry added.
    entry->key = str;
    entry->value = str;
  } else {
    DeleteArray(str);
  }
  return reinterpret_cast<const char*>(entry->value);
}


const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
  Vector<char> str = Vector<char>::New(1024);
  int len = OS::VSNPrintF(str, format, args);
  if (len == -1) {
    DeleteArray(str.start());
    return GetCopy(format);
  }
  return AddOrDisposeString(str.start(), len);
}


const char* StringsStorage::GetName(Name* name) {
  if (name->IsString()) {
    String* str = String::cast(name);
    int length = Min(kMaxNameSize, str->length());
    int actual_length = 0;
    SmartArrayPointer<char> data =
        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
                       &actual_length);
    return AddOrDisposeString(data.Detach(), actual_length);
  } else if (name->IsSymbol()) {
    return "<symbol>";
  }
  return "";
}


const char* StringsStorage::GetName(int index) {
  return GetFormatted("%d", index);
}


const char* StringsStorage::GetFunctionName(Name* name) {
  return BeautifyFunctionName(GetName(name));
}


const char* StringsStorage::GetFunctionName(const char* name) {
  return BeautifyFunctionName(GetCopy(name));
}


const char* StringsStorage::BeautifyFunctionName(const char* name) {
  return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
}


size_t StringsStorage::GetUsedMemorySize() const {
  size_t size = sizeof(*this);
  size += sizeof(HashMap::Entry) * names_.capacity();
  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
    size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
  }
  return size;
}


HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
  uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
  return names_.Lookup(const_cast<char*>(str), hash, true);
}
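
// Illustrative usage sketch, not part of the original file: StringsStorage
// interns strings by content (StringsMatch + HashSequentialString), so equal
// names collapse to a single heap copy and can be compared by pointer.
// Assuming a Heap* heap is at hand:
//
//   StringsStorage names(heap);
//   const char* a = names.GetCopy("tick");
//   const char* b = names.GetFormatted("%s", "tick");  // formatted, then interned
//   ASSERT(a == b);  // same interned pointer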


const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";


CodeEntry::~CodeEntry() {
  delete no_frame_ranges_;
}


uint32_t CodeEntry::GetCallUid() const {
  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
  if (shared_id_ != 0) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}


bool CodeEntry::IsSameAs(CodeEntry* entry) const {
  return this == entry
      || (tag_ == entry->tag_
          && shared_id_ == entry->shared_id_
          && (shared_id_ != 0
              || (name_prefix_ == entry->name_prefix_
                  && name_ == entry->name_
                  && resource_name_ == entry->resource_name_
                  && line_number_ == entry->line_number_)));
}
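
// Note, not part of the original file: comparing name_, name_prefix_ and
// resource_name_ by raw pointer above is sound because these strings are
// interned by StringsStorage, so equal contents imply equal pointers.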


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  tag_ = Logger::BUILTIN_TAG;
  builtin_id_ = id;
}


ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}


ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (map_entry->value == NULL) {
    // New node added.
    ProfileNode* new_node = new ProfileNode(tree_, entry);
    map_entry->value = new_node;
    children_list_.Add(new_node);
  }
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}


void ProfileNode::Print(int indent) {
  OS::Print("%5u %*c %s%s %d #%d %s",
            self_ticks_,
            indent, ' ',
            entry_->name_prefix(),
            entry_->name(),
            entry_->script_id(),
            id(),
            entry_->bailout_reason());
  if (entry_->resource_name()[0] != '\0')
    OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  OS::Print("\n");
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};


ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)) {
}


ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start() + path.length() - 1;
       entry != path.start() - 1;
       --entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
  return node;
}
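
// Illustrative note, not part of the original file: a sampled path is stored
// leaf-first, so AddPathFromEnd walks the vector backwards to insert callers
// before callees. For a hypothetical path {bar, foo} (foo calls bar):
//
//   CodeEntry* frames[] = { bar_entry, foo_entry };  // hypothetical entries
//   ProfileNode* leaf = tree.AddPathFromEnd(Vector<CodeEntry*>(frames, 2));
//   // Builds root -> foo -> bar; only the 'bar' node gets a self tick.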


void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start();
       entry != path.start() + path.length();
       ++entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
}


struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;

 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
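
// Illustrative sketch, not part of the original file: TraverseDepthFirst is
// duck-typed over Callback; any type providing the three hooks works, as
// DeleteNodesCallback above shows. A hypothetical post-order node counter:
//
//   class CountNodesCallback {
//    public:
//     CountNodesCallback() : count_(0) { }
//     void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
//     void AfterAllChildrenTraversed(ProfileNode*) { ++count_; }
//     void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
//     int count() const { return count_; }
//    private:
//     int count_;
//   };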


CpuProfile::CpuProfile(const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(Time::NowFromSystemTime()) {
  timer_.Start();
}


void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
  if (record_samples_) samples_.Add(top_frame_node);
}


void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = start_time_ + timer_.Elapsed();
}


void CpuProfile::Print() {
  OS::Print("[Top down]:\n");
  top_down_.Print();
}


CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;


void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}


void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
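
// Illustrative note, not part of the original file: AddCode first evicts
// every entry whose [start, start + size) range overlaps the new code
// object, so each address maps to at most one live CodeEntry. With
// hypothetical addresses and entries:
//
//   code_map.AddCode(addr, entry_a, 0x100);          // covers [addr, addr+0x100)
//   code_map.AddCode(addr + 0x80, entry_b, 0x100);   // overlap evicts entry_a
//   CodeEntry* e = code_map.FindEntry(addr + 0x90);  // resolves to entry_b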


CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
  CodeTree::Locator locator;
  if (tree_.FindGreatestLessThan(addr, &locator)) {
    // locator.key() <= addr. Need to check that addr is within entry.
    const CodeEntryInfo& entry = locator.value();
    if (addr < (locator.key() + entry.size)) {
      if (start) {
        *start = locator.key();
      }
      return entry.entry;
    }
  }
  return NULL;
}


int CodeMap::GetSharedId(Address addr) {
  CodeTree::Locator locator;
  // For shared function entries, the 'size' field is used to store their IDs.
  if (tree_.Find(addr, &locator)) {
    const CodeEntryInfo& entry = locator.value();
    ASSERT(entry.entry == kSharedFunctionCodeEntry);
    return entry.size;
  } else {
    tree_.Insert(addr, &locator);
    int id = next_shared_id_++;
    locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
    return id;
  }
}


void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  AddCode(to, entry.entry, entry.size);
}


void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  // For shared function entries, the 'size' field is used to store their IDs.
  if (value.entry == kSharedFunctionCodeEntry) {
    OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
  } else {
    OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
  }
}


void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}


CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      current_profiles_semaphore_(1) {
}


static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}


bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title.
      current_profiles_semaphore_.Signal();
      return false;
    }
  }
  current_profiles_.Add(new CpuProfile(title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}


CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}
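
// Illustrative usage sketch, not part of the original file, assuming a
// CpuProfilesCollection* profiles exists:
//
//   if (profiles->StartProfiling("my-profile", true)) {
//     // ... tick samples flow in via AddPathToCurrentProfiles ...
//     CpuProfile* p = profiles->StopProfiling("my-profile");
//     // p is now in finished_profiles_ with its end_time_ set.
//   }
//
// An empty title passed to StopProfiling matches the most recently
// started profile.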


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from the VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from the VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void CpuProfilesCollection::AddPathToCurrentProfiles(
    const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relative to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(path);
  }
  current_profiles_semaphore_.Signal();
}


CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag,
    const char* name,
    const char* name_prefix,
    const char* resource_name,
    int line_number,
    int column_number) {
  CodeEntry* code_entry = new CodeEntry(tag,
                                        name,
                                        name_prefix,
                                        resource_name,
                                        line_number,
                                        column_number);
  code_entries_.Add(code_entry);
  return code_entry;
}


const char* const ProfileGenerator::kAnonymousFunctionName =
    "(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";


ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}


void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As the actual number of decoded code entries may vary, initialize
  // the entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));
  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use the PC when in external callback code, as it can point
      // inside the callback's code, and we would erroneously report
      // that a callback calls itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      Address start;
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
      // If the pc is in the function code before it has set up the stack
      // frame, or after the frame was destroyed, SafeStackFrameIterator
      // incorrectly thinks that ebp contains the return address of the
      // current function and skips the caller's frame. Check for this case
      // and just skip such samples.
      if (pc_entry) {
        List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
        if (ranges) {
          Code* code = Code::cast(HeapObject::FromAddress(start));
          int pc_offset = static_cast<int>(
              sample.pc - code->instruction_start());
          for (int i = 0; i < ranges->length(); i++) {
            OffsetRange& range = ranges->at(i);
            if (range.from <= pc_offset && pc_offset < range.to) {
              return;
            }
          }
        }
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
            pc_entry->builtin_id() == Builtins::kFunctionApply) {
          // When the current function is the FunctionCall or FunctionApply
          // builtin, the top frame is either a frame of the calling JS
          // function or an internal frame. In the latter case we know the
          // caller for sure, but in the former case we don't, so we simply
          // replace the frame with an 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    for (const Address* stack_pos = sample.stack,
             *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry++ = code_map_.FindEntry(*stack_pos);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(entries);
}
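
// Illustrative note, not part of the original file: the decoded entries
// vector is leaf-first (pc entry, optional unresolved marker, then outer
// stack frames), and unresolvable addresses stay NULL. AddPathFromEnd skips
// the NULL holes and walks from the end, so callers sit above callees:
//
//   entries: { pc_entry, frame1_entry, NULL, frame2_entry }
//   tree:    root -> frame2 -> frame1 -> pc_entry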


CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return gc_entry_;
    case JS:
    case COMPILER:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return program_entry_;
    case IDLE:
      return idle_entry_;
    default: return NULL;
  }
}

} }  // namespace v8::internal