V8 3.14.5 (Node.js 0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
profile-generator.h
Go to the documentation of this file.
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_PROFILE_GENERATOR_H_
29 #define V8_PROFILE_GENERATOR_H_
30 
31 #include "allocation.h"
32 #include "hashmap.h"
33 #include "../include/v8-profiler.h"
34 
35 namespace v8 {
36 namespace internal {
37 
39  public:
42  int GetTokenId(Object* token);
43 
44  static const int kNoSecurityToken = -1;
45  static const int kInheritsSecurityToken = -2;
46 
47  private:
48  static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
49  void* parameter);
50  void TokenRemoved(Object** token_location);
51 
52  List<Object**> token_locations_;
53  List<bool> token_removed_;
54 
55  friend class TokenEnumeratorTester;
56 
58 };
59 
60 
61 // Provides a storage of strings allocated in C++ heap, to hold them
62 // forever, even if they disappear from JS heap or external storage.
64  public:
67 
68  const char* GetCopy(const char* src);
69  const char* GetFormatted(const char* format, ...);
70  const char* GetVFormatted(const char* format, va_list args);
71  const char* GetName(String* name);
72  const char* GetName(int index);
73  inline const char* GetFunctionName(String* name);
74  inline const char* GetFunctionName(const char* name);
75  size_t GetUsedMemorySize() const;
76 
77  private:
78  static const int kMaxNameSize = 1024;
79 
80  INLINE(static bool StringsMatch(void* key1, void* key2)) {
81  return strcmp(reinterpret_cast<char*>(key1),
82  reinterpret_cast<char*>(key2)) == 0;
83  }
84  const char* AddOrDisposeString(char* str, uint32_t hash);
85 
86  // Mapping of strings by String::Hash to const char* strings.
87  HashMap names_;
88 
89  DISALLOW_COPY_AND_ASSIGN(StringsStorage);
90 };
91 
92 
93 class CodeEntry {
94  public:
95  // CodeEntry doesn't own name strings, just references them.
97  const char* name_prefix,
98  const char* name,
99  const char* resource_name,
100  int line_number,
101  int security_token_id));
102 
103  INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
104  INLINE(const char* name_prefix() const) { return name_prefix_; }
105  INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
106  INLINE(const char* name() const) { return name_; }
107  INLINE(const char* resource_name() const) { return resource_name_; }
108  INLINE(int line_number() const) { return line_number_; }
109  INLINE(int shared_id() const) { return shared_id_; }
110  INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
111  INLINE(int security_token_id() const) { return security_token_id_; }
112 
113  INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
114 
115  void CopyData(const CodeEntry& source);
116  uint32_t GetCallUid() const;
117  bool IsSameAs(CodeEntry* entry) const;
118 
119  static const char* const kEmptyNamePrefix;
120 
121  private:
123  const char* name_prefix_;
124  const char* name_;
125  const char* resource_name_;
126  int line_number_;
127  int shared_id_;
128  int security_token_id_;
129 
130  DISALLOW_COPY_AND_ASSIGN(CodeEntry);
131 };
132 
133 
134 class ProfileTree;
135 
136 class ProfileNode {
137  public:
138  INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
139 
142  INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
143  INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
144  INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
145 
146  INLINE(CodeEntry* entry() const) { return entry_; }
147  INLINE(unsigned self_ticks() const) { return self_ticks_; }
148  INLINE(unsigned total_ticks() const) { return total_ticks_; }
149  INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
150  double GetSelfMillis() const;
151  double GetTotalMillis() const;
152 
153  void Print(int indent);
154 
155  private:
156  INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
157  return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
158  reinterpret_cast<CodeEntry*>(entry2));
159  }
160 
161  INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
162  return entry->GetCallUid();
163  }
164 
165  ProfileTree* tree_;
166  CodeEntry* entry_;
167  unsigned total_ticks_;
168  unsigned self_ticks_;
169  // Mapping from CodeEntry* to ProfileNode*
170  HashMap children_;
171  List<ProfileNode*> children_list_;
172 
173  DISALLOW_COPY_AND_ASSIGN(ProfileNode);
174 };
175 
176 
177 class ProfileTree {
178  public:
179  ProfileTree();
180  ~ProfileTree();
181 
182  void AddPathFromEnd(const Vector<CodeEntry*>& path);
183  void AddPathFromStart(const Vector<CodeEntry*>& path);
184  void CalculateTotalTicks();
185  void FilteredClone(ProfileTree* src, int security_token_id);
186 
187  double TicksToMillis(unsigned ticks) const {
188  return ticks * ms_to_ticks_scale_;
189  }
190  ProfileNode* root() const { return root_; }
191  void SetTickRatePerMs(double ticks_per_ms);
192 
193  void ShortPrint();
194  void Print() {
195  root_->Print(0);
196  }
197 
198  private:
199  template <typename Callback>
200  void TraverseDepthFirst(Callback* callback);
201 
202  CodeEntry root_entry_;
203  ProfileNode* root_;
204  double ms_to_ticks_scale_;
205 
206  DISALLOW_COPY_AND_ASSIGN(ProfileTree);
207 };
208 
209 
210 class CpuProfile {
211  public:
212  CpuProfile(const char* title, unsigned uid)
213  : title_(title), uid_(uid) { }
214 
215  // Add pc -> ... -> main() call path to the profile.
216  void AddPath(const Vector<CodeEntry*>& path);
217  void CalculateTotalTicks();
218  void SetActualSamplingRate(double actual_sampling_rate);
219  CpuProfile* FilteredClone(int security_token_id);
220 
221  INLINE(const char* title() const) { return title_; }
222  INLINE(unsigned uid() const) { return uid_; }
223  INLINE(const ProfileTree* top_down() const) { return &top_down_; }
224  INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
225 
226  void UpdateTicksScale();
227 
228  void ShortPrint();
229  void Print();
230 
231  private:
232  const char* title_;
233  unsigned uid_;
234  ProfileTree top_down_;
235  ProfileTree bottom_up_;
236 
237  DISALLOW_COPY_AND_ASSIGN(CpuProfile);
238 };
239 
240 
241 class CodeMap {
242  public:
243  CodeMap() : next_shared_id_(1) { }
244  void AddCode(Address addr, CodeEntry* entry, unsigned size);
245  void MoveCode(Address from, Address to);
246  CodeEntry* FindEntry(Address addr);
247  int GetSharedId(Address addr);
248 
249  void Print();
250 
251  private:
252  struct CodeEntryInfo {
253  CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
254  : entry(an_entry), size(a_size) { }
255  CodeEntry* entry;
256  unsigned size;
257  };
258 
259  struct CodeTreeConfig {
260  typedef Address Key;
261  typedef CodeEntryInfo Value;
262  static const Key kNoKey;
263  static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
264  static int Compare(const Key& a, const Key& b) {
265  return a < b ? -1 : (a > b ? 1 : 0);
266  }
267  };
268  typedef SplayTree<CodeTreeConfig> CodeTree;
269 
270  class CodeTreePrinter {
271  public:
272  void Call(const Address& key, const CodeEntryInfo& value);
273  };
274 
275  void DeleteAllCoveredCode(Address start, Address end);
276 
277  // Fake CodeEntry pointer to distinguish shared function entries.
278  static CodeEntry* const kSharedFunctionCodeEntry;
279 
280  CodeTree tree_;
281  int next_shared_id_;
282 
283  DISALLOW_COPY_AND_ASSIGN(CodeMap);
284 };
285 
286 
288  public:
291 
292  bool StartProfiling(const char* title, unsigned uid);
293  bool StartProfiling(String* title, unsigned uid);
294  CpuProfile* StopProfiling(int security_token_id,
295  const char* title,
296  double actual_sampling_rate);
297  List<CpuProfile*>* Profiles(int security_token_id);
298  const char* GetName(String* name) {
299  return function_and_resource_names_.GetName(name);
300  }
301  const char* GetName(int args_count) {
302  return function_and_resource_names_.GetName(args_count);
303  }
304  CpuProfile* GetProfile(int security_token_id, unsigned uid);
305  bool IsLastProfile(const char* title);
306  void RemoveProfile(CpuProfile* profile);
307  bool HasDetachedProfiles() { return detached_profiles_.length() > 0; }
308 
310  String* name, String* resource_name, int line_number);
311  CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
313  const char* name_prefix, String* name);
314  CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
315  CodeEntry* NewCodeEntry(int security_token_id);
316 
317  // Called from profile generator thread.
319 
320  // Limits the number of profiles that can be simultaneously collected.
321  static const int kMaxSimultaneousProfiles = 100;
322 
323  private:
324  const char* GetFunctionName(String* name) {
325  return function_and_resource_names_.GetFunctionName(name);
326  }
327  const char* GetFunctionName(const char* name) {
328  return function_and_resource_names_.GetFunctionName(name);
329  }
330  int GetProfileIndex(unsigned uid);
331  List<CpuProfile*>* GetProfilesList(int security_token_id);
332  int TokenToIndex(int security_token_id);
333 
334  INLINE(static bool UidsMatch(void* key1, void* key2)) {
335  return key1 == key2;
336  }
337 
338  StringsStorage function_and_resource_names_;
339  List<CodeEntry*> code_entries_;
340  List<List<CpuProfile*>* > profiles_by_token_;
341  // Mapping from profiles' uids to indexes in the second nested list
342  // of profiles_by_token_.
343  HashMap profiles_uids_;
344  List<CpuProfile*> detached_profiles_;
345 
346  // Accessed by VM thread and profile generator thread.
347  List<CpuProfile*> current_profiles_;
348  Semaphore* current_profiles_semaphore_;
349 
350  DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
351 };
352 
353 
355  public:
357  : result_(Logger::kSamplingIntervalMs * kResultScale),
358  ticks_per_ms_(Logger::kSamplingIntervalMs),
359  measurements_count_(0),
360  wall_time_query_countdown_(1) {
361  }
362 
363  double ticks_per_ms() {
364  return result_ / static_cast<double>(kResultScale);
365  }
366  void Tick();
367  void UpdateMeasurements(double current_time);
368 
369  // Instead of querying current wall time each tick,
370  // we use this constant to control query intervals.
371  static const unsigned kWallTimeQueryIntervalMs = 100;
372 
373  private:
374  // As the result needs to be accessed from a different thread, we
375  // use type that guarantees atomic writes to memory. There should
376  // be <= 1000 ticks per second, thus storing a value of a 10 ** 5
377  // order should provide enough precision while keeping away from a
378  // potential overflow.
379  static const int kResultScale = 100000;
380 
381  AtomicWord result_;
382  // All other fields are accessed only from the sampler thread.
383  double ticks_per_ms_;
384  unsigned measurements_count_;
385  unsigned wall_time_query_countdown_;
386  double last_wall_time_;
387 
388  DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
389 };
390 
391 
393  public:
394  explicit ProfileGenerator(CpuProfilesCollection* profiles);
395 
397  String* name,
398  String* resource_name,
399  int line_number)) {
400  return profiles_->NewCodeEntry(tag, name, resource_name, line_number);
401  }
402 
404  const char* name)) {
405  return profiles_->NewCodeEntry(tag, name);
406  }
407 
409  const char* name_prefix,
410  String* name)) {
411  return profiles_->NewCodeEntry(tag, name_prefix, name);
412  }
413 
415  int args_count)) {
416  return profiles_->NewCodeEntry(tag, args_count);
417  }
418 
419  INLINE(CodeEntry* NewCodeEntry(int security_token_id)) {
420  return profiles_->NewCodeEntry(security_token_id);
421  }
422 
423  void RecordTickSample(const TickSample& sample);
424 
425  INLINE(CodeMap* code_map()) { return &code_map_; }
426 
427  INLINE(void Tick()) { sample_rate_calc_.Tick(); }
428  INLINE(double actual_sampling_rate()) {
429  return sample_rate_calc_.ticks_per_ms();
430  }
431 
432  static const char* const kAnonymousFunctionName;
433  static const char* const kProgramEntryName;
434  static const char* const kGarbageCollectorEntryName;
435 
436  private:
437  INLINE(CodeEntry* EntryForVMState(StateTag tag));
438 
439  CpuProfilesCollection* profiles_;
440  CodeMap code_map_;
441  CodeEntry* program_entry_;
442  CodeEntry* gc_entry_;
443  SampleRateCalculator sample_rate_calc_;
444 
445  DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
446 };
447 
448 
449 class HeapEntry;
450 class HeapSnapshot;
451 
453  public:
454  enum Type {
462  };
463 
465  HeapGraphEdge(Type type, const char* name, int from, int to);
466  HeapGraphEdge(Type type, int index, int from, int to);
467  void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
468 
469  Type type() const { return static_cast<Type>(type_); }
470  int index() const {
471  ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
472  return index_;
473  }
474  const char* name() const {
475  ASSERT(type_ == kContextVariable
476  || type_ == kProperty
477  || type_ == kInternal
478  || type_ == kShortcut);
479  return name_;
480  }
481  INLINE(HeapEntry* from() const);
482  HeapEntry* to() const { return to_entry_; }
483 
484  private:
485  INLINE(HeapSnapshot* snapshot() const);
486 
487  unsigned type_ : 3;
488  int from_index_ : 29;
489  union {
490  // During entries population |to_index_| is used for storing the index,
491  // afterwards it is replaced with a pointer to the entry.
493  HeapEntry* to_entry_;
494  };
495  union {
496  int index_;
497  const char* name_;
498  };
499 };
500 
501 
502 // HeapEntry instances represent an entity from the heap (or a special
503 // virtual node, e.g. root).
504 class HeapEntry BASE_EMBEDDED {
505  public:
506  enum Type {
507  kHidden = v8::HeapGraphNode::kHidden,
517  };
518  static const int kNoEntry;
519 
520  HeapEntry() { }
521  HeapEntry(HeapSnapshot* snapshot,
522  Type type,
523  const char* name,
524  SnapshotObjectId id,
525  int self_size);
526 
527  HeapSnapshot* snapshot() { return snapshot_; }
528  Type type() { return static_cast<Type>(type_); }
529  const char* name() { return name_; }
530  void set_name(const char* name) { name_ = name; }
531  inline SnapshotObjectId id() { return id_; }
532  int self_size() { return self_size_; }
533  INLINE(int index() const);
534  int children_count() const { return children_count_; }
535  INLINE(int set_children_index(int index));
536  void add_child(HeapGraphEdge* edge) {
537  children_arr()[children_count_++] = edge;
538  }
540  return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
541 
542  void SetIndexedReference(
543  HeapGraphEdge::Type type, int index, HeapEntry* entry);
544  void SetNamedReference(
545  HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
546 
547  void Print(
548  const char* prefix, const char* edge_name, int max_depth, int indent);
549 
550  Handle<HeapObject> GetHeapObject();
551 
552  private:
553  INLINE(HeapGraphEdge** children_arr());
554  const char* TypeAsString();
555 
556  unsigned type_: 4;
557  int children_count_: 28;
558  int children_index_;
559  int self_size_;
560  SnapshotObjectId id_;
561  HeapSnapshot* snapshot_;
562  const char* name_;
563 };
564 
565 
566 class HeapSnapshotsCollection;
567 
568 // HeapSnapshot represents a single heap snapshot. It is stored in
569 // HeapSnapshotsCollection, which is also a factory for
570 // HeapSnapshots. All HeapSnapshots share strings copied from JS heap
571 // to be able to return them even if they were collected.
572 // HeapSnapshotGenerator fills in a HeapSnapshot.
574  public:
575  enum Type {
577  };
578 
580  Type type,
581  const char* title,
582  unsigned uid);
583  void Delete();
584 
585  HeapSnapshotsCollection* collection() { return collection_; }
586  Type type() { return type_; }
587  const char* title() { return title_; }
588  unsigned uid() { return uid_; }
589  size_t RawSnapshotSize() const;
590  HeapEntry* root() { return &entries_[root_index_]; }
591  HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
592  HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
593  HeapEntry* gc_subroot(int index) {
594  return &entries_[gc_subroot_indexes_[index]];
595  }
596  List<HeapEntry>& entries() { return entries_; }
597  List<HeapGraphEdge>& edges() { return edges_; }
598  List<HeapGraphEdge*>& children() { return children_; }
599  void RememberLastJSObjectId();
601  return max_snapshot_js_object_id_;
602  }
603 
604  HeapEntry* AddEntry(HeapEntry::Type type,
605  const char* name,
606  SnapshotObjectId id,
607  int size);
608  HeapEntry* AddRootEntry();
609  HeapEntry* AddGcRootsEntry();
610  HeapEntry* AddGcSubrootEntry(int tag);
611  HeapEntry* AddNativesRootEntry();
612  HeapEntry* GetEntryById(SnapshotObjectId id);
614  void FillChildren();
615 
616  void Print(int max_depth);
617  void PrintEntriesSize();
618 
619  private:
620  HeapSnapshotsCollection* collection_;
621  Type type_;
622  const char* title_;
623  unsigned uid_;
624  int root_index_;
625  int gc_roots_index_;
626  int natives_root_index_;
627  int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
628  List<HeapEntry> entries_;
629  List<HeapGraphEdge> edges_;
630  List<HeapGraphEdge*> children_;
631  List<HeapEntry*> sorted_entries_;
632  SnapshotObjectId max_snapshot_js_object_id_;
633 
634  friend class HeapSnapshotTester;
635 
637 };
638 
639 
641  public:
642  HeapObjectsMap();
643 
646  SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
647  void MoveObject(Address from, Address to);
649  return next_id_ - kObjectIdStep;
650  }
651 
654  size_t GetUsedMemorySize() const;
655 
657  static inline SnapshotObjectId GetNthGcSubrootId(int delta);
658 
659  static const int kObjectIdStep = 2;
665 
666  private:
667  struct EntryInfo {
668  EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
669  : id(id), addr(addr), size(size), accessed(true) { }
670  EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed)
671  : id(id), addr(addr), size(size), accessed(accessed) { }
672  SnapshotObjectId id;
673  Address addr;
674  unsigned int size;
675  bool accessed;
676  };
677  struct TimeInterval {
678  explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
679  SnapshotObjectId id;
680  uint32_t size;
681  uint32_t count;
682  };
683 
684  void UpdateHeapObjectsMap();
685  void RemoveDeadEntries();
686 
687  static bool AddressesMatch(void* key1, void* key2) {
688  return key1 == key2;
689  }
690 
691  static uint32_t AddressHash(Address addr) {
692  return ComputeIntegerHash(
693  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
694  v8::internal::kZeroHashSeed);
695  }
696 
697  SnapshotObjectId next_id_;
698  HashMap entries_map_;
699  List<EntryInfo> entries_;
700  List<TimeInterval> time_intervals_;
701 
702  DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
703 };
704 
705 
707  public:
710 
711  bool is_tracking_objects() { return is_tracking_objects_; }
713  return ids_.PushHeapObjectsStats(stream);
714  }
715  void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
717 
719  HeapSnapshot::Type type, const char* name, unsigned uid);
721  List<HeapSnapshot*>* snapshots() { return &snapshots_; }
722  HeapSnapshot* GetSnapshot(unsigned uid);
724 
725  StringsStorage* names() { return &names_; }
726  TokenEnumerator* token_enumerator() { return token_enumerator_; }
727 
729  return ids_.FindEntry(object_addr);
730  }
731  SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
732  return ids_.FindOrAddEntry(object_addr, object_size);
733  }
735  void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
737  return ids_.last_assigned_id();
738  }
739  size_t GetUsedMemorySize() const;
740 
741  private:
742  INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
743  return key1 == key2;
744  }
745 
746  bool is_tracking_objects_; // Whether tracking object moves is needed.
747  List<HeapSnapshot*> snapshots_;
748  // Mapping from snapshots' uids to HeapSnapshot* pointers.
749  HashMap snapshots_uids_;
750  StringsStorage names_;
751  TokenEnumerator* token_enumerator_;
752  // Mapping from HeapObject addresses to objects' uids.
753  HeapObjectsMap ids_;
754 
755  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
756 };
757 
758 
759 // A typedef for referencing anything that can be snapshotted living
760 // in any kind of heap memory.
761 typedef void* HeapThing;
762 
763 
764 // An interface that creates HeapEntries by HeapThings.
766  public:
767  virtual ~HeapEntriesAllocator() { }
768  virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
769 };
770 
771 
772 // The HeapEntriesMap instance is used to track a mapping between
773 // real heap objects and their representations in heap snapshots.
775  public:
776  HeapEntriesMap();
777 
778  int Map(HeapThing thing);
779  void Pair(HeapThing thing, int entry);
780 
781  private:
782  static uint32_t Hash(HeapThing thing) {
783  return ComputeIntegerHash(
784  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
785  v8::internal::kZeroHashSeed);
786  }
787  static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
788  return key1 == key2;
789  }
790 
791  HashMap entries_;
792 
793  friend class HeapObjectsSet;
794 
796 };
797 
798 
800  public:
801  HeapObjectsSet();
802  void Clear();
803  bool Contains(Object* object);
804  void Insert(Object* obj);
805  const char* GetTag(Object* obj);
806  void SetTag(Object* obj, const char* tag);
807  bool is_empty() const { return entries_.occupancy() == 0; }
808 
809  private:
810  HashMap entries_;
811 
812  DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
813 };
814 
815 
816 // An interface used to populate a snapshot with nodes and edges.
818  public:
820  virtual HeapEntry* AddEntry(HeapThing ptr,
821  HeapEntriesAllocator* allocator) = 0;
822  virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
823  virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
824  HeapEntriesAllocator* allocator) = 0;
825  virtual void SetIndexedReference(HeapGraphEdge::Type type,
826  int parent_entry,
827  int index,
828  HeapEntry* child_entry) = 0;
830  int parent_entry,
831  HeapEntry* child_entry) = 0;
832  virtual void SetNamedReference(HeapGraphEdge::Type type,
833  int parent_entry,
834  const char* reference_name,
835  HeapEntry* child_entry) = 0;
837  int parent_entry,
838  HeapEntry* child_entry) = 0;
839 };
840 
841 
843  public:
845  virtual void ProgressStep() = 0;
846  virtual bool ProgressReport(bool force) = 0;
847 };
848 
849 
850 // An implementation of V8 heap graph extractor.
852  public:
855  virtual ~V8HeapExplorer();
856  virtual HeapEntry* AllocateEntry(HeapThing ptr);
858  int EstimateObjectsCount(HeapIterator* iterator);
860  void TagGlobalObjects();
861 
862  static String* GetConstructorName(JSObject* object);
863 
865 
866  private:
867  HeapEntry* AddEntry(HeapObject* object);
868  HeapEntry* AddEntry(HeapObject* object,
869  HeapEntry::Type type,
870  const char* name);
871  const char* GetSystemEntryName(HeapObject* object);
872 
873  void ExtractReferences(HeapObject* obj);
874  void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
875  void ExtractJSObjectReferences(int entry, JSObject* js_obj);
876  void ExtractStringReferences(int entry, String* obj);
877  void ExtractContextReferences(int entry, Context* context);
878  void ExtractMapReferences(int entry, Map* map);
879  void ExtractSharedFunctionInfoReferences(int entry,
880  SharedFunctionInfo* shared);
881  void ExtractScriptReferences(int entry, Script* script);
882  void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
883  void ExtractCodeReferences(int entry, Code* code);
884  void ExtractJSGlobalPropertyCellReferences(int entry,
885  JSGlobalPropertyCell* cell);
886  void ExtractClosureReferences(JSObject* js_obj, int entry);
887  void ExtractPropertyReferences(JSObject* js_obj, int entry);
888  void ExtractElementReferences(JSObject* js_obj, int entry);
889  void ExtractInternalReferences(JSObject* js_obj, int entry);
890  bool IsEssentialObject(Object* object);
891  void SetClosureReference(HeapObject* parent_obj,
892  int parent,
893  String* reference_name,
894  Object* child);
895  void SetNativeBindReference(HeapObject* parent_obj,
896  int parent,
897  const char* reference_name,
898  Object* child);
899  void SetElementReference(HeapObject* parent_obj,
900  int parent,
901  int index,
902  Object* child);
903  void SetInternalReference(HeapObject* parent_obj,
904  int parent,
905  const char* reference_name,
906  Object* child,
907  int field_offset = -1);
908  void SetInternalReference(HeapObject* parent_obj,
909  int parent,
910  int index,
911  Object* child,
912  int field_offset = -1);
913  void SetHiddenReference(HeapObject* parent_obj,
914  int parent,
915  int index,
916  Object* child);
917  void SetWeakReference(HeapObject* parent_obj,
918  int parent,
919  int index,
920  Object* child_obj,
921  int field_offset);
922  void SetPropertyReference(HeapObject* parent_obj,
923  int parent,
924  String* reference_name,
925  Object* child,
926  const char* name_format_string = NULL,
927  int field_offset = -1);
928  void SetUserGlobalReference(Object* user_global);
929  void SetRootGcRootsReference();
930  void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
931  void SetGcSubrootReference(
932  VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
933  const char* GetStrongGcSubrootName(Object* object);
934  void TagObject(Object* obj, const char* tag);
935 
936  HeapEntry* GetEntry(Object* obj);
937 
938  static inline HeapObject* GetNthGcSubrootObject(int delta);
939  static inline int GetGcSubrootOrder(HeapObject* subroot);
940 
941  Heap* heap_;
942  HeapSnapshot* snapshot_;
943  HeapSnapshotsCollection* collection_;
945  SnapshotFillerInterface* filler_;
946  HeapObjectsSet objects_tags_;
947  HeapObjectsSet strong_gc_subroot_names_;
948 
949  static HeapObject* const kGcRootsObject;
950  static HeapObject* const kFirstGcSubrootObject;
951  static HeapObject* const kLastGcSubrootObject;
952 
954  friend class GcSubrootsEnumerator;
956 
958 };
959 
960 
962 
963 
964 // An implementation of retained native objects extractor.
966  public:
969  virtual ~NativeObjectsExplorer();
971  int EstimateObjectsCount();
973 
974  private:
975  void FillRetainedObjects();
976  void FillImplicitReferences();
977  List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
978  void SetNativeRootReference(v8::RetainedObjectInfo* info);
979  void SetRootNativeRootsReference();
980  void SetWrapperNativeReferences(HeapObject* wrapper,
981  v8::RetainedObjectInfo* info);
982  void VisitSubtreeWrapper(Object** p, uint16_t class_id);
983 
984  static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
985  return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
986  v8::internal::kZeroHashSeed);
987  }
988  static bool RetainedInfosMatch(void* key1, void* key2) {
989  return key1 == key2 ||
990  (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
991  reinterpret_cast<v8::RetainedObjectInfo*>(key2));
992  }
993  INLINE(static bool StringsMatch(void* key1, void* key2)) {
994  return strcmp(reinterpret_cast<char*>(key1),
995  reinterpret_cast<char*>(key2)) == 0;
996  }
997 
998  NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
999 
1000  HeapSnapshot* snapshot_;
1001  HeapSnapshotsCollection* collection_;
1003  bool embedder_queried_;
1004  HeapObjectsSet in_groups_;
1005  // RetainedObjectInfo* -> List<HeapObject*>*
1006  HashMap objects_by_info_;
1007  HashMap native_groups_;
1008  HeapEntriesAllocator* synthetic_entries_allocator_;
1009  HeapEntriesAllocator* native_entries_allocator_;
1010  // Used during references extraction.
1011  SnapshotFillerInterface* filler_;
1012 
1013  static HeapThing const kNativesRootObject;
1014 
1016 
1018 };
1019 
1020 
1022  public:
1024  v8::ActivityControl* control);
1025  bool GenerateSnapshot();
1026 
1027  private:
1028  bool FillReferences();
1029  void ProgressStep();
1030  bool ProgressReport(bool force = false);
1031  void SetProgressTotal(int iterations_count);
1032 
1033  HeapSnapshot* snapshot_;
1034  v8::ActivityControl* control_;
1035  V8HeapExplorer v8_heap_explorer_;
1036  NativeObjectsExplorer dom_explorer_;
1037  // Mapping from HeapThing pointers to HeapEntry* pointers.
1038  HeapEntriesMap entries_;
1039  // Used during snapshot generation.
1040  int progress_counter_;
1041  int progress_total_;
1042 
1043  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
1044 };
1045 
1046 class OutputStreamWriter;
1047 
1049  public:
1051  : snapshot_(snapshot),
1052  strings_(ObjectsMatch),
1053  next_node_id_(1),
1054  next_string_id_(1),
1055  writer_(NULL) {
1056  }
1057  void Serialize(v8::OutputStream* stream);
1058 
1059  private:
1060  INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
1061  return key1 == key2;
1062  }
1063 
1064  INLINE(static uint32_t ObjectHash(const void* key)) {
1065  return ComputeIntegerHash(
1066  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
1067  v8::internal::kZeroHashSeed);
1068  }
1069 
1070  HeapSnapshot* CreateFakeSnapshot();
1071  int GetStringId(const char* s);
1072  int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
1073  void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
1074  void SerializeEdges();
1075  void SerializeImpl();
1076  void SerializeNode(HeapEntry* entry);
1077  void SerializeNodes();
1078  void SerializeSnapshot();
1079  void SerializeString(const unsigned char* s);
1080  void SerializeStrings();
1081  void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
1082 
1083  static const int kEdgeFieldsCount;
1084  static const int kNodeFieldsCount;
1085 
1086  HeapSnapshot* snapshot_;
1087  HashMap strings_;
1088  int next_node_id_;
1089  int next_string_id_;
1090  OutputStreamWriter* writer_;
1091 
1094 
1096 };
1097 
1098 } } // namespace v8::internal
1099 
1100 #endif // V8_PROFILE_GENERATOR_H_
byte * Address
Definition: globals.h:157
virtual HeapEntry * AllocateEntry(HeapThing ptr)=0
static const SnapshotObjectId kGcRootsFirstSubrootId
SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size)
void RemoveSnapshot(HeapSnapshot *snapshot)
static const int kInheritsSecurityToken
Handle< HeapObject > FindHeapObjectById(SnapshotObjectId id)
virtual HeapEntry * AllocateEntry(HeapThing ptr)
void SetTickRatePerMs(double ticks_per_ms)
virtual intptr_t GetHash()=0
uint32_t GetCallUid() const
INLINE(CodeEntry *NewCodeEntry(Logger::LogEventsAndTags tag, const char *name))
ProfileNode * root() const
INLINE(CodeEntry *NewCodeEntry(Logger::LogEventsAndTags tag, const char *name_prefix, String *name))
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin 
natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in the snapshot(mksnapshot only)") DEFINE_bool(help
ProfileGenerator(CpuProfilesCollection *profiles)
INLINE(double actual_sampling_rate())
static SnapshotObjectId GetNthGcSubrootId(int delta)
CpuProfile * GetProfile(int security_token_id, unsigned uid)
static const SnapshotObjectId kNativesRootObjectId
INLINE(const char *title() const)
INLINE(void IncreaseTotalTicks(unsigned amount))
bool IterateAndExtractReferences(SnapshotFillerInterface *filler)
CodeEntry * NewCodeEntry(Logger::LogEventsAndTags tag, String *name, String *resource_name, int line_number)
INLINE(unsigned total_ticks() const)
INLINE(bool is_js_function() const)
void FilteredClone(ProfileTree *src, int security_token_id)
TickSample * sample
List< HeapSnapshot * > * snapshots()
const char * name() const
virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type, int parent_entry, HeapEntry *child_entry)=0
#define ASSERT(condition)
Definition: checks.h:270
v8::Handle< v8::Value > Print(const v8::Arguments &args)
const char * GetFormatted(const char *format,...)
SnapshotObjectId PushHeapObjectsStats(OutputStream *stream)
unsigned short uint16_t
Definition: unicode.cc:46
virtual HeapEntry * AddEntry(HeapThing ptr, HeapEntriesAllocator *allocator)=0
INLINE(CodeEntry(Logger::LogEventsAndTags tag, const char *name_prefix, const char *name, const char *resource_name, int line_number, int security_token_id))
void set_name(const char *name)
void SetTag(Object *obj, const char *tag)
INLINE(const char *name() const)
void SetActualSamplingRate(double actual_sampling_rate)
bool IterateAndExtractReferences(SnapshotFillerInterface *filler)
const char * GetName(int args_count)
INLINE(void set_shared_id(int shared_id))
HeapEntry * AddNativesRootEntry()
virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, int parent_entry, HeapEntry *child_entry)=0
INLINE(unsigned self_ticks() const)
static const SnapshotObjectId kGcRootsObjectId
static SnapshotObjectId GenerateId(v8::RetainedObjectInfo *info)
INLINE(CodeEntry *entry() const)
INLINE(CodeEntry *NewCodeEntry(Logger::LogEventsAndTags tag, String *name, String *resource_name, int line_number))
const char * GetName(String *name)
SnapshotObjectId last_assigned_id() const
INLINE(CodeEntry *NewCodeEntry(int security_token_id))
virtual void SetNamedReference(HeapGraphEdge::Type type, int parent_entry, const char *reference_name, HeapEntry *child_entry)=0
CodeEntry * FindEntry(Address addr)
NativeObjectsExplorer(HeapSnapshot *snapshot, SnapshottingProgressReportingInterface *progress)
List< HeapEntry > & entries()
void RecordTickSample(const TickSample &sample)
const char * GetTag(Object *obj)
int GetSharedId(Address addr)
SnapshotObjectId FindObjectId(Address object_addr)
static String * GetConstructorName(JSObject *object)
HeapEntry * gc_subroot(int index)
ProfileNode * FindChild(CodeEntry *entry)
HeapSnapshotsCollection * collection()
uint32_t occupancy() const
Definition: hashmap.h:83
int Compare(const T &a, const T &b)
Definition: utils.h:156
static HeapObject *const kInternalRootObject
INLINE(CodeEntry *NewCodeEntry(Logger::LogEventsAndTags tag, int args_count))
INLINE(int shared_id() const)
INLINE(const ProfileTree *bottom_up() const)
void MoveCode(Address from, Address to)
static const char *const kProgramEntryName
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:307
intptr_t AtomicWord
Definition: atomicops.h:75
double TicksToMillis(unsigned ticks) const
static const SnapshotObjectId kFirstAvailableObjectId
List< HeapGraphEdge > & edges()
HeapSnapshotGenerator(HeapSnapshot *snapshot, v8::ActivityControl *control)
INLINE(unsigned uid() const)
SnapshotObjectId FindEntry(Address addr)
bool IsSameAs(CodeEntry *entry) const
void AddPathFromEnd(const Vector< CodeEntry * > &path)
virtual void SetIndexedReference(HeapGraphEdge::Type type, int parent_entry, int index, HeapEntry *child_entry)=0
V8HeapExplorer(HeapSnapshot *snapshot, SnapshottingProgressReportingInterface *progress)
INLINE(const char *name_prefix() const)
#define BASE_EMBEDDED
Definition: allocation.h:68
void RemoveProfile(CpuProfile *profile)
CpuProfile * FilteredClone(int security_token_id)
INLINE(int line_number() const)
SnapshotObjectId PushHeapObjectsStats(OutputStream *stream)
static const char *const kGarbageCollectorEntryName
void AddPathToCurrentProfiles(const Vector< CodeEntry * > &path)
HeapEntry * GetEntryById(SnapshotObjectId id)
virtual HeapEntry * FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator *allocator)=0
void AddRootEntries(SnapshotFillerInterface *filler)
void CopyData(const CodeEntry &source)
void UpdateMeasurements(double current_time)
static const unsigned kWallTimeQueryIntervalMs
void AddPath(const Vector< CodeEntry * > &path)
void AddCode(Address addr, CodeEntry *entry, unsigned size)
void Serialize(v8::OutputStream *stream)
void AddPathFromStart(const Vector< CodeEntry * > &path)
INLINE(int security_token_id() const)
HeapEntry * AddEntry(HeapEntry::Type type, const char *name, SnapshotObjectId id, int size)
static const char *const kAnonymousFunctionName
uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed)
Definition: utils.h:286
static const SnapshotObjectId kInternalRootObjectId
INLINE(bool has_name_prefix() const)
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
List< HeapEntry * > * GetSortedEntriesList()
uint32_t SnapshotObjectId
Definition: v8-profiler.h:68
virtual HeapEntry * FindEntry(HeapThing ptr)=0
void SnapshotGenerationFinished(HeapSnapshot *snapshot)
const char * GetCopy(const char *src)
void Pair(HeapThing thing, int entry)
HeapEntry * AddGcSubrootEntry(int tag)
TemplateHashMapImpl< FreeStoreAllocationPolicy > HashMap
Definition: hashmap.h:113
SnapshotObjectId last_assigned_id() const
INLINE(const char *resource_name() const)
static const char *const kEmptyNamePrefix
void AddRootEntries(SnapshotFillerInterface *filler)
const char * GetFunctionName(String *name)
HeapSnapshot * NewSnapshot(HeapSnapshot::Type type, const char *name, unsigned uid)
bool StartProfiling(const char *title, unsigned uid)
HeapSnapshot * GetSnapshot(unsigned uid)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
Definition: flags.cc:301
List< HeapGraphEdge * > & children()
SnapshotObjectId GetObjectId(Address object_addr, int object_size)
INLINE(const List< ProfileNode * > *children() const)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
INLINE(ProfileNode(ProfileTree *tree, CodeEntry *entry))
int EstimateObjectsCount(HeapIterator *iterator)
const char * GetVFormatted(const char *format, va_list args)
CpuProfile * StopProfiling(int security_token_id, const char *title, double actual_sampling_rate)
INLINE(void IncrementSelfTicks())
SnapshotObjectId max_snapshot_js_object_id() const
const char * GetName(String *name)
void MoveObject(Address from, Address to)
ProfileNode * FindOrAddChild(CodeEntry *entry)
CpuProfile(const char *title, unsigned uid)
void add_child(HeapGraphEdge *edge)
HeapSnapshot(HeapSnapshotsCollection *collection, Type type, const char *title, unsigned uid)
INLINE(void IncreaseSelfTicks(unsigned amount))
INLINE(const ProfileTree *top_down() const)
Vector< HeapGraphEdge * > children()
HeapSnapshotJSONSerializer(HeapSnapshot *snapshot)
void ObjectMoveEvent(Address from, Address to)
List< CpuProfile * > * Profiles(int security_token_id)