bool StringsStorage::StringsMatch(void* key1, void* key2) {
  return strcmp(reinterpret_cast<char*>(key1),
                reinterpret_cast<char*>(key2)) == 0;
}
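
// StringsStorage interns C strings in a HashMap keyed by string contents,
// hashed with the owning heap's seed.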
StringsStorage::StringsStorage(Heap* heap)
    : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}

StringsStorage::~StringsStorage() {
  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }
}
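
// Returns an interned copy of |src|; a backing array is allocated only the
// first time a given string is seen.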
const char* StringsStorage::GetCopy(const char* src) {
  int len = static_cast<int>(strlen(src));
  HashMap::Entry* entry = GetEntry(src, len);
  if (entry->value == NULL) {
    Vector<char> dst = Vector<char>::New(len + 1);
    OS::StrNCpy(dst, src, len);
    dst[len] = '\0';
    entry->key = dst.start();
    entry->value = entry->key;
  }
  return reinterpret_cast<const char*>(entry->value);
}
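
// GetFormatted() is the printf-style entry point; it forwards its varargs to
// GetVFormatted() below.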
const char* StringsStorage::GetFormatted(const char* format, ...) {
  va_list args;
  va_start(args, format);
  const char* result = GetVFormatted(format, args);
  va_end(args);
  return result;
}
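
// Takes ownership of |str|: if an equal string is already interned, |str| is
// deleted and the existing copy returned.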
const char* StringsStorage::AddOrDisposeString(char* str, int len) {
  HashMap::Entry* entry = GetEntry(str, len);
  if (entry->value == NULL) {
    // New entry added.
    entry->key = str;
    entry->value = str;
  } else {
    DeleteArray(str);
  }
  return reinterpret_cast<const char*>(entry->value);
}

const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
  Vector<char> str = Vector<char>::New(1024);
  int len = OS::VSNPrintF(str, format, args);
  if (len == -1) {
    DeleteArray(str.start());
    return GetCopy(format);
  }
  return AddOrDisposeString(str.start(), len);
}
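
// Converts a heap Name into an interned C string. String contents are
// truncated to kMaxNameSize characters; Symbols get a placeholder.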
const char* StringsStorage::GetName(Name* name) {
  if (name->IsString()) {
    String* str = String::cast(name);
    int length = Min(kMaxNameSize, str->length());
    int actual_length = 0;
    SmartArrayPointer<char> data =
        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
                       &actual_length);
    return AddOrDisposeString(data.Detach(), actual_length);
  } else if (name->IsSymbol()) {
    return "<symbol>";
  }
  return "";
}

const char* StringsStorage::GetFunctionName(Name* name) {
  return BeautifyFunctionName(GetName(name));
}

const char* StringsStorage::GetFunctionName(const char* name) {
  return BeautifyFunctionName(GetCopy(name));
}
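
// Substitutes kAnonymousFunctionName for the empty name so anonymous
// closures remain readable in profile output.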
const char* StringsStorage::BeautifyFunctionName(const char* name) {
  return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
}

size_t StringsStorage::GetUsedMemorySize() const {
  size_t size = sizeof(*this);
  size += sizeof(HashMap::Entry) * names_.capacity();
  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
    size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
  }
  return size;
}
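
// Looks up (inserting on miss) the HashMap entry for |str|, hashing with the
// per-heap seed captured at construction time.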
HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
  uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
  return names_.Lookup(const_cast<char*>(str), hash, true);
}

CodeEntry::~CodeEntry() {
  delete no_frame_ranges_;
}
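
// Hashes the identity of the logical function behind this entry: either its
// shared function id, or the combination of its name strings and line number.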
uint32_t CodeEntry::GetCallUid() const {
  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
  if (shared_id_ != 0) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}

bool CodeEntry::IsSameAs(CodeEntry* entry) const {
  return this == entry
      || (tag_ == entry->tag_
          && shared_id_ == entry->shared_id_
          && (shared_id_ != 0
              || (name_prefix_ == entry->name_prefix_
                  && name_ == entry->name_
                  && resource_name_ == entry->resource_name_
                  && line_number_ == entry->line_number_)));
}
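
// Re-tags the entry as a builtin and records which one it is.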
void CodeEntry::SetBuiltinId(Builtins::Name id) {
  tag_ = Logger::BUILTIN_TAG;
  builtin_id_ = id;
}
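
// A node's children live both in a HashMap (fast lookup by CodeEntry) and in
// children_list_ (stable iteration order).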
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}

ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (map_entry->value == NULL) {
    // New node added.
    ProfileNode* new_node = new ProfileNode(tree_, entry);
    map_entry->value = new_node;
    children_list_.Add(new_node);
  }
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}
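
// ProfileNode::Print() writes this node's statistics and then recurses over
// the children map with increased indentation.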
void ProfileNode::Print(int indent) {
  // ... (this node's own statistics are printed first)
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}

ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)) {
}

ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}
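
// Adds one sampled stack to the tree. The path is walked from its caller end
// toward the top frame, and the leaf node's self-tick count is bumped.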
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start() + path.length() - 1;
       entry != path.start() - 1;
       --entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
  return node;
}

void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start();
       entry != path.start() + path.length();
       ++entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
}
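
// Src/dst node pair used by the depth-first tree-walking helpers below.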
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};

class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }
  ProfileNode* node;
 private:
  int child_idx_;
};
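
// Non-recursive implementation of a depth-first tree traversal: an explicit
// stack of Position cursors stands in for the call stack.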
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
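
// A CpuProfile captures its start time eagerly; the end time is derived from
// the elapsed timer when profiling stops.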
CpuProfile::CpuProfile(const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(Time::NowFromSystemTime()) {
  timer_.Start();
}

void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
  if (record_samples_) samples_.Add(top_frame_node);
}

void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = start_time_ + timer_.Elapsed();
}

const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
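
// Inserting new code first evicts every existing entry that the new range
// [addr, addr + size) covers.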
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
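
// Walks backwards from |end|, collecting the start addresses of entries that
// overlap [start, end) and removing them after the scan, so the tree is not
// mutated while it is being searched.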
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
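
// Finds the entry covering |addr| via a greatest-less-than search, then
// verifies that |addr| actually falls inside that entry's code range.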
CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
  CodeTree::Locator locator;
  if (tree_.FindGreatestLessThan(addr, &locator)) {
    // locator.key() <= addr. Need to check that addr is within entry.
    const CodeEntryInfo& entry = locator.value();
    if (addr < (locator.key() + entry.size)) {
      if (start) {
        *start = locator.key();
      }
      return entry.entry;
    }
  }
  return NULL;
}
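
// For shared function entries the 'size' field doubles as the id storage.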
int CodeMap::GetSharedId(Address addr) {
  CodeTree::Locator locator;
  if (tree_.Find(addr, &locator)) {
    const CodeEntryInfo& entry = locator.value();
    ASSERT(entry.entry == kSharedFunctionCodeEntry);
    return entry.size;
  } else {
    tree_.Insert(addr, &locator);
    int id = next_shared_id_++;
    locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
    return id;
  }
}
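
// Moving code is a removal plus re-insertion; a move onto itself is a no-op.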
void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  AddCode(to, entry.entry, entry.size);
}

void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  if (value.entry == kSharedFunctionCodeEntry) {
    OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
  } else {
    OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
  }
}

// CodeMap::Print() runs the printer callback over every node in the tree.
void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}

CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      current_profiles_semaphore_(1) {
}

static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}
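
// Starting a profile is guarded by current_profiles_semaphore_; a title that
// is already being recorded is not added twice.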
bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  ASSERT(title != NULL);
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}
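
// Detaches the newest profile matching |title| (or the newest profile at all,
// when |title| is empty), finalizes its timings, and moves it to
// finished_profiles_.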
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}

bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}

void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
}
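
// Called on the profiler thread for every tick: appends the sampled path to
// every profile currently being recorded.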
void CpuProfilesCollection::AddPathToCurrentProfiles(
    const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(path);
  }
  current_profiles_semaphore_.Signal();
}
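
// CodeEntry objects are owned by the collection; they are freed in bulk by
// the destructor above.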
CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag,
    const char* name,
    const char* name_prefix,
    const char* resource_name,
    int line_number,
    int column_number) {
  CodeEntry* code_entry = new CodeEntry(
      tag, name, name_prefix, resource_name, line_number, column_number);
  code_entries_.Add(code_entry);
  return code_entry;
}
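
// Human-readable names for the synthetic entries used in profiles.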
const char* const ProfileGenerator::kAnonymousFunctionName =
    "(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";

ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}
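
// Symbolizes one TickSample into a vector of CodeEntry pointers and feeds
// the resulting path into every active profile.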
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As the actual number of decoded entries may vary, fill with NULLs.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));
  if (sample.pc != NULL) {
    if (sample.has_external_callback) {
      // Don't use PC when in external callback code: it can point inside the
      // callback's code, which would be reported as the callback calling
      // itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      Address start;
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
      if (pc_entry) {
        // Skip samples taken while the function's stack frame is not set up
        // yet or is already destroyed.
        List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
        if (ranges) {
          Code* code = Code::cast(HeapObject::FromAddress(start));
          int pc_offset = static_cast<int>(
              sample.pc - code->instruction_start());
          for (int i = 0; i < ranges->length(); i++) {
            OffsetRange& range = ranges->at(i);
            if (range.from <= pc_offset && pc_offset < range.to) {
              return;
            }
          }
        }
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
            pc_entry->builtin_id() == Builtins::kFunctionApply) {
          // The caller of Function.prototype.call/apply cannot be resolved
          // when the top frame is a JS frame, so record an 'unresolved'
          // entry instead.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    for (const Address* stack_pos = sample.stack,
             *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry++ = code_map_.FindEntry(*stack_pos);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(entries);
}
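
// Maps a VM state tag onto one of the synthetic code entries created above.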
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return gc_entry_;
    case JS:
    case COMPILER:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return program_entry_;
    case IDLE:
      return idle_entry_;
    default: return NULL;
  }
}