#include "v8.h"

#include "cpu-profiler-inl.h"

#include "../include/v8-profiler.h"

namespace v8 {
namespace internal {
static const int kEventsBufferSize = 256 * KB;
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
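// For orientation (not in the original source): the constants above size the
// tick-sample ring buffer at 64 KB * 16 = 1 MB of raw sample records. The
// constant below is a hypothetical helper that only spells out that
// arithmetic.
static const int kTickSamplesBufferTotalSize =  // hypothetical, illustration only
    kTickSamplesBufferChunkSize * kTickSamplesBufferChunksCount;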
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0) {
}
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
                                                  const char* prefix,
                                                  String* name,
                                                  Address start) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = 1;
  events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              String* name,
                                              String* resource_name,
                                              int line_number,
                                              Address start,
                                              unsigned size,
                                              Address shared) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  rec->shared = shared;
  events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              const char* name,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              int args_count,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, args_count);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
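// A sketch of the container type the handlers above rely on (the real
// declaration lives in cpu-profiler.h; the shape below follows the usage in
// this file and is otherwise an assumption). All record types are overlaid
// in one fixed-size element so they can travel through the events queue,
// with the common header (type, order) reachable via 'generic':
//
//   class CodeEventsContainer {
//    public:
//     union {
//       CodeEventRecord generic;  // the common type/order header
//   #define DECLARE_CLASS(ignore, type) type type##_;
//       CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
//   #undef DECLARE_CLASS
//     };
//   };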
void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
  CodeEventsContainer evt_rec;
  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
  rec->type = CodeEventRecord::CODE_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
                                                          Address to) {
  CodeEventsContainer evt_rec;
  SharedFunctionInfoMoveEventRecord* rec =
      &evt_rec.SharedFunctionInfoMoveEventRecord_;
  rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::RegExpCodeCreateEvent(
    Logger::LogEventsAndTags tag,
    const char* prefix,
    String* name,
    Address start,
    unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record;
  Isolate* isolate = Isolate::Current();
  // ... record.sample is filled from the isolate's current VM state and
  // the PCs of the JS frames on the stack ...
  record.order = enqueue_order_;
  ticks_from_vm_buffer_.Enqueue(record);
}
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}
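// For illustration (not in the original file): assuming CODE_EVENTS_TYPE_LIST
// contains an entry V(CODE_CREATION, CodeCreateEventRecord), matching the
// field names used above, one PROFILER_TYPE_CASE expansion is:
//
//   case CodeEventRecord::CODE_CREATION:
//     record.CodeCreateEventRecord_.UpdateCodeMap(generator_->code_map());
//     break;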
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of the tick sample record: the sampler writes into
    // the buffer without synchronization, so if the processor gets far
    // behind, a record may be modified right under its feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure we don't get a memory overrun in case
      // of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count > TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      return true;
    }
  }
}
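// Why the order check matters (illustrative scenario, not from the source):
// a tick must be resolved against the code map as it existed when the sample
// was taken. For example, with
//
//   code events:  order 1: create "f" at 0x100;  order 2: move 0x100 -> 0x200
//   ticks:        order 1, pc = 0x100;           order 2, pc = 0x200
//
// ProcessTicks(1) consumes only the first tick, so pc 0x100 is looked up
// before ProcessCodeEvent applies the move; the second tick waits until the
// code map reflects event #2.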
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Drain the ticks for the current order, then apply the next code event.
    if (ProcessTicks(dequeue_order)) {
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // On shutdown, flush the tick buffer and drain whatever is left.
  ticks_buffer_.FlushResidualRecords();
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
void CpuProfiler::StartProfiling(const char* title) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}


void CpuProfiler::StartProfiling(String* title) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  Isolate* isolate = Isolate::Current();
  return is_profiling(isolate) ?
      isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
}


CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
  Isolate* isolate = Isolate::Current();
  return is_profiling(isolate) ?
      isolate->cpu_profiler()->StopCollectingProfile(
          security_token, title) : NULL;
}
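// Embedder-side usage sketch (assumes this era's public API from
// ../include/v8-profiler.h, where v8::CpuProfiler exposes static
// StartProfiling/StopProfiling that funnel into the internal entry points
// above):
//
//   v8::HandleScope scope;
//   v8::Handle<v8::String> title = v8::String::New("startup");
//   v8::CpuProfiler::StartProfiling(title);
//   // ... run the code to be profiled ...
//   const v8::CpuProfile* profile = v8::CpuProfiler::StopProfiling(title);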
int CpuProfiler::GetProfilesCount() {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  // The count of profiles doesn't depend on a security token.
  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
      TokenEnumerator::kNoSecurityToken)->length();
}
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  const int token = profiler->token_enumerator_->GetTokenId(security_token);
  return profiler->profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  const int token = profiler->token_enumerator_->GetTokenId(security_token);
  return profiler->profiles_->GetProfile(token, uid);
}
TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
  if (CpuProfiler::is_profiling(isolate)) {
    return isolate->cpu_profiler()->processor_->TickSampleEvent();
  } else {
    return NULL;
  }
}
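// Sketch of the sampler-side contract implied by this accessor (an
// assumption drawn from the call shape, not code from this file): on each
// tick the platform sampler asks for a slot in the circular buffer and fills
// it in place, so no sample is copied on the signal-handler hot path:
//
//   TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
//   if (sample != NULL) {
//     // fill sample->pc / sp / fp and sample->stack[] in place
//   }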
void CpuProfiler::DeleteAllProfiles() {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate->cpu_profiler() != NULL);
  if (is_profiling(isolate)) {
    isolate->cpu_profiler()->StopProcessor();
  }
  isolate->cpu_profiler()->ResetProfiles();
}
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
  delete profile;
}
bool CpuProfiler::HasDetachedProfiles() {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
}
void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, const char* comment) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag, comment, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name) {
  Isolate* isolate = Isolate::Current();
  isolate->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      name,
      isolate->heap()->empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize(),
      NULL);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code,
                                  SharedFunctionInfo* shared,
                                  String* name) {
  Isolate* isolate = Isolate::Current();
  isolate->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      name,
      isolate->heap()->empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize(),
      shared->address());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code,
                                  SharedFunctionInfo* shared,
                                  String* source, int line) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      shared->DebugName(),
      source,
      line,
      code->address(),
      code->ExecutableSize(),
      shared->address());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, int args_count) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag, args_count, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
}
void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  ASSERT(profiler != NULL);
  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
}
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "get ", name, entry_point);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
      Logger::REG_EXP_TAG,
      "RegExp: ",
      source,
      code->address(),
      code->ExecutableSize());
}
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "set ", name, entry_point);
}
CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL),
      need_to_stop_sampler_(false),
      is_profiling_(false) {
}
CpuProfiler::~CpuProfiler() {
  delete token_enumerator_;
  delete profiles_;
}
void CpuProfiler::ResetProfiles() {
  delete profiles_;
  profiles_ = new CpuProfilesCollection();
}
void CpuProfiler::StartCollectingProfile(const char* title) {
  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
    StartProcessorIfNotStarted();
  }
  processor_->AddCurrentStack();
}
void CpuProfiler::StartCollectingProfile(String* title) {
  StartCollectingProfile(profiles_->GetName(title));
}
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    Isolate* isolate = Isolate::Current();
    // Disable logging when using the new implementation.
    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
    isolate->logger()->logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    NoBarrier_Store(&is_profiling_, true);
    processor_->Start();
    // Enumerate stuff we already have in the heap.
    if (isolate->heap()->HasBeenSetUp()) {
      if (!FLAG_prof_browser_mode) {
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        isolate->logger()->LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      isolate->logger()->LogCompiledFunctions();
      isolate->logger()->LogAccessorCallbacks();
    }
    // Enable stack sampling.
    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
    if (!sampler->IsActive()) {
      sampler->Start();
      need_to_stop_sampler_ = true;
    }
    sampler->IncreaseProfilingDepth();
  }
}
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) StopProcessor();
}
void CpuProfiler::StopProcessor() {
  Logger* logger = Isolate::Current()->logger();
  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
  sampler->DecreaseProfilingDepth();
  if (need_to_stop_sampler_) {
    sampler->Stop();
    need_to_stop_sampler_ = false;
  }
  NoBarrier_Store(&is_profiling_, false);
  processor_->Stop();
  processor_->Join();
  delete processor_;
  delete generator_;
  processor_ = NULL;
  generator_ = NULL;
  logger->logging_nesting_ = saved_logging_nesting_;
}
void CpuProfiler::SetUp() {
  Isolate* isolate = Isolate::Current();
  if (isolate->cpu_profiler() == NULL) {
    isolate->set_cpu_profiler(new CpuProfiler());
  }
}
void CpuProfiler::TearDown() {
  Isolate* isolate = Isolate::Current();
  if (isolate->cpu_profiler() != NULL) {
    delete isolate->cpu_profiler();
  }
  isolate->set_cpu_profiler(NULL);
}

} }  // namespace v8::internal