28 #ifndef V8_CPU_PROFILER_H_
29 #define V8_CPU_PROFILER_H_
44 class CompilationInfo;
46 class CpuProfilesCollection;
47 class ProfileGenerator;
// X-macro list of code-event kinds: each V(enum_tag, record_class) pair
// names one event type and the record struct that carries its payload.
// Presumably expanded with DECLARE_TYPE / DECLARE_CLASS (defined below)
// to generate an enum and per-type storage — TODO confirm against the
// full header, as the expansion sites are not visible in this chunk.
49 #define CODE_EVENTS_TYPE_LIST(V) \
50 V(CODE_CREATION, CodeCreateEventRecord) \
51 V(CODE_MOVE, CodeMoveEventRecord) \
52 V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
53 V(REPORT_BUILTIN, ReportBuiltinEventRecord)
// X-macro generator for the first column: emits "type," — suitable for
// building an enumerator list out of CODE_EVENTS_TYPE_LIST.
58 #define DECLARE_TYPE(type, ignore) type,
// X-macro generator for the second column: declares one member field per
// record class, named after the class with a trailing underscore
// (e.g. "CodeCreateEventRecord CodeCreateEventRecord_;").
129 #define DECLARE_CLASS(ignore, type) type type##_;
// Stops the processor; the "Synchronously" suffix suggests it blocks
// until the processing thread has finished — TODO confirm in the .cc.
147 void StopSynchronously();
// Inline accessor for the run flag (INLINE is a project macro).
148 INLINE(
bool running()) {
return running_; }
// Records the current VM stack of |isolate| as a synthetic sample.
152 void AddCurrentStack(
Isolate* isolate);
// Declaration only here; the body is not visible in this chunk.
159 inline void FinishTickSample();
// Custom allocation operators for this class. The rationale is not
// visible here — presumably to control the allocator or alignment used
// for the processor object; TODO confirm in the implementation file.
163 void*
operator new(
size_t size);
164 void operator delete(
void* ptr);
// Dequeues and handles one code event; the bool presumably reports
// whether an event was processed — TODO confirm in the .cc.
168 bool ProcessCodeEvent();
// NOTE(review): this enum is truncated by extraction — only one
// enumerator and no closing brace are visible in this chunk.
170 enum SampleProcessingResult {
172 FoundSampleForNextCodeEvent,
175 SampleProcessingResult ProcessOneSample();
177 ProfileGenerator* generator_;
// Sampling period; const, so fixed for the processor's lifetime.
181 const TimeDelta period_;
// Queue of code-event records awaiting processing.
182 UnboundQueue<CodeEventsContainer> events_buffer_;
// Tick-sample ring buffer: capacity derived from a 1 MB budget divided
// by the per-record size.
183 static const size_t kTickSampleBufferSize = 1 *
MB;
184 static const size_t kTickSampleQueueLength =
185 kTickSampleBufferSize /
sizeof(TickSampleEventRecord);
186 SamplingCircularQueue<TickSampleEventRecord,
187 kTickSampleQueueLength> ticks_buffer_;
// Separate, unbounded queue for tick samples; the name suggests these
// originate on the VM thread rather than the sampler.
188 UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
// Event-id counters; presumably used to order tick samples relative to
// code events — TODO confirm against ProcessOneSample's logic.
189 unsigned last_code_event_id_;
190 unsigned last_processed_code_event_id_;
// Dispatch macro for logging/profiling events: evaluates IsolateGetter
// exactly once, then guards on "logger is logging code events OR the
// CPU profiler is active" before (presumably) forwarding Call.
// NOTE(review): the macro tail — the Call expansion and closing braces —
// is not visible in this chunk.
194 #define PROFILE(IsolateGetter, Call) \
196 Isolate* cpu_profiler_isolate = (IsolateGetter); \
197 v8::internal::Logger* logger = cpu_profiler_isolate->logger(); \
198 CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
199 if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
// Sets the tick-sampling interval used for subsequent profiling runs.
216 void set_sampling_interval(TimeDelta value);
// Starts a named profiling session. Only the const char* overload has a
// default for record_samples; the String* overload requires it.
217 void StartProfiling(
const char* title,
bool record_samples =
false);
218 void StartProfiling(
String* title,
bool record_samples);
221 int GetProfilesCount();
223 void DeleteAllProfiles();
// Declaration only here; the body is not visible in this chunk.
228 inline void FinishTickSample();
// NOTE(review): the following two parameter lists are orphaned — the
// extraction dropped the function names/signatures they belong to
// (presumably CodeCreateEvent overloads; confirm against the full header).
246 Name* source,
int line,
int column);
248 Code* code,
int args_count);
// CodeEventListener-style virtual notifications from the VM.
251 virtual void CodeDeleteEvent(
Address from);
252 virtual void GetterCallbackEvent(
Name* name,
Address entry_point);
253 virtual void RegExpCodeCreateEvent(
Code* code,
String* source);
254 virtual void SetterCallbackEvent(
Name* name,
Address entry_point);
255 virtual void SharedFunctionInfoMoveEvent(
Address from,
Address to);
// Inline query of the profiling flag (INLINE is a project macro).
257 INLINE(
bool is_profiling()
const) {
return is_profiling_; }
// NOTE(review): dangling return statement — the enclosing accessor
// (is_profiling_address, per the residue at the end of this chunk) was
// cut by extraction.
259 return &is_profiling_;
// Private helpers managing the background processor's lifecycle.
267 void StartProcessorIfNotStarted();
268 void StopProcessorIfLastProfile(
const char* title);
269 void StopProcessor();
270 void ResetProfiles();
274 TimeDelta sampling_interval_;
// Name suggests the logger's previous logging state is saved so it can
// be restored when profiling stops — TODO confirm in the .cc.
278 bool saved_is_logging_;
287 #endif // V8_CPU_PROFILER_H_
INLINE(bool is_profiling() const)
#define CODE_EVENTS_TYPE_LIST(V)
ProfileGenerator * generator() const
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
bool * is_profiling_address()
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
CodeEventsContainer(CodeEventRecord::Type type=CodeEventRecord::NONE)
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Isolate * isolate() const
TickSampleEventRecord(unsigned order)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function info
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
virtual ~ProfilerEventsProcessor()
ProfilerEventsProcessor * processor() const
Builtins::Name builtin_id
#define DECLARE_CLASS(ignore, type)
virtual void CodeMovingGCEvent()
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
#define DECLARE_TYPE(type, ignore)