v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
cpu-profiler.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "platform/time.h"
#include "sampler.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;

#define CODE_EVENTS_TYPE_LIST(V)                         \
  V(CODE_CREATION, CodeCreateEventRecord)                \
  V(CODE_MOVE, CodeMoveEventRecord)                      \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
  V(REPORT_BUILTIN, ReportBuiltinEventRecord)
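// CODE_EVENTS_TYPE_LIST is an X-macro: each V(NAME, Record) entry is expanded
// below by DECLARE_TYPE into an enum value of CodeEventRecord::Type, and by
// DECLARE_CLASS into a union member of CodeEventsContainer.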


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  mutable unsigned order;
};


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class ReportBuiltinEventRecord : public CodeEventRecord {
 public:
  Address start;
  Builtins::Name builtin_id;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order) : order(order) { }

  unsigned order;
  TickSample sample;
};
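// Note: as read from the declarations in this file, the order field ties a
// sample to the stream of code events (compare last_code_event_id_ and
// last_processed_code_event_id_ in ProfilerEventsProcessor below), and the
// parameterless constructor exists for records dequeued from the ticks
// buffer.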


class CodeEventsContainer {
 public:
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
    generic.type = type;
  }
  union {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_TYPE
  };
};


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  ProfilerEventsProcessor(ProfileGenerator* generator,
                          Sampler* sampler,
                          TimeDelta period);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  void StopSynchronously();
  INLINE(bool running()) { return running_; }
  void Enqueue(const CodeEventsContainer& event);

  // Puts current stack into tick sample events buffer.
  void AddCurrentStack(Isolate* isolate);

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled.) This method returns a pointer to the
  // next record of the buffer.
  inline TickSample* StartTickSample();
  inline void FinishTickSample();
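  //
  // A sketch of the intended sampler-side protocol (illustrative only, not
  // code from this file):
  //   TickSample* sample = processor->StartTickSample();
  //   if (sample != NULL) {
  //     // ... fill the reserved slot in place ...
  //     processor->FinishTickSample();
  //   }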

  // SamplingCircularQueue has stricter alignment requirements than a normal new
  // can fulfil, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

 private:
  // Called from events processing thread (Run() method.)
  bool ProcessCodeEvent();

  enum SampleProcessingResult {
    OneSampleProcessed,
    FoundSampleForNextCodeEvent,
    NoSamplesInQueue
  };
  SampleProcessingResult ProcessOneSample();

  ProfileGenerator* generator_;
  Sampler* sampler_;
  bool running_;
  // Sampling period in microseconds.
  const TimeDelta period_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  static const size_t kTickSampleBufferSize = 1 * MB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
  SamplingCircularQueue<TickSampleEventRecord,
                        kTickSampleQueueLength> ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned last_code_event_id_;
  unsigned last_processed_code_event_id_;
};
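// Producer-side sketch (illustrative; assumes a constructed and started
// processor): the VM thread hands code events over via
// processor->Enqueue(container) and records VM-triggered stacks with
// AddCurrentStack(isolate), while the processor thread's Run() loop drains
// both queues via ProcessCodeEvent() and ProcessOneSample() until
// StopSynchronously() is called.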


#define PROFILE(IsolateGetter, Call)                                        \
  do {                                                                      \
    Isolate* cpu_profiler_isolate = (IsolateGetter);                        \
    v8::internal::Logger* logger = cpu_profiler_isolate->logger();          \
    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();       \
    if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
      logger->Call;                                                         \
    }                                                                       \
  } while (false)
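// Illustrative call site (not from this file); the macro forwards Call to the
// Logger whenever either code-event logging or CPU profiling is active:
//   PROFILE(isolate, CodeCreateEvent(Logger::FUNCTION_TAG, code, name));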


class CpuProfiler : public CodeEventListener {
 public:
  explicit CpuProfiler(Isolate* isolate);

  CpuProfiler(Isolate* isolate,
              CpuProfilesCollection* test_collection,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  virtual ~CpuProfiler();

  void set_sampling_interval(TimeDelta value);
  void StartProfiling(const char* title, bool record_samples = false);
  void StartProfiling(String* title, bool record_samples);
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String* title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);

  // Invoked from stack sampler (thread or signal handler.)
  inline TickSample* StartTickSample();
  inline void FinishTickSample();

  // Must be called via PROFILE macro, otherwise will crash when
  // profiling is not enabled.
  virtual void CallbackEvent(Name* name, Address entry_point);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, const char* comment);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info,
                               Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info,
                               Name* source, int line, int column);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
  virtual void CodeMovingGCEvent() {}
  virtual void CodeMoveEvent(Address from, Address to);
  virtual void CodeDeleteEvent(Address from);
  virtual void GetterCallbackEvent(Name* name, Address entry_point);
  virtual void RegExpCodeCreateEvent(Code* code, String* source);
  virtual void SetterCallbackEvent(Name* name, Address entry_point);
  virtual void SharedFunctionInfoMoveEvent(Address from, Address to);

  INLINE(bool is_profiling() const) { return is_profiling_; }
  bool* is_profiling_address() {
    return &is_profiling_;
  }

  ProfileGenerator* generator() const { return generator_; }
  ProfilerEventsProcessor* processor() const { return processor_; }
  Isolate* isolate() const { return isolate_; }

 private:
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();
  void LogBuiltins();

  Isolate* isolate_;
  TimeDelta sampling_interval_;
  CpuProfilesCollection* profiles_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  bool saved_is_logging_;
  bool is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_
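For orientation, a minimal sketch of how the CpuProfiler above is typically driven from inside V8 (a hypothetical call sequence, not code from this file; isolate is assumed to be an initialized v8::internal::Isolate*, and TimeDelta::FromMicroseconds is assumed to come from the included platform/time.h):

  CpuProfiler* profiler = isolate->cpu_profiler();
  profiler->set_sampling_interval(TimeDelta::FromMicroseconds(100));
  profiler->StartProfiling("startup", true);  // record_samples = true
  // ... run JavaScript ...
  CpuProfile* profile = profiler->StopProfiling("startup");
  // ... inspect or serialize the profile ...
  profiler->DeleteProfile(profile);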