v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
cpu-profiler.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
class TokenEnumerator;

#define CODE_EVENTS_TYPE_LIST(V)                             \
  V(CODE_CREATION, CodeCreateEventRecord)                    \
  V(CODE_MOVE, CodeMoveEventRecord)                          \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  unsigned order;
};
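
// Expanding CODE_EVENTS_TYPE_LIST with DECLARE_TYPE yields one enumerator per
// record class, i.e. after preprocessing the enum above reads:
//
//   enum Type {
//     NONE = 0,
//     CODE_CREATION,
//     CODE_MOVE,
//     SHARED_FUNC_MOVE,
//     NUMBER_OF_TYPES
//   };
//
// The same list is reused below (see CodeEventsContainer), so the enum and
// the record classes cannot drift apart.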


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order)
      : filler(1),
        order(order) {
    ASSERT(filler != SamplingCircularQueue::kClear);
  }

  // The first machine word of a TickSampleEventRecord must not ever
  // become equal to SamplingCircularQueue::kClear. As both order and
  // TickSample's first field are not reliable in this sense (order
  // can overflow, TickSample can have all fields reset), we are
  // forced to use an artificial filler field.
  int filler;
  unsigned order;
  TickSample sample;

  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }
};


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  inline void Stop() { running_ = false; }
  INLINE(bool running()) { return running_; }

  // Events adding methods. Called by VM threads.
  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
                           const char* prefix, String* name,
                           Address start);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       String* name,
                       String* resource_name, int line_number,
                       Address start, unsigned size,
                       Address shared);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       const char* name,
                       Address start, unsigned size);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       int args_count,
                       Address start, unsigned size);
  void CodeMoveEvent(Address from, Address to);
  void CodeDeleteEvent(Address from);
  void SharedFunctionInfoMoveEvent(Address from, Address to);
  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
                             const char* prefix, String* name,
                             Address start, unsigned size);
  // Puts current stack into tick sample events buffer.
  void AddCurrentStack();

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled.) This method returns a pointer to the
  // next record of the buffer.
  INLINE(TickSample* TickSampleEvent());

 private:
  union CodeEventsContainer {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
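
  // Expanding CODE_EVENTS_TYPE_LIST with DECLARE_CLASS overlays one member
  // per record type in this union, i.e.:
  //   CodeCreateEventRecord CodeCreateEventRecord_;
  //   CodeMoveEventRecord CodeMoveEventRecord_;
  //   SharedFunctionInfoMoveEventRecord SharedFunctionInfoMoveEventRecord_;
  // A single queue slot can therefore hold any code event; the shared header
  // in |generic| (type and order) identifies which member is active.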

  // Called from events processing thread (Run() method.)
  bool ProcessCodeEvent(unsigned* dequeue_order);
  bool ProcessTicks(unsigned dequeue_order);

  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));

  ProfileGenerator* generator_;
  bool running_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  SamplingCircularQueue ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned enqueue_order_;
};
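
// In short: VM threads enqueue code events into events_buffer_ and pre-built
// tick records into ticks_from_vm_buffer_, the stack sampler writes ticks
// straight into ticks_buffer_, and the processor thread's Run() loop drains
// the queues in enqueue order and feeds the ProfileGenerator. (This summary
// is inferred from the members and comments above; the loop itself lives in
// cpu-profiler.cc.)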

} }  // namespace v8::internal


#define PROFILE(isolate, Call)                                \
  LOG_CODE_EVENT(isolate, Call);                              \
  do {                                                        \
    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
      v8::internal::CpuProfiler::Call;                        \
    }                                                         \
  } while (false)
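
// A typical call site (a sketch; real call sites are in the VM's logging
// code, not in this header):
//
//   PROFILE(isolate, CodeMoveEvent(from, to));
//
// routes the event through LOG_CODE_EVENT and, only if profiling is active
// for the isolate, forwards it to the static CpuProfiler::CodeMoveEvent
// declared below.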


namespace v8 {
namespace internal {


// TODO(isolates): isolatify this class.
class CpuProfiler {
 public:
  static void SetUp();
  static void TearDown();

  static void StartProfiling(const char* title);
  static void StartProfiling(String* title);
  static CpuProfile* StopProfiling(const char* title);
  static CpuProfile* StopProfiling(Object* security_token, String* title);
  static int GetProfilesCount();
  static CpuProfile* GetProfile(Object* security_token, int index);
  static CpuProfile* FindProfile(Object* security_token, unsigned uid);
  static void DeleteAllProfiles();
  static void DeleteProfile(CpuProfile* profile);
  static bool HasDetachedProfiles();

  // Invoked from stack sampler (thread or signal handler.)
  static TickSample* TickSampleEvent(Isolate* isolate);

  // Must be called via PROFILE macro, otherwise will crash when
  // profiling is not enabled.
  static void CallbackEvent(String* name, Address entry_point);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, const char* comment);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* source, int line);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, int args_count);
  static void CodeMovingGCEvent() {}
  static void CodeMoveEvent(Address from, Address to);
  static void CodeDeleteEvent(Address from);
  static void GetterCallbackEvent(String* name, Address entry_point);
  static void RegExpCodeCreateEvent(Code* code, String* source);
  static void SetterCallbackEvent(String* name, Address entry_point);
  static void SharedFunctionInfoMoveEvent(Address from, Address to);

  // TODO(isolates): this doesn't have to use atomics anymore.

  static INLINE(bool is_profiling(Isolate* isolate)) {
    CpuProfiler* profiler = isolate->cpu_profiler();
    return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
  }

 private:
  CpuProfiler();
  ~CpuProfiler();
  void StartCollectingProfile(const char* title);
  void StartCollectingProfile(String* title);
  void StartProcessorIfNotStarted();
  CpuProfile* StopCollectingProfile(const char* title);
  CpuProfile* StopCollectingProfile(Object* security_token, String* title);
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();

  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
  TokenEnumerator* token_enumerator_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  int saved_logging_nesting_;
  bool need_to_stop_sampler_;
  Atomic32 is_profiling_;

 private:
  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
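
// A rough usage sketch of the static interface above (the public
// v8::CpuProfiler API in include/v8-profiler.h ultimately delegates here):
//
//   CpuProfiler::SetUp();
//   CpuProfiler::StartProfiling("startup");   // starts sampler and processor
//   // ... run JavaScript ...
//   CpuProfile* profile = CpuProfiler::StopProfiling("startup");
//   // ... inspect or serialize the profile ...
//   CpuProfiler::TearDown();
//
// Profiles are identified by title, so several can be collected concurrently
// and stopped independently.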

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_