v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
cpu-profiler.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "cpu-profiler-inl.h"

#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
#include "vm-state-inl.h"

#include "../include/v8-profiler.h"

namespace v8 {
namespace internal {

static const int kEventsBufferSize = 256 * KB;
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
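// Editor's note: the tick-sample queue below is sized at
// kTickSamplesBufferChunksCount * kTickSamplesBufferChunkSize,
// i.e. 16 * 64 KB = 1 MB of TickSampleEventRecords, shared between
// the sampler (producer) and the processor thread (consumer).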


ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0) {
}


void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
                                                  const char* prefix,
                                                  String* name,
                                                  Address start) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = 1;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
}
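// Editor's note: callbacks have no known machine-code size here, so
// rec->size = 1 registers a one-byte range covering just the entry address.
// The ++enqueue_order_ stamp on every record is what lets ProcessTicks()
// interleave tick samples with code events in arrival order.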


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              String* name,
                                              String* resource_name,
                                              int line_number,
                                              Address start,
                                              unsigned size,
                                              Address shared) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
  rec->size = size;
  rec->shared = shared;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              const char* name,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, name);
  rec->size = size;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                              int args_count,
                                              Address start,
                                              unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, args_count);
  rec->size = size;
  rec->shared = NULL;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
  CodeEventsContainer evt_rec;
  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
  rec->type = CodeEventRecord::CODE_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
                                                          Address to) {
  CodeEventsContainer evt_rec;
  SharedFunctionInfoMoveEventRecord* rec =
      &evt_rec.SharedFunctionInfoMoveEventRecord_;
  rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
  rec->order = ++enqueue_order_;
  rec->from = from;
  rec->to = to;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::RegExpCodeCreateEvent(
    Logger::LogEventsAndTags tag,
    const char* prefix,
    String* name,
    Address start,
    unsigned size) {
  if (FilterOutCodeCreateEvent(tag)) return;
  CodeEventsContainer evt_rec;
  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
  rec->type = CodeEventRecord::CODE_CREATION;
  rec->order = ++enqueue_order_;
  rec->start = start;
  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
  rec->size = size;
  events_buffer_.Enqueue(evt_rec);
}


void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record(enqueue_order_);
  TickSample* sample = &record.sample;
  Isolate* isolate = Isolate::Current();
  sample->state = isolate->current_vm_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  for (StackTraceFrameIterator it(isolate);
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    sample->stack[sample->frames_count++] = it.frame()->pc();
  }
  ticks_from_vm_buffer_.Enqueue(record);
}
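// Editor's note: the self-referential pc above appears to serve only as a
// non-NULL "valid sample" marker; the meaningful frames are the ones the
// StackTraceFrameIterator walk collects into sample->stack.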


bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}
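// Editor's note: CODE_EVENTS_TYPE_LIST (defined in cpu-profiler.h) is an
// X-macro that expands PROFILER_TYPE_CASE once per event kind, generating a
// switch case that forwards each record to the matching UpdateCodeMap().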


bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of the tick sample record to ensure that it won't
    // be modified while we are processing it. Modification is possible
    // because the sampler writes to the queue without any synchronization,
    // so if the processor gets far behind, a record may be overwritten
    // right under its feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count > TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      return true;
    }
  }
}
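// Editor's note: both queues carry the enqueue_order_ stamps assigned above.
// ProcessTicks() only consumes samples whose order matches the last dequeued
// code event, so a tick is never resolved against a code map that already
// reflects events newer than the tick itself.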


void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Process ticks while we have any.
    if (ProcessTicks(dequeue_order)) {
      // All ticks of the current dequeue_order are processed,
      // proceed to the next code event.
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
  // Keep processing as long as tick events remain, skipping leftover code
  // events.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}


void CpuProfiler::StartProfiling(const char* title) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}


void CpuProfiler::StartProfiling(String* title) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}


CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  Isolate* isolate = Isolate::Current();
  return is_profiling(isolate) ?
      isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
}


CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
  Isolate* isolate = Isolate::Current();
  return is_profiling(isolate) ?
      isolate->cpu_profiler()->StopCollectingProfile(
          security_token, title) : NULL;
}


int CpuProfiler::GetProfilesCount() {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  // The count of profiles doesn't depend on a security token.
  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
      TokenEnumerator::kNoSecurityToken)->length();
}


CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  const int token = profiler->token_enumerator_->GetTokenId(security_token);
  return profiler->profiles_->Profiles(token)->at(index);
}


CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  const int token = profiler->token_enumerator_->GetTokenId(security_token);
  return profiler->profiles_->GetProfile(token, uid);
}


TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
  if (CpuProfiler::is_profiling(isolate)) {
    return isolate->cpu_profiler()->processor_->TickSampleEvent();
  } else {
    return NULL;
  }
}
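// Editor's note: this is the sampler's entry point. When profiling is active
// it hands the sampler a TickSample slot in the processor's buffer to fill
// in; otherwise the NULL return lets the sampler skip the work entirely.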


void CpuProfiler::DeleteAllProfiles() {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate->cpu_profiler() != NULL);
  if (is_profiling(isolate)) {
    isolate->cpu_profiler()->StopProcessor();
  }
  isolate->cpu_profiler()->ResetProfiles();
}


void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
  delete profile;
}


bool CpuProfiler::HasDetachedProfiles() {
  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
}


void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, const char* comment) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag, comment, code->address(), code->ExecutableSize());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, String* name) {
  Isolate* isolate = Isolate::Current();
  isolate->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      name,
      isolate->heap()->empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize(),
      NULL);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code,
                                  SharedFunctionInfo* shared,
                                  String* name) {
  Isolate* isolate = Isolate::Current();
  isolate->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      name,
      isolate->heap()->empty_string(),
      v8::CpuProfileNode::kNoLineNumberInfo,
      code->address(),
      code->ExecutableSize(),
      shared->address());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code,
                                  SharedFunctionInfo* shared,
                                  String* source, int line) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      shared->DebugName(),
      source,
      line,
      code->address(),
      code->ExecutableSize(),
      shared->address());
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                  Code* code, int args_count) {
  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
      tag,
      args_count,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::CodeMoveEvent(Address from, Address to) {
  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
}


void CpuProfiler::CodeDeleteEvent(Address from) {
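  // Editor's note: intentionally left empty; code deletion events are
  // ignored by the profiler in this version of V8.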
}


void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
}


void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "get ", name, entry_point);
}


void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
      Logger::REG_EXP_TAG,
      "RegExp: ",
      source,
      code->address(),
      code->ExecutableSize());
}


void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
      Logger::CALLBACK_TAG, "set ", name, entry_point);
}


CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL),
      need_to_stop_sampler_(false),
      is_profiling_(false) {
}


CpuProfiler::~CpuProfiler() {
  delete token_enumerator_;
  delete profiles_;
}


void CpuProfiler::ResetProfiles() {
  delete profiles_;
  profiles_ = new CpuProfilesCollection();
}

void CpuProfiler::StartCollectingProfile(const char* title) {
  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
    StartProcessorIfNotStarted();
  }
  processor_->AddCurrentStack();
}
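// Editor's note: AddCurrentStack() is called unconditionally here, so every
// newly started profile is seeded with the stack that was live at the moment
// profiling began rather than starting empty.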


void CpuProfiler::StartCollectingProfile(String* title) {
  StartCollectingProfile(profiles_->GetName(title));
}


void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    Isolate* isolate = Isolate::Current();

    // Disable logging when using the new implementation.
    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
    isolate->logger()->logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    NoBarrier_Store(&is_profiling_, true);
    processor_->Start();
    // Enumerate the code objects that are already in the heap.
    if (isolate->heap()->HasBeenSetUp()) {
      if (!FLAG_prof_browser_mode) {
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        isolate->logger()->LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      isolate->logger()->LogCompiledFunctions();
      isolate->logger()->LogAccessorCallbacks();
    }
    // Enable stack sampling.
    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
    if (!sampler->IsActive()) {
      sampler->Start();
      need_to_stop_sampler_ = true;
    }
    sampler->IncreaseProfilingDepth();
  }
}
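// Editor's note: the logger replay above (LogCodeObjects,
// LogCompiledFunctions, LogAccessorCallbacks) populates the profiler's code
// map with code compiled before profiling started; without it, early ticks
// could not be resolved to function names.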


CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}


CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}


void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
  if (profiles_->IsLastProfile(title)) StopProcessor();
}


void CpuProfiler::StopProcessor() {
  Logger* logger = Isolate::Current()->logger();
  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
  sampler->DecreaseProfilingDepth();
  if (need_to_stop_sampler_) {
    sampler->Stop();
    need_to_stop_sampler_ = false;
  }
  NoBarrier_Store(&is_profiling_, false);
  processor_->Stop();
  processor_->Join();
  delete processor_;
  delete generator_;
  processor_ = NULL;
  generator_ = NULL;
  logger->logging_nesting_ = saved_logging_nesting_;
}


void CpuProfiler::SetUp() {
  Isolate* isolate = Isolate::Current();
  if (isolate->cpu_profiler() == NULL) {
    isolate->set_cpu_profiler(new CpuProfiler());
  }
}


void CpuProfiler::TearDown() {
  Isolate* isolate = Isolate::Current();
  if (isolate->cpu_profiler() != NULL) {
    delete isolate->cpu_profiler();
  }
  isolate->set_cpu_profiler(NULL);
}

} }  // namespace v8::internal