V8 3.11.10 (Node.js 0.8.26)
V8 is Google's open-source JavaScript engine.
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
cpu-profiler.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "cpu-profiler-inl.h"
31 
32 #include "frames-inl.h"
33 #include "hashmap.h"
34 #include "log-inl.h"
35 #include "vm-state-inl.h"
36 
37 #include "../include/v8-profiler.h"
38 
39 namespace v8 {
40 namespace internal {
41 
// Sizing of the profiler's communication buffers and thread:
static const int kEventsBufferSize = 256 * KB;           // code-event queue capacity
static const int kTickSamplesBufferChunkSize = 64 * KB;  // one tick-sample ring chunk
static const int kTickSamplesBufferChunksCount = 16;     // number of chunks in the ring
static const int kProfilerStackSize = 64 * KB;           // processor thread stack size
46 
47 
49  : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
50  generator_(generator),
51  running_(true),
52  ticks_buffer_(sizeof(TickSampleEventRecord),
53  kTickSamplesBufferChunkSize,
54  kTickSamplesBufferChunksCount),
55  enqueue_order_(0) {
56 }
57 
58 
60  const char* prefix,
61  String* name,
62  Address start) {
63  if (FilterOutCodeCreateEvent(tag)) return;
64  CodeEventsContainer evt_rec;
65  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
66  rec->type = CodeEventRecord::CODE_CREATION;
67  rec->order = ++enqueue_order_;
68  rec->start = start;
69  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
70  rec->size = 1;
71  rec->shared = NULL;
72  events_buffer_.Enqueue(evt_rec);
73 }
74 
75 
77  String* name,
78  String* resource_name,
79  int line_number,
80  Address start,
81  unsigned size,
82  Address shared) {
83  if (FilterOutCodeCreateEvent(tag)) return;
84  CodeEventsContainer evt_rec;
85  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
86  rec->type = CodeEventRecord::CODE_CREATION;
87  rec->order = ++enqueue_order_;
88  rec->start = start;
89  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
90  rec->size = size;
91  rec->shared = shared;
92  events_buffer_.Enqueue(evt_rec);
93 }
94 
95 
97  const char* name,
98  Address start,
99  unsigned size) {
100  if (FilterOutCodeCreateEvent(tag)) return;
101  CodeEventsContainer evt_rec;
102  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
103  rec->type = CodeEventRecord::CODE_CREATION;
104  rec->order = ++enqueue_order_;
105  rec->start = start;
106  rec->entry = generator_->NewCodeEntry(tag, name);
107  rec->size = size;
108  rec->shared = NULL;
109  events_buffer_.Enqueue(evt_rec);
110 }
111 
112 
114  int args_count,
115  Address start,
116  unsigned size) {
117  if (FilterOutCodeCreateEvent(tag)) return;
118  CodeEventsContainer evt_rec;
119  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
120  rec->type = CodeEventRecord::CODE_CREATION;
121  rec->order = ++enqueue_order_;
122  rec->start = start;
123  rec->entry = generator_->NewCodeEntry(tag, args_count);
124  rec->size = size;
125  rec->shared = NULL;
126  events_buffer_.Enqueue(evt_rec);
127 }
128 
129 
131  CodeEventsContainer evt_rec;
132  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
133  rec->type = CodeEventRecord::CODE_MOVE;
134  rec->order = ++enqueue_order_;
135  rec->from = from;
136  rec->to = to;
137  events_buffer_.Enqueue(evt_rec);
138 }
139 
140 
142  Address to) {
143  CodeEventsContainer evt_rec;
145  &evt_rec.SharedFunctionInfoMoveEventRecord_;
146  rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
147  rec->order = ++enqueue_order_;
148  rec->from = from;
149  rec->to = to;
150  events_buffer_.Enqueue(evt_rec);
151 }
152 
153 
156  const char* prefix,
157  String* name,
158  Address start,
159  unsigned size) {
160  if (FilterOutCodeCreateEvent(tag)) return;
161  CodeEventsContainer evt_rec;
162  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
163  rec->type = CodeEventRecord::CODE_CREATION;
164  rec->order = ++enqueue_order_;
165  rec->start = start;
166  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
167  rec->size = size;
168  events_buffer_.Enqueue(evt_rec);
169 }
170 
171 
173  TickSampleEventRecord record(enqueue_order_);
174  TickSample* sample = &record.sample;
175  Isolate* isolate = Isolate::Current();
176  sample->state = isolate->current_vm_state();
177  sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
178  for (StackTraceFrameIterator it(isolate);
179  !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
180  it.Advance()) {
181  sample->stack[sample->frames_count++] = it.frame()->pc();
182  }
183  ticks_from_vm_buffer_.Enqueue(record);
184 }
185 
186 
187 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
188  if (!events_buffer_.IsEmpty()) {
189  CodeEventsContainer record;
190  events_buffer_.Dequeue(&record);
191  switch (record.generic.type) {
192 #define PROFILER_TYPE_CASE(type, clss) \
193  case CodeEventRecord::type: \
194  record.clss##_.UpdateCodeMap(generator_->code_map()); \
195  break;
196 
198 
199 #undef PROFILER_TYPE_CASE
200  default: return true; // Skip record.
201  }
202  *dequeue_order = record.generic.order;
203  return true;
204  }
205  return false;
206 }
207 
208 
// Drains tick-sample records whose |order| matches |dequeue_order| from both
// the VM-originated queue (ticks_from_vm_buffer_) and the sampler's ring
// buffer (ticks_buffer_), feeding each sample to the profile generator.
// Returns true while a sample with a later order remains (caller should
// advance dequeue_order via ProcessCodeEvent), false when both sources are
// exhausted for now.
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    // First consume a stack sample pushed synchronously by the VM, if it
    // belongs to the current order.
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible as the
    // sampler writes w/o any sync to the queue, so if the processor
    // will get far behind, a record may be modified right under its
    // feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count > TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      // This sample belongs to a later code event; leave it in the buffer
      // (no FinishDequeue) and let the caller advance dequeue_order first.
      return true;
    }
  }
}
240 
241 
243  unsigned dequeue_order = 0;
244 
245  while (running_) {
246  // Process ticks until we have any.
247  if (ProcessTicks(dequeue_order)) {
248  // All ticks of the current dequeue_order are processed,
249  // proceed to the next code event.
250  ProcessCodeEvent(&dequeue_order);
251  }
252  YieldCPU();
253  }
254 
255  // Process remaining tick events.
256  ticks_buffer_.FlushResidualRecords();
257  // Perform processing until we have tick events, skip remaining code events.
258  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
259 }
260 
261 
262 void CpuProfiler::StartProfiling(const char* title) {
263  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
264  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
265 }
266 
267 
269  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
270  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
271 }
272 
273 
275  Isolate* isolate = Isolate::Current();
276  return is_profiling(isolate) ?
277  isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
278 }
279 
280 
282  Isolate* isolate = Isolate::Current();
283  return is_profiling(isolate) ?
284  isolate->cpu_profiler()->StopCollectingProfile(
285  security_token, title) : NULL;
286 }
287 
288 
290  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
291  // The count of profiles doesn't depend on a security token.
292  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
294 }
295 
296 
297 CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
298  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
299  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
300  const int token = profiler->token_enumerator_->GetTokenId(security_token);
301  return profiler->profiles_->Profiles(token)->at(index);
302 }
303 
304 
305 CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
306  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
307  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
308  const int token = profiler->token_enumerator_->GetTokenId(security_token);
309  return profiler->profiles_->GetProfile(token, uid);
310 }
311 
312 
314  if (CpuProfiler::is_profiling(isolate)) {
315  return isolate->cpu_profiler()->processor_->TickSampleEvent();
316  } else {
317  return NULL;
318  }
319 }
320 
321 
323  Isolate* isolate = Isolate::Current();
324  ASSERT(isolate->cpu_profiler() != NULL);
325  if (is_profiling(isolate)) {
326  isolate->cpu_profiler()->StopProcessor();
327  }
328  isolate->cpu_profiler()->ResetProfiles();
329 }
330 
331 
333  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
334  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
335  delete profile;
336 }
337 
338 
340  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
341  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
342 }
343 
344 
346  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
347  Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
348 }
349 
350 
352  Code* code, const char* comment) {
353  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
354  tag, comment, code->address(), code->ExecutableSize());
355 }
356 
357 
359  Code* code, String* name) {
360  Isolate* isolate = Isolate::Current();
361  isolate->cpu_profiler()->processor_->CodeCreateEvent(
362  tag,
363  name,
364  isolate->heap()->empty_string(),
366  code->address(),
367  code->ExecutableSize(),
368  NULL);
369 }
370 
371 
373  Code* code,
374  SharedFunctionInfo* shared,
375  String* name) {
376  Isolate* isolate = Isolate::Current();
377  isolate->cpu_profiler()->processor_->CodeCreateEvent(
378  tag,
379  name,
380  isolate->heap()->empty_string(),
382  code->address(),
383  code->ExecutableSize(),
384  shared->address());
385 }
386 
387 
389  Code* code,
390  SharedFunctionInfo* shared,
391  String* source, int line) {
392  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
393  tag,
394  shared->DebugName(),
395  source,
396  line,
397  code->address(),
398  code->ExecutableSize(),
399  shared->address());
400 }
401 
402 
404  Code* code, int args_count) {
405  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
406  tag,
407  args_count,
408  code->address(),
409  code->ExecutableSize());
410 }
411 
412 
414  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
415 }
416 
417 
419 }
420 
421 
423  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
424  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
425 }
426 
427 
429  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
430  Logger::CALLBACK_TAG, "get ", name, entry_point);
431 }
432 
433 
435  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
436  Logger::REG_EXP_TAG,
437  "RegExp: ",
438  source,
439  code->address(),
440  code->ExecutableSize());
441 }
442 
443 
445  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
446  Logger::CALLBACK_TAG, "set ", name, entry_point);
447 }
448 
449 
// Constructs an idle profiler: the generator_/processor_ pair stays NULL
// until StartProcessorIfNotStarted() lazily creates it.
CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),  // profile uids are 1-based
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL),
      need_to_stop_sampler_(false),
      is_profiling_(false) {
}
459 
460 
CpuProfiler::~CpuProfiler() {
  // generator_/processor_ are deleted in StopProcessor(); only the
  // collaborators allocated in the constructor remain to be freed here.
  delete token_enumerator_;
  delete profiles_;
}
465 
466 
// Discards all accumulated profiles by replacing the collection wholesale.
void CpuProfiler::ResetProfiles() {
  delete profiles_;
  profiles_ = new CpuProfilesCollection();
}
471 
// Registers a new profile titled |title|; if this is the first active
// profile, also spins up the sampling/processing machinery.  Finally
// records the current VM stack so the profile starts with a sample.
void CpuProfiler::StartCollectingProfile(const char* title) {
  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
    StartProcessorIfNotStarted();
  }
  // NOTE(review): assumes processor_ is non-NULL here, i.e. that
  // StartProfiling() only returns false while another profile (and hence
  // the processor) is already active — verify against
  // CpuProfilesCollection::StartProfiling.
  processor_->AddCurrentStack();
}
478 
479 
480 void CpuProfiler::StartCollectingProfile(String* title) {
481  StartCollectingProfile(profiles_->GetName(title));
482 }
483 
484 
// Lazily creates the ProfileGenerator/ProfilerEventsProcessor pair, seeds
// the code map with already-compiled code, and enables stack sampling.
// Idempotent: does nothing when the processor is already running.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    Isolate* isolate = Isolate::Current();

    // Disable logging when using the new implementation.
    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
    isolate->logger()->logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(generator_);
    NoBarrier_Store(&is_profiling_, true);
    processor_->Start();
    // Enumerate stuff we already have in the heap.
    if (isolate->heap()->HasBeenSetUp()) {
      if (!FLAG_prof_browser_mode) {
        // Temporarily force code logging so LogCodeObjects() emits
        // creation events for pre-existing code objects.
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        isolate->logger()->LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      isolate->logger()->LogCompiledFunctions();
      isolate->logger()->LogAccessorCallbacks();
    }
    // Enable stack sampling.
    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
    if (!sampler->IsActive()) {
      sampler->Start();
      // We started the sampler ourselves, so we are responsible for
      // stopping it in StopProcessor().
      need_to_stop_sampler_ = true;
    }
    sampler->IncreaseProfilingDepth();
  }
}
516 
517 
518 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
519  const double actual_sampling_rate = generator_->actual_sampling_rate();
520  StopProcessorIfLastProfile(title);
521  CpuProfile* result =
523  title,
524  actual_sampling_rate);
525  if (result != NULL) {
526  result->Print();
527  }
528  return result;
529 }
530 
531 
// Finishes the profile titled |title| and returns it, filtered through the
// view granted to |security_token|.
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  // Read the rate first: StopProcessorIfLastProfile() below may delete
  // generator_ when this was the last active profile.
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}
540 
541 
542 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
543  if (profiles_->IsLastProfile(title)) StopProcessor();
544 }
545 
546 
// Shuts down stack sampling and the processor thread, then restores the
// logging state saved in StartProcessorIfNotStarted().  Order matters:
// the sampler is quiesced before the processor is joined, so no new ticks
// are produced while the queues drain.
void CpuProfiler::StopProcessor() {
  Logger* logger = Isolate::Current()->logger();
  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
  sampler->DecreaseProfilingDepth();
  if (need_to_stop_sampler_) {
    // We started this sampler ourselves; stop it symmetrically.
    sampler->Stop();
    need_to_stop_sampler_ = false;
  }
  NoBarrier_Store(&is_profiling_, false);
  processor_->Stop();
  processor_->Join();
  delete processor_;
  delete generator_;
  processor_ = NULL;
  generator_ = NULL;
  logger->logging_nesting_ = saved_logging_nesting_;
}
564 
565 
567  Isolate* isolate = Isolate::Current();
568  if (isolate->cpu_profiler() == NULL) {
569  isolate->set_cpu_profiler(new CpuProfiler());
570  }
571 }
572 
573 
575  Isolate* isolate = Isolate::Current();
576  if (isolate->cpu_profiler() != NULL) {
577  delete isolate->cpu_profiler();
578  }
579  isolate->set_cpu_profiler(NULL);
580 }
581 
582 } } // namespace v8::internal
byte * Address
Definition: globals.h:172
static CpuProfile * FindProfile(Object *security_token, unsigned uid)
static void DeleteProfile(CpuProfile *profile)
static void SetterCallbackEvent(String *name, Address entry_point)
static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code *code, const char *comment)
static void CodeDeleteEvent(Address from)
StateTag current_vm_state()
Definition: isolate.h:991
void CallbackCreateEvent(Logger::LogEventsAndTags tag, const char *prefix, String *name, Address start)
Definition: cpu-profiler.cc:59
const int KB
Definition: globals.h:221
static void GetterCallbackEvent(String *name, Address entry_point)
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") 
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this 
substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") 
DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") 
DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays 
with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") 
DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) 
DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a 
stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") 
DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") 
DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. 
Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") 
DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular 
expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
CpuProfile * GetProfile(int security_token_id, unsigned uid)
static CpuProfile * StopProfiling(const char *title)
TickSample * sample
#define CODE_EVENTS_TYPE_LIST(V)
Definition: cpu-profiler.h:47
#define ASSERT(condition)
Definition: checks.h:270
const char * comment() const
Definition: flags.cc:1362
void SharedFunctionInfoMoveEvent(Address from, Address to)
Address stack[kMaxFramesCount]
Definition: platform.h:701
static CpuProfile * GetProfile(Object *security_token, int index)
static bool HasDetachedProfiles()
void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value)
void RecordTickSample(const TickSample &sample)
static TickSample * TickSampleEvent(Isolate *isolate)
void CodeMoveEvent(Address from, Address to)
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag, const char *prefix, String *name, Address start, unsigned size)
ProfilerEventsProcessor(ProfileGenerator *generator)
Definition: cpu-profiler.cc:48
void CodeCreateEvent(Logger::LogEventsAndTags tag, String *name, String *resource_name, int line_number, Address start, unsigned size, Address shared)
Definition: cpu-profiler.cc:76
static void SharedFunctionInfoMoveEvent(Address from, Address to)
static TickSampleEventRecord * cast(void *value)
Definition: cpu-profiler.h:117
static const int kMaxFramesCount
Definition: platform.h:700
int ExecutableSize()
Definition: objects.h:4457
static const char *const kEmptyNamePrefix
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
static void StartProfiling(const char *title)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping true
Definition: flags.cc:157
bool StartProfiling(const char *title, unsigned uid)
static void DeleteAllProfiles()
static void RegExpCodeCreateEvent(Code *code, String *source)
CpuProfile * StopProfiling(int security_token_id, const char *title, double actual_sampling_rate)
static const int kNoLineNumberInfo
Definition: v8-profiler.h:113
#define PROFILER_TYPE_CASE(type, clss)
const char * GetName(String *name)
static void CodeMoveEvent(Address from, Address to)
static void CallbackEvent(String *name, Address entry_point)
List< CpuProfile * > * Profiles(int security_token_id)