35 #include "../include/v8-profiler.h"
49 TEST(ProfileNodeFindOrAddChild) {
51 ProfileNode* node = tree.root();
52 CodeEntry entry1(i::Logger::FUNCTION_TAG,
"aaa");
53 ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
55 CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
56 CodeEntry entry2(i::Logger::FUNCTION_TAG,
"bbb");
57 ProfileNode* childNode2 = node->FindOrAddChild(&entry2);
60 CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
61 CHECK_EQ(childNode2, node->FindOrAddChild(&entry2));
62 CodeEntry entry3(i::Logger::FUNCTION_TAG,
"ccc");
63 ProfileNode* childNode3 = node->FindOrAddChild(&entry3);
67 CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
68 CHECK_EQ(childNode2, node->FindOrAddChild(&entry2));
69 CHECK_EQ(childNode3, node->FindOrAddChild(&entry3));
73 TEST(ProfileNodeFindOrAddChildForSameFunction) {
74 const char* aaa =
"aaa";
76 ProfileNode* node = tree.root();
77 CodeEntry entry1(i::Logger::FUNCTION_TAG, aaa);
78 ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
80 CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
82 CodeEntry entry2(i::Logger::FUNCTION_TAG, aaa);
83 CHECK_EQ(childNode1, node->FindOrAddChild(&entry2));
85 CodeEntry entry3(i::Logger::FUNCTION_TAG, aaa);
86 CHECK_EQ(childNode1, node->FindOrAddChild(&entry3));
92 class ProfileTreeTestHelper {
94 explicit ProfileTreeTestHelper(
const ProfileTree* tree)
97 ProfileNode* Walk(CodeEntry* entry1,
98 CodeEntry* entry2 =
NULL,
99 CodeEntry* entry3 =
NULL) {
100 ProfileNode* node = tree_->root();
101 node = node->FindChild(entry1);
103 if (entry2 !=
NULL) {
104 node = node->FindChild(entry2);
107 if (entry3 !=
NULL) {
108 node = node->FindChild(entry3);
114 const ProfileTree* tree_;
119 TEST(ProfileTreeAddPathFromStart) {
120 CodeEntry entry1(i::Logger::FUNCTION_TAG,
"aaa");
121 CodeEntry entry2(i::Logger::FUNCTION_TAG,
"bbb");
122 CodeEntry entry3(i::Logger::FUNCTION_TAG,
"ccc");
124 ProfileTreeTestHelper helper(&tree);
129 CodeEntry* path[] = {
NULL, &entry1,
NULL, &entry2,
NULL,
NULL, &entry3, NULL};
130 Vector<CodeEntry*> path_vec(path,
sizeof(path) /
sizeof(path[0]));
131 tree.AddPathFromStart(path_vec);
134 ProfileNode* node1 = helper.Walk(&entry1);
139 ProfileNode* node2 = helper.Walk(&entry1, &entry2);
145 ProfileNode* node3 = helper.Walk(&entry1, &entry2, &entry3);
151 tree.AddPathFromStart(path_vec);
152 CHECK_EQ(node1, helper.Walk(&entry1));
153 CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
154 CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
159 CodeEntry* path2[] = {&entry1, &entry2, &entry2};
160 Vector<CodeEntry*> path2_vec(path2,
sizeof(path2) /
sizeof(path2[0]));
161 tree.AddPathFromStart(path2_vec);
164 CHECK_EQ(node1, helper.Walk(&entry1));
167 CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
169 CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
171 ProfileNode* node4 = helper.Walk(&entry1, &entry2, &entry2);
178 TEST(ProfileTreeAddPathFromEnd) {
179 CodeEntry entry1(i::Logger::FUNCTION_TAG,
"aaa");
180 CodeEntry entry2(i::Logger::FUNCTION_TAG,
"bbb");
181 CodeEntry entry3(i::Logger::FUNCTION_TAG,
"ccc");
183 ProfileTreeTestHelper helper(&tree);
188 CodeEntry* path[] = {
NULL, &entry3,
NULL, &entry2,
NULL,
NULL, &entry1, NULL};
189 Vector<CodeEntry*> path_vec(path,
sizeof(path) /
sizeof(path[0]));
190 tree.AddPathFromEnd(path_vec);
193 ProfileNode* node1 = helper.Walk(&entry1);
198 ProfileNode* node2 = helper.Walk(&entry1, &entry2);
204 ProfileNode* node3 = helper.Walk(&entry1, &entry2, &entry3);
210 tree.AddPathFromEnd(path_vec);
211 CHECK_EQ(node1, helper.Walk(&entry1));
212 CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
213 CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
218 CodeEntry* path2[] = {&entry2, &entry2, &entry1};
219 Vector<CodeEntry*> path2_vec(path2,
sizeof(path2) /
sizeof(path2[0]));
220 tree.AddPathFromEnd(path2_vec);
223 CHECK_EQ(node1, helper.Walk(&entry1));
226 CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
228 CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
230 ProfileNode* node4 = helper.Walk(&entry1, &entry2, &entry2);
237 TEST(ProfileTreeCalculateTotalTicks) {
238 ProfileTree empty_tree;
239 CHECK_EQ(0, empty_tree.root()->self_ticks());
240 empty_tree.root()->IncrementSelfTicks();
241 CHECK_EQ(1, empty_tree.root()->self_ticks());
243 CodeEntry entry1(i::Logger::FUNCTION_TAG,
"aaa");
244 CodeEntry* e1_path[] = {&entry1};
245 Vector<CodeEntry*> e1_path_vec(
246 e1_path,
sizeof(e1_path) /
sizeof(e1_path[0]));
248 ProfileTree single_child_tree;
249 single_child_tree.AddPathFromStart(e1_path_vec);
250 single_child_tree.root()->IncrementSelfTicks();
251 CHECK_EQ(1, single_child_tree.root()->self_ticks());
252 ProfileTreeTestHelper single_child_helper(&single_child_tree);
253 ProfileNode* node1 = single_child_helper.Walk(&entry1);
255 CHECK_EQ(1, single_child_tree.root()->self_ticks());
258 CodeEntry entry2(i::Logger::FUNCTION_TAG,
"bbb");
259 CodeEntry* e1_e2_path[] = {&entry1, &entry2};
260 Vector<CodeEntry*> e1_e2_path_vec(
261 e1_e2_path,
sizeof(e1_e2_path) /
sizeof(e1_e2_path[0]));
263 ProfileTree flat_tree;
264 ProfileTreeTestHelper flat_helper(&flat_tree);
265 flat_tree.AddPathFromStart(e1_path_vec);
266 flat_tree.AddPathFromStart(e1_path_vec);
267 flat_tree.AddPathFromStart(e1_e2_path_vec);
268 flat_tree.AddPathFromStart(e1_e2_path_vec);
269 flat_tree.AddPathFromStart(e1_e2_path_vec);
271 CHECK_EQ(0, flat_tree.root()->self_ticks());
272 node1 = flat_helper.Walk(&entry1);
275 ProfileNode* node2 = flat_helper.Walk(&entry1, &entry2);
279 CHECK_EQ(0, flat_tree.root()->self_ticks());
282 CodeEntry* e2_path[] = {&entry2};
283 Vector<CodeEntry*> e2_path_vec(
284 e2_path,
sizeof(e2_path) /
sizeof(e2_path[0]));
285 CodeEntry entry3(i::Logger::FUNCTION_TAG,
"ccc");
286 CodeEntry* e3_path[] = {&entry3};
287 Vector<CodeEntry*> e3_path_vec(
288 e3_path,
sizeof(e3_path) /
sizeof(e3_path[0]));
290 ProfileTree wide_tree;
291 ProfileTreeTestHelper wide_helper(&wide_tree);
292 wide_tree.AddPathFromStart(e1_path_vec);
293 wide_tree.AddPathFromStart(e1_path_vec);
294 wide_tree.AddPathFromStart(e1_e2_path_vec);
295 wide_tree.AddPathFromStart(e2_path_vec);
296 wide_tree.AddPathFromStart(e2_path_vec);
297 wide_tree.AddPathFromStart(e2_path_vec);
298 wide_tree.AddPathFromStart(e3_path_vec);
299 wide_tree.AddPathFromStart(e3_path_vec);
300 wide_tree.AddPathFromStart(e3_path_vec);
301 wide_tree.AddPathFromStart(e3_path_vec);
305 CHECK_EQ(0, wide_tree.root()->self_ticks());
306 node1 = wide_helper.Walk(&entry1);
309 ProfileNode* node1_2 = wide_helper.Walk(&entry1, &entry2);
312 node2 = wide_helper.Walk(&entry2);
315 ProfileNode* node3 = wide_helper.Walk(&entry3);
321 CHECK_EQ(0, wide_tree.root()->self_ticks());
336 CodeEntry entry1(i::Logger::FUNCTION_TAG,
"aaa");
337 CodeEntry entry2(i::Logger::FUNCTION_TAG,
"bbb");
338 CodeEntry entry3(i::Logger::FUNCTION_TAG,
"ccc");
339 CodeEntry entry4(i::Logger::FUNCTION_TAG,
"ddd");
340 code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
341 code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
342 code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
343 code_map.AddCode(ToAddress(0x1950), &entry4, 0x10);
345 CHECK_EQ(
NULL, code_map.FindEntry(ToAddress(0x1500 - 1)));
346 CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
347 CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500 + 0x100)));
348 CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500 + 0x200 - 1)));
349 CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
350 CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700 + 0x50)));
351 CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700 + 0x100 - 1)));
352 CHECK_EQ(
NULL, code_map.FindEntry(ToAddress(0x1700 + 0x100)));
353 CHECK_EQ(
NULL, code_map.FindEntry(ToAddress(0x1900 - 1)));
354 CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1900)));
355 CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1900 + 0x28)));
356 CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950)));
357 CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950 + 0x7)));
358 CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950 + 0x10 - 1)));
359 CHECK_EQ(
NULL, code_map.FindEntry(ToAddress(0x1950 + 0x10)));
360 CHECK_EQ(
NULL, code_map.FindEntry(ToAddress(0xFFFFFFFF)));
364 TEST(CodeMapMoveAndDeleteCode) {
366 CodeEntry entry1(i::Logger::FUNCTION_TAG,
"aaa");
367 CodeEntry entry2(i::Logger::FUNCTION_TAG,
"bbb");
368 code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
369 code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
370 CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
371 CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
372 code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700));
374 CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1700)));
375 CodeEntry entry3(i::Logger::FUNCTION_TAG,
"ccc");
376 code_map.AddCode(ToAddress(0x1750), &entry3, 0x100);
378 CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1750)));
387 : old_flag_prof_browser_mode_(i::FLAG_prof_browser_mode) {
388 i::FLAG_prof_browser_mode =
false;
392 i::FLAG_prof_browser_mode = old_flag_prof_browser_mode_;
396 bool old_flag_prof_browser_mode_;
402 TestSetup test_setup;
404 profiles.StartProfiling(
"",
false);
405 ProfileGenerator generator(&profiles);
406 CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"aaa");
407 CodeEntry* entry2 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"bbb");
408 CodeEntry* entry3 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"ccc");
409 generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
410 generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
411 generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
418 sample1.pc = ToAddress(0x1600);
419 sample1.tos = ToAddress(0x1500);
420 sample1.stack[0] = ToAddress(0x1510);
421 sample1.frames_count = 1;
422 generator.RecordTickSample(sample1);
424 sample2.pc = ToAddress(0x1925);
425 sample2.tos = ToAddress(0x1900);
426 sample2.stack[0] = ToAddress(0x1780);
427 sample2.stack[1] = ToAddress(0x10000);
428 sample2.stack[2] = ToAddress(0x1620);
429 sample2.frames_count = 3;
430 generator.RecordTickSample(sample2);
432 sample3.pc = ToAddress(0x1510);
433 sample3.tos = ToAddress(0x1500);
434 sample3.stack[0] = ToAddress(0x1910);
435 sample3.stack[1] = ToAddress(0x1610);
436 sample3.frames_count = 2;
437 generator.RecordTickSample(sample3);
439 CpuProfile* profile = profiles.StopProfiling(
"");
441 ProfileTreeTestHelper top_down_test_helper(profile->top_down());
444 ProfileNode* node1 = top_down_test_helper.Walk(entry1);
447 ProfileNode* node2 = top_down_test_helper.Walk(entry1, entry1);
450 ProfileNode* node3 = top_down_test_helper.Walk(entry1, entry2, entry3);
453 ProfileNode* node4 = top_down_test_helper.Walk(entry1, entry3, entry1);
459 static void CheckNodeIds(ProfileNode* node,
int* expectedId) {
460 CHECK_EQ((*expectedId)++, node->id());
461 for (
int i = 0; i < node->children()->length(); i++) {
462 CheckNodeIds(node->children()->at(i), expectedId);
468 TestSetup test_setup;
470 profiles.StartProfiling(
"",
true);
471 ProfileGenerator generator(&profiles);
472 CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"aaa");
473 CodeEntry* entry2 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"bbb");
474 CodeEntry* entry3 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"ccc");
475 generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
476 generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
477 generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
484 sample1.pc = ToAddress(0x1600);
485 sample1.stack[0] = ToAddress(0x1510);
486 sample1.frames_count = 1;
487 generator.RecordTickSample(sample1);
489 sample2.pc = ToAddress(0x1925);
490 sample2.stack[0] = ToAddress(0x1780);
491 sample2.stack[1] = ToAddress(0x10000);
492 sample2.stack[2] = ToAddress(0x1620);
493 sample2.frames_count = 3;
494 generator.RecordTickSample(sample2);
496 sample3.pc = ToAddress(0x1510);
497 sample3.stack[0] = ToAddress(0x1910);
498 sample3.stack[1] = ToAddress(0x1610);
499 sample3.frames_count = 2;
500 generator.RecordTickSample(sample3);
502 CpuProfile* profile = profiles.StopProfiling(
"");
504 CheckNodeIds(profile->top_down()->root(), &nodeId);
507 CHECK_EQ(3, profile->samples_count());
508 int expected_id[] = {3, 5, 7};
509 for (
int i = 0; i < 3; i++) {
510 CHECK_EQ(expected_id[i], profile->sample(i)->id());
516 TestSetup test_setup;
518 profiles.StartProfiling(
"",
false);
519 ProfileGenerator generator(&profiles);
520 CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG,
"aaa");
521 generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
526 sample1.pc = ToAddress(0x1600);
527 sample1.stack[0] = ToAddress(0x1510);
528 sample1.frames_count = 1;
529 generator.RecordTickSample(sample1);
531 CpuProfile* profile = profiles.StopProfiling(
"");
533 CheckNodeIds(profile->top_down()->root(), &nodeId);
536 CHECK_EQ(0, profile->samples_count());
540 static const ProfileNode* PickChild(
const ProfileNode* parent,
542 for (
int i = 0; i < parent->children()->length(); ++i) {
543 const ProfileNode* child = parent->children()->at(i);
544 if (strcmp(child->entry()->name(),
name) == 0)
return child;
550 TEST(RecordStackTraceAtStartProfiling) {
553 i::FLAG_use_inlining =
false;
560 CHECK_EQ(0, profiler->GetProfilesCount());
562 "function c() { startProfiling(); }\n"
563 "function b() { c(); }\n"
564 "function a() { b(); }\n"
567 CHECK_EQ(1, profiler->GetProfilesCount());
568 CpuProfile* profile = profiler->GetProfile(0);
569 const ProfileTree* topDown = profile->top_down();
570 const ProfileNode* current = topDown->root();
571 const_cast<ProfileNode*
>(current)->
Print(0);
581 current = PickChild(current,
"(anonymous function)");
583 current = PickChild(current,
"a");
585 current = PickChild(current,
"b");
587 current = PickChild(current,
"c");
589 CHECK(current->children()->length() == 0 ||
590 current->children()->length() == 1);
591 if (current->children()->length() == 1) {
592 current = PickChild(current,
"startProfiling");
593 CHECK_EQ(0, current->children()->length());
601 CpuProfilesCollection::kMaxSimultaneousProfiles> titles;
602 for (
int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
605 CHECK(collection.StartProfiling(title.
start(),
false));
606 titles[i] = title.
start();
608 CHECK(!collection.StartProfiling(
"maximum",
false));
609 for (
int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
// NOTE(review): Garbled extraction fragment.  It appears to contain the tail
// of a PickChild(v8::CpuProfileNode) helper plus a TEST that compiles two
// scripts and checks CpuProfileNode::GetScriptId() against them; too many
// original lines are missing to reconstruct it safely, so the bytes are kept
// as-is and only comments are added.
619 if (strcmp(*function_name, name) == 0)
return child;
// Fragment of the test body: inlining is disabled because inlined functions
// would not appear in the profile's stack traces.
628 i::FLAG_use_inlining =
false;
638 env->
GetIsolate(),
"function a() { startProfiling(); }\n"));
642 "function b() { a(); }\n"
644 "stopProfiling();\n"));
// Prints the collected top-down tree (debugging aid).
649 reinterpret_cast<ProfileNode*
>(
660 CHECK_NE(
NULL, const_cast<v8::CpuProfileNode*>(current));
662 current = PickChild(current,
"b");
663 CHECK_NE(
NULL, const_cast<v8::CpuProfileNode*>(current));
666 current = PickChild(current,
"a");
667 CHECK_NE(
NULL, const_cast<v8::CpuProfileNode*>(current));
// The node for "a" must report the id of the script that defined it.
668 CHECK_EQ(script_a->GetId(), current->GetScriptId());
674 static const char* line_number_test_source_existing_functions =
675 "function foo_at_the_first_line() {\n"
677 "foo_at_the_first_line();\n"
678 "function lazy_func_at_forth_line() {}\n";
681 static const char* line_number_test_source_profile_time_functions =
682 "// Empty first line\n"
683 "function bar_at_the_second_line() {\n"
684 " foo_at_the_first_line();\n"
686 "bar_at_the_second_line();\n"
687 "function lazy_func_at_6th_line() {}";
694 (*(*env))->Global()->Get(v8_str(name))));
695 CodeEntry* func_entry = code_map->FindEntry(func->code()->address());
698 return func_entry->line_number();
703 i::FLAG_use_inlining =
false;
708 TestSetup test_setup;
712 CompileRun(line_number_test_source_existing_functions);
717 CompileRun(line_number_test_source_profile_time_functions);
719 profiler->processor()->StopSynchronously();
726 profiler->StopProfiling(
"LineNumber");
741 "function TryCatch() {\n"
743 " startProfiling();\n"
744 " } catch (e) { };\n"
746 "function TryFinally() {\n"
752 "stopProfiling();"));
758 reinterpret_cast<ProfileNode*
>(
766 CHECK_NE(
NULL, const_cast<v8::CpuProfileNode*>(current));
768 current = PickChild(current,
"TryFinally");
769 CHECK_NE(
NULL, const_cast<v8::CpuProfileNode*>(current));
772 current = PickChild(current,
"TryCatch");
773 CHECK_NE(
NULL, const_cast<v8::CpuProfileNode*>(current));
void StartProfiling(const char *title, bool record_samples=false)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
#define CHECK_EQ(expected, value)
CpuProfiler * GetCpuProfiler()
TEST(ProfileNodeFindOrAddChild)
const char * GetBailoutReason() const
const CpuProfileNode * GetChild(int index) const
static const v8::CpuProfile * last_profile
ProfileGenerator * generator() const
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Handle< String > GetFunctionName() const
static v8::Local< v8::Context > NewContext(CcTestExtensionFlags extensions, v8::Isolate *isolate=CcTest::isolate())
v8::Isolate * GetIsolate()
static Local< Script > Compile(Handle< String > source, ScriptOrigin *origin=NULL, ScriptData *script_data=NULL)
int GetFunctionLineNumber(LocalContext *env, const char *name)
static i::Isolate * i_isolate()
static Vector< T > New(int length)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including on console Map counters to a file Enable debugger compile events enable GDBJIT enable GDBJIT interface for all code objects dump only objects containing this substring stress the GC compactor to flush out pretty print source code print source AST function name where to insert a breakpoint print scopes for builtins trace contexts operations print stuff during garbage collection report code statistics after GC report handles after GC trace cache state transitions print interface inference details prints when objects are turned into dictionaries report heap spill statistics along with trace isolate state changes trace regexp bytecode execution Minimal Log all events to the log file Log API events to the log file Log heap samples on garbage collection for the hp2ps tool log positions Log suspect operations Used with turns on browser compatible mode for profiling v8 Specify the name of the log file Enable low level linux profiler Enable perf linux profiler(experimental annotate support).") DEFINE_string(gc_fake_mmap
static v8::internal::Handle< To > OpenHandle(v8::Local< From > handle)
#define CHECK_NE(unexpected, value)
static int SNPrintF(Vector< char > str, const char *format,...)
static void InitializeVM()
static const char *const kAnonymousFunctionName
CpuProfiler * cpu_profiler() const
int GetChildrenCount() const
void Print(const v8::FunctionCallbackInfo< v8::Value > &args)
void DeleteArray(T *array)
static v8::Isolate * isolate()
static Local< String > NewFromUtf8(Isolate *isolate, const char *data, NewStringType type=kNormalString, int length=-1)