#if defined(V8_TARGET_ARCH_MIPS)

#define __ ACCESS_MASM(masm)
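
// Generate_Adaptor builds the full argument count for the C++ builtin
// (the receiver and any extra arguments are added to the count in a0) and
// then tail-calls the builtin through JumpToExternalReference.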
void Builtins::Generate_Adaptor(MacroAssembler* masm,
  int num_extra_args = 0;
  __ Addu(s0, a0, num_extra_args + 1);
  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));


static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,


static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
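
// Allocate an empty JSArray in new space. The result is left in the result
// register; when initial_capacity > 0 a FixedArray backing store is also
// allocated and filled with the-hole. gc_required is taken when allocation
// fails.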
static void AllocateEmptyJSArray(MacroAssembler* masm,
                                 Register array_function,
                                 Label* gc_required) {
  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
  if (initial_capacity > 0) {
  __ AllocateInNewSpace(size,
  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
  __ mov(scratch3, zero_reg);
  if (initial_capacity == 0) {
  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  static const int kLoopUnfoldLimit = 4;
  if (initial_capacity <= kLoopUnfoldLimit) {
    for (int i = 0; i < initial_capacity; i++) {
    __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
    __ Branch(&loop, lt, scratch1, Operand(scratch2));
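
// Allocate a JSArray with the number of elements given by array_size (a
// tagged smi). Both the JSArray and its FixedArray backing store are
// allocated in new space; when fill_with_hole is set the elements are
// initialized with the-hole. gc_required is taken when allocation fails.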
static void AllocateJSArray(MacroAssembler* masm,
                            Register array_function,
                            Register elements_array_storage,
                            Register elements_array_end,
                            Label* gc_required) {
  __ LoadInitialArrayMap(array_function, scratch2,
                         elements_array_storage, fill_with_hole);
  if (FLAG_debug_code) {
    __ Assert(ne, "array size is unexpectedly 0",
              array_size, Operand(zero_reg));
  __ li(elements_array_end,
  __ Addu(elements_array_end, elements_array_end, scratch1);
  __ AllocateInNewSpace(
  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
  __ sw(elements_array_storage,
  __ sw(elements_array_storage,
  __ And(elements_array_storage, elements_array_storage,
  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
  __ Addu(elements_array_end, elements_array_storage, elements_array_end);
  if (fill_with_hole) {
    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
    __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
    __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
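
// ArrayNativeCode handles the common Array cases inline: no arguments
// (empty array), a single smi length argument, and an explicit list of
// elements. Anything it cannot handle inline (a non-smi length, elements
// that need a map transition it cannot perform, allocation failure) jumps
// to call_generic_code.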
static void ArrayNativeCode(MacroAssembler* masm,
                            Label* call_generic_code) {
  Counters* counters = masm->isolate()->counters();
  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
      has_non_smi_element, finish, cant_transition_map, not_double;
  __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
  __ bind(&empty_array);
  AllocateEmptyJSArray(masm,
  __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
  __ bind(&argc_one_or_more);
  __ Branch(&argc_two_or_more, ne, a0, Operand(1));
  __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
  __ mov(a0, zero_reg);
  __ Branch(&empty_array);
  __ bind(&not_empty_array);
  __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
  AllocateJSArray(masm,
  __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
  __ bind(&argc_two_or_more);
  AllocateJSArray(masm,
  __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
  if (FLAG_smi_only_arrays) {
    __ JumpIfNotSmi(a2, &has_non_smi_element);
  __ Branch(&loop, lt, t0, Operand(t1));
  __ bind(&has_non_smi_element);
  __ bind(&cant_transition_map);
  __ UndoAllocationInNewSpace(a3, t0);
  __ Branch(call_generic_code);
  __ bind(&not_double);
                                         &cant_transition_map);
  __ RecordWriteField(a3,
  __ Branch(&loop2, lt, t0, Operand(t1));
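
// Builtin entry for the InternalArray function: check the initial map in
// debug builds, try the inline ArrayNativeCode fast paths, and fall back to
// the shared InternalArrayCodeGeneric stub.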
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
  GenerateLoadInternalArrayFunction(masm, a1);
  if (FLAG_debug_code) {
    __ Assert(ne, "Unexpected initial map for InternalArray function",
              t0, Operand(zero_reg));
    __ GetObjectType(a2, a3, t0);
    __ Assert(eq, "Unexpected initial map for InternalArray function",
  ArrayNativeCode(masm, &generic_array_code);
  __ bind(&generic_array_code);
  Handle<Code> array_code =
      masm->isolate()->builtins()->InternalArrayCodeGeneric();
  __ Jump(array_code, RelocInfo::CODE_TARGET);
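
// Generate_ArrayCode and Generate_ArrayConstructCode below follow the same
// pattern for the Array function when it is called as a function and as a
// constructor: check the initial map in debug builds, try ArrayNativeCode,
// and fall back to ArrayCodeGeneric and JSConstructStubGeneric respectively.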
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
  Label generic_array_code;
  GenerateLoadArrayFunction(masm, a1);
  if (FLAG_debug_code) {
    __ Assert(ne, "Unexpected initial map for Array function (1)",
              t0, Operand(zero_reg));
    __ GetObjectType(a2, a3, t0);
    __ Assert(eq, "Unexpected initial map for Array function (2)",
  ArrayNativeCode(masm, &generic_array_code);
  __ bind(&generic_array_code);
  Handle<Code> array_code =
      masm->isolate()->builtins()->ArrayCodeGeneric();
  __ Jump(array_code, RelocInfo::CODE_TARGET);


void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
  Label generic_constructor;
  if (FLAG_debug_code) {
    __ Assert(ne, "Unexpected initial map for Array function (3)",
              t0, Operand(zero_reg));
    __ GetObjectType(a2, a3, t0);
    __ Assert(eq, "Unexpected initial map for Array function (4)",
  ArrayNativeCode(masm, &generic_constructor);
  __ bind(&generic_constructor);
  Handle<Code> generic_construct_stub =
      masm->isolate()->builtins()->JSConstructStubGeneric();
  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
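
// Implements 'new String(value)': the argument is converted to a string
// (via the number-string cache or a runtime call) and wrapped in a JSValue;
// with no arguments the empty string is wrapped instead.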
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
  Register function = a1;
  if (FLAG_debug_code) {
    __ Assert(eq, "Unexpected String function", function, Operand(a2));
  __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
  __ Subu(a0, a0, Operand(1));
  Register argument = a2;
  Label not_cached, argument_is_string;
  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
  __ bind(&argument_is_string);
  __ LoadGlobalFunctionInitialMap(function, map, t0);
  if (FLAG_debug_code) {
    __ Assert(eq, "Unexpected string wrapper instance size",
    __ Assert(eq, "Unexpected unused properties of string wrapper",
              t0, Operand(zero_reg));
  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
  Label convert_argument;
  __ bind(&not_cached);
  __ JumpIfSmi(a0, &convert_argument);
  __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
  __ mov(argument, a0);
  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
  __ Branch(&argument_is_string);
  __ bind(&convert_argument);
  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
  __ mov(argument, v0);
  __ Branch(&argument_is_string);
  __ bind(&no_arguments);
  __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
  __ Branch(&argument_is_string);
  __ bind(&gc_required);
  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
  __ CallRuntime(Runtime::kNewStringWrapper, 1);
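
// Tail-call the code attached to the function's SharedFunctionInfo; used by
// the recompilation builtins below.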
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {


void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
  GenerateTailCallToSharedCode(masm);


void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
  __ CallRuntime(Runtime::kParallelRecompile, 1);
  GenerateTailCallToSharedCode(masm);
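
// Shared body of the construct stubs. When FLAG_inline_new permits, the new
// object is allocated inline; with count_constructions the construction
// counter in the SharedFunctionInfo is decremented and the instance size is
// finalized once it reaches zero (in-object slack tracking). The arguments
// are then copied, the constructor is invoked, and a non-object return value
// is replaced by the receiver.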
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                           bool is_api_function,
                                           bool count_constructions) {
  ASSERT(!is_api_function || !count_constructions);
  Isolate* isolate = masm->isolate();
  FrameScope scope(masm, StackFrame::CONSTRUCT);
  __ MultiPushReversed(a0.bit() | a1.bit());
  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
  Label rt_call, allocated;
  if (FLAG_inline_new) {
    Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
    ExternalReference debug_step_in_fp =
        ExternalReference::debug_step_in_fp_address(isolate);
    __ li(a2, Operand(debug_step_in_fp));
    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
    __ JumpIfSmi(a2, &rt_call);
    __ GetObjectType(a2, a3, t4);
    if (count_constructions) {
      __ lbu(t0, constructor_count);
      __ Subu(t0, t0, Operand(1));
      __ sb(t0, constructor_count);
      __ Branch(&allocate, ne, t0, Operand(zero_reg));
      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
    if (count_constructions) {
      if (FLAG_debug_code) {
        __ Assert(le, "Unexpected number of pre-allocated property fields.",
      __ InitializeFieldsWithFiller(t5, a0, t7);
      __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(t5, t6, t7);
    __ Addu(a3, a3, Operand(t6));
    __ Branch(&allocated, eq, a3, Operand(zero_reg));
        a3, Operand(zero_reg));
    __ AllocateInNewSpace(
    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
    if (count_constructions) {
      __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
    } else if (FLAG_debug_code) {
      __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
      __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
    __ Branch(&loop, less, a2, Operand(t6));
    __ bind(&undo_allocation);
    __ UndoAllocationInNewSpace(t4, t5);
  __ CallRuntime(Runtime::kNewObject, 1);
  __ Addu(t0, a2, Operand(t0));
  __ Addu(a3, a3, Operand(-2));
  if (is_api_function) {
    Handle<Code> code =
        masm->isolate()->builtins()->HandleApiCallConstruct();
    ParameterCount expected(0);
    __ InvokeCode(code, expected, expected,
    ParameterCount actual(a0);
  if (!is_api_function && !count_constructions) {
    masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
  Label use_receiver, exit;
  __ JumpIfSmi(v0, &use_receiver);
  __ GetObjectType(v0, a3, a3);
  __ bind(&use_receiver);
  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);


void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
  Generate_JSConstructStubHelper(masm, false, true);


void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  Generate_JSConstructStubHelper(masm, false, false);


void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
  Generate_JSConstructStubHelper(masm, true, false);
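
// Entered from the JSEntry stub: clear the context register, copy the
// arguments from the argv array onto the stack, and invoke the function
// (taking the construct path when is_construct is set).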
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  __ mov(cp, zero_reg);
  __ addu(t2, s0, t0);
  __ Branch(&loop, ne, s0, Operand(t2));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  ParameterCount actual(a0);


void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);


void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
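
// The lazy (re)compile builtins call into the runtime to compile the
// function and then continue in the generated code; the Notify* builtins
// report deoptimization and OSR events to the runtime and resume according
// to the saved state.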
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
  __ CallRuntime(Runtime::kLazyCompile, 1);


void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
  __ CallRuntime(Runtime::kLazyRecompile, 1);


static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
  Label with_tos_register, unknown_state;
  __ Branch(&with_tos_register,
  __ bind(&with_tos_register);
  __ bind(&unknown_state);
  __ stop("no cases left");


void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {


void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {


void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
  __ MultiPush(saved_regs);
  __ CallRuntime(Runtime::kNotifyOSR, 0);
  __ MultiPop(saved_regs);


void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
  CpuFeatures::TryForceFeatureScope scope(VFP3);
  __ Abort("Unreachable code: Cannot optimize without FPU support.");
  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
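
// Implements calling a value as a function: make sure a receiver exists
// (push undefined when there are no arguments), check that the callee is a
// JSFunction (otherwise route to the proxy / non-function handlers), convert
// the receiver for non-strict, non-native callees, shift the arguments down
// to drop the callee, and go through the arguments adaptor when the formal
// parameter count differs from the actual one.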
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  __ Branch(&done, ne, a0, Operand(zero_reg));
  __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
  __ Addu(a0, a0, Operand(1));
  Label slow, non_function;
  __ addu(at, sp, at);
  __ JumpIfSmi(a1, &non_function);
  __ GetObjectType(a1, a2, a2);
  Label shift_arguments;
  { Label convert_to_object, use_global_receiver, patch_receiver;
    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
    __ addu(a2, sp, at);
    __ JumpIfSmi(a2, &convert_to_object, t2);
    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
    __ LoadRoot(a3, Heap::kNullValueRootIndex);
    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
    __ GetObjectType(a2, a3, a3);
    __ bind(&convert_to_object);
    __ addu(at, sp, at);
    __ Branch(&patch_receiver);
    __ bind(&use_global_receiver);
    const int kGlobalIndex =
    __ bind(&patch_receiver);
    __ addu(a3, sp, at);
    __ Branch(&shift_arguments);
  __ bind(&non_function);
  __ addu(a2, sp, at);
  __ bind(&shift_arguments);
  __ addu(a2, sp, at);
  __ Subu(a2, a2, Operand(kPointerSize));
  __ Branch(&loop, ne, a2, Operand(sp));
  __ Subu(a0, a0, Operand(1));
  { Label function, non_proxy;
    __ Branch(&function, eq, t0, Operand(zero_reg));
    __ mov(a2, zero_reg);
    __ Branch(&non_proxy, ne, t0, Operand(1));
    __ Addu(a0, a0, Operand(1));
    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
    __ bind(&non_proxy);
    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET,
          ne, a2, Operand(a0));
  ParameterCount expected(0);
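
// Implements Function.prototype.apply: check that there is enough stack
// space for the unrolled argument list, resolve the receiver the same way
// as FunctionCall above, push each element of the arguments object via
// Runtime::kGetProperty in a loop, and invoke the function (or a function
// proxy via CALL_FUNCTION_PROXY).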
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
  __ subu(a2, sp, a2);
  __ Branch(&okay, gt, a2, Operand(t3));
  __ mov(a1, zero_reg);
  Label push_receiver;
  __ GetObjectType(a1, a2, a2);
  Label call_to_object, use_global_receiver;
  __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
  __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
  __ JumpIfSmi(a0, &call_to_object);
  __ LoadRoot(a1, Heap::kNullValueRootIndex);
  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
  __ GetObjectType(a0, a1, a1);
  __ bind(&call_to_object);
  __ Branch(&push_receiver);
  __ bind(&use_global_receiver);
  const int kGlobalOffset =
  __ bind(&push_receiver);
  __ CallRuntime(Runtime::kGetProperty, 2);
  __ Branch(&loop, ne, a0, Operand(a1));
  ParameterCount actual(a0);
  __ GetObjectType(a1, a2, a2);
  frame_scope.GenerateLeaveFrame();
  __ Addu(sp, sp, Operand(3 * kPointerSize));
  __ bind(&call_proxy);
  __ Addu(a0, a0, Operand(1));
  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
  __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
  __ Addu(sp, sp, Operand(3 * kPointerSize));
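
// Build and tear down the ARGUMENTS_ADAPTOR frame used while adapting the
// actual argument count to the callee's expected count; fp, ra, the function
// and the bookkeeping slots are saved, and fp is pointed at them.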
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
  __ Addu(fp, sp, Operand(3 * kPointerSize));


static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
  __ MultiPop(fp.bit() | ra.bit());
  __ Addu(sp, sp, Operand(kPointerSize));
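
// Adapt the actual argument count (a0) to the expected count (a2): when
// there are enough actual arguments they are copied into the adaptor frame;
// when there are too few the missing ones are padded with undefined. If the
// callee is marked with the don't-adapt-arguments sentinel, adaptation is
// skipped entirely.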
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  Label invoke, dont_adapt_arguments;
  Label enough, too_few;
  __ Branch(&dont_adapt_arguments, eq,
  __ Branch(&too_few, Uless, a0, Operand(a2));
  EnterArgumentsAdaptorFrame(masm);
  __ Addu(a0, fp, a0);
  __ Addu(a0, a0, Operand(2 * kPointerSize));
  __ subu(a2, a0, a2);
  __ addiu(a0, a0, -kPointerSize);
  EnterArgumentsAdaptorFrame(masm);
  __ Addu(a0, fp, a0);
  __ Addu(a0, a0, Operand(2 * kPointerSize));
  __ Addu(t3, fp, kPointerSize);
  __ Subu(sp, sp, kPointerSize);
  __ Subu(a0, a0, kPointerSize);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Subu(a2, fp, Operand(t2));
  __ Addu(a2, a2, Operand(-4 * kPointerSize));
  __ Subu(sp, sp, kPointerSize);
  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
  LeaveArgumentsAdaptorFrame(masm);
  __ bind(&dont_adapt_arguments);


#endif  // V8_TARGET_ARCH_MIPS