#if V8_TARGET_ARCH_ARM

#define __ ACCESS_MASM(masm)
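
// Helper (partial listing): branches to 'global_object' when the instance
// type passed in by the caller is one of the global object types; the call
// sites below pass the receiver's instance type and their miss/slow label.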
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Label* global_object) {
  __ b(eq, global_object);
  __ b(eq, global_object);
  __ b(eq, global_object);
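
// Helper: receiver check for normal (dictionary-mode) property access. The
// receiver must not be a smi and must not be a global object, and its
// property backing store must be a name dictionary (its map is checked
// against the hash table map root loaded below); anything else jumps to
// 'miss'.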
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
  __ JumpIfSmi(receiver, miss);
  GenerateGlobalInstanceTypeCheck(masm, t1, miss);
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
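
// Helper: loads a property from a name dictionary. kDetailsOffset addresses
// the third word of a dictionary entry (key, value, details); the details
// word is tested against PropertyDetails::TypeField so that only normal
// properties take this fast path.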
static void GenerateDictionaryLoad(MacroAssembler* masm,
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
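
// Helper: stores into a name dictionary. The details word (again the third
// word of the entry) is tested against both the property type mask and the
// read-only attribute bit, so read-only or non-normal entries are rejected;
// the value itself lives one word after the key (kValueOffset).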
static void GenerateDictionaryStore(MacroAssembler* masm,
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ mov(scratch1, value);
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
  __ JumpIfSmi(receiver, slow);
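
// Helper: loads an element from a FixedArray backing store. If the elements
// map is not the fixed-array map, the 'not_fast_array' label is taken (when
// provided); out-of-bounds keys and hole values go to 'out_of_range';
// otherwise the element ends up in 'result'.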
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  if (not_fast_array != NULL) {
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
    __ AssertFastElements(elements);
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
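
// Helper: classifies a non-smi key. Keys above the last unique-name type go
// to 'not_unique'; strings whose hash field caches an array index branch to
// 'index_string'; non-internalized strings also go to 'not_unique', so only
// unique names fall through.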
static void GenerateKeyNameCheck(MacroAssembler* masm,
  __ b(hi, not_unique);
  __ b(eq, index_string);
  __ b(ne, not_unique);
  masm->isolate()->stub_cache()->GenerateProbe(
  GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
  GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
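
// LoadIC miss handler: count the miss and hand the (receiver, name) pair
// over to the kLoadIC_Miss runtime entry via a tail call.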
  Isolate* isolate = masm->isolate();
  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
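
// Helper for the sloppy-arguments ICs below: yields a MemOperand addressing
// the mapped parameter slot for 'key'. The tst against 0x80000001 rejects
// negative or non-smi keys; receivers that are not sloppy arguments objects
// go to 'slow_case', and keys with no mapping (out of the mapped range, or a
// hole in the parameter map) go to 'unmapped_case'. Because the key is a
// smi, scaling it by kPointerSize >> 1 is the same as scaling the untagged
// index by kPointerSize.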
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Label* unmapped_case,
  Heap* heap = masm->isolate()->heap();
  __ JumpIfSmi(object, slow_case);
  __ tst(key, Operand(0x80000001));
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ cmp(key, Operand(scratch2));
  __ b(cs, unmapped_case);
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, key, scratch3);
  __ add(scratch3, scratch3, Operand(kOffset));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, scratch3);
  __ b(eq, unmapped_case);
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, scratch2, scratch3);
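
// Helper: the unmapped case reads straight from the arguments backing store,
// which must be a FixedArray and must be long enough for 'key'; otherwise
// control transfers to 'slow_case'.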
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register parameter_map,
  Register backing_store = parameter_map;
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
  __ cmp(key, Operand(scratch));
  __ mov(scratch, Operand(kPointerSize >> 1));
  __ mul(scratch, key, scratch);
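
// KeyedLoadIC for sloppy arguments objects: try the mapped parameter lookup
// first; on 'notin' fall back to the unmapped backing store, where loading
// the hole means the element is absent.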
  GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
  __ ldr(r0, mapped_location);
  GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
  __ ldr(r2, unmapped_location);
  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
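
// KeyedStoreIC counterpart: store through the mapped slot when the key is
// mapped, otherwise into the unmapped backing store.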
  GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
  __ str(r0, mapped_location);
  GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
  __ str(r0, unmapped_location);
  Isolate* isolate = masm->isolate();
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
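
// KeyedLoadIC::GenerateGeneric: the generic keyed load. Smi keys take the
// fast-elements path with a number-dictionary fallback; name keys are first
// probed in the keyed lookup cache (in-object and property-array hits are
// handled separately) and then in the receiver's property dictionary; string
// keys carrying a cached array index are turned back into a smi index.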
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;
  Register receiver = r1;
  Isolate* isolate = masm->isolate();

  __ JumpIfNotSmi(key, &check_name);
  GenerateKeyedLoadReceiverCheck(
  __ CheckFastElements(r2, r3, &check_number_dictionary);
  GenerateFastArrayLoad(
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
                      r2, r3);

  __ bind(&check_number_dictionary);
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);
  GenerateKeyedLoadReceiverCheck(
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ b(eq, &probe_dictionary);
  __ And(r3, r3, Operand(mask));

  Label load_in_object_property;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ mov(r4, Operand(cache_keys));
  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ b(ne, &try_next_entry);
    __ b(eq, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);

  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ mov(r4, Operand(cache_field_offsets));
    __ add(r3, r3, Operand(i));
    __ b(ge, &property_array_property);
    __ jmp(&load_in_object_property);

  __ bind(&load_in_object_property);
  __ sub(r1, r1, Operand(kHeapObjectTag));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),

  __ bind(&property_array_property);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),

  __ bind(&probe_dictionary);
  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
      isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
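
// KeyedLoadIC::GenerateString: character loads from string receivers are
// delegated to StringCharAtGenerator, with StubRuntimeCallHelper covering
// the slow cases.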
  Register receiver = r1;
  Register scratch = r3;
  Register result = r0;

  StringCharAtGenerator char_at_generator(receiver,
  char_at_generator.GenerateFast(masm);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);
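
// KeyedLoadIC::GenerateIndexedInterceptor: smi receivers and keys that are
// not non-negative smis go to the slow path; otherwise the load is forwarded
// to the kKeyedLoadPropertyWithInterceptor runtime entry.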
  __ JumpIfSmi(r1, &slow);
  __ NonNegativeSmiTst(r0);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
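
// Store miss and slow-case stubs: each one tail-calls its IC utility
// (kKeyedStoreIC_Miss, kStoreIC_Slow, kKeyedStoreIC_Slow) with the three
// store arguments.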
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);

  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
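
// Helper shared by the generic keyed store below: performs the actual store
// for fast smi, fast object and fast double elements. It checks for holes
// that would require consulting the prototype chain, emits the write barrier
// for heap-object values, and transitions the elements kind (smi -> double,
// smi/double -> object) when the incoming value does not fit the current
// representation.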
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Register receiver_map,
    Register elements_map,
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  __ bind(fast_object);
  Register scratch_value = r4;
  Register address = r5;
         Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, fast_double);

  Label holecheck_passed1;
  __ ldr(scratch_value,
         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
  __ bind(&holecheck_passed1);

  __ JumpIfNotSmi(value, &non_smi_value);
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));

  __ bind(&non_smi_value);
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  __ bind(&finish_object_store);
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ mov(scratch_value, value);
  __ RecordWrite(elements,

  __ bind(fast_double);
  __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ add(address, elements,
  __ ldr(scratch_value,
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
                                 &transition_double_elements);

  __ bind(&transition_smi_elements);
  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  __ jmp(&finish_object_store);
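
// KeyedStoreIC::GenerateGeneric: generic keyed store entry point. Non-smi
// keys and smi receivers bail out to the slow case; in-bounds stores into
// fast elements use the helper above, stores one past the end of a JSArray
// grow it via the *_grow labels, and the elements map decides between the
// object and double flavours.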
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  Register value = r0;
  Register receiver = r2;
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);
  __ cmp(key, Operand(ip));
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ jmp(&fast_double_grow);
  __ cmp(key, Operand(ip));

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  masm->isolate()->stub_cache()->GenerateProbe(
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
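
// StoreIC::GenerateNormal: stores to dictionary-mode receivers go through
// GenerateDictionaryStore; hits and misses are tracked with the
// store_normal_hit / store_normal_miss counters.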
  GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(),
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
    case Token::EQ_STRICT:
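
// Inlined smi-check patching for CompareIC: HasInlinedSmiCode and
// PatchInlinedSmiCode locate the cmp instruction emitted next to the IC call
// site and rewrite the conditional branch that follows it (eq vs. ne) so the
// inlined smi check is either enabled or disabled.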
bool CompareIC::HasInlinedSmiCode(Address address) {
  Address cmp_instruction_address =

  Address cmp_instruction_address =
  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
           address, cmp_instruction_address, delta);
  Instr branch_instr =
  CodePatcher patcher(patch_address, 2);
    patcher.masm()->cmp(reg, reg);
    patcher.EmitCondition(ne);
    patcher.EmitCondition(eq);

#endif  // V8_TARGET_ARCH_ARM