#if V8_TARGET_ARCH_ARM64

#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  __ B(eq, global_object);
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  __ JumpIfSmi(receiver, miss);

  Register map = scratch0;
  Register type = scratch1;
  GenerateGlobalInstanceTypeCheck(masm, type, miss);

  __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ Tst(scratch1, kTypeAndReadOnlyMask);

  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Mov(scratch1, value);
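// Note on the two dictionary helpers above: a NameDictionary entry occupies
// three pointers -- key, value and property details -- starting at
// kElementsStartOffset, which is why the details word sits at
// kElementsStartOffset + 2 * kPointerSize and the value one pointer past the
// key. In the store helper, the Tst against kTypeAndReadOnlyMask is what
// rejects properties that are not plain data properties, or that are
// read-only, before the value is written back into the dictionary.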
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  __ JumpIfSmi(receiver, slow);
  __ Tbnz(scratch, interceptor_bit, slow);
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register elements_map,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* slow) {
  if (not_fast_array != NULL) {
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  Register scratch1 = elements_map;
  __ Cmp(key, scratch1);

  __ SmiUntag(scratch2, key);
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
  __ Mov(result, scratch2);
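// GenerateFastArrayLoad is the fast path for receiver[key] on fast elements:
// it checks that the elements map is the FixedArray map (or merely asserts
// fast elements when no bailout label is supplied), compares the key against
// what appears to be the array length, loads the element, and bails out to
// 'slow' if the loaded value is the hole; otherwise the element ends up in
// 'result'.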
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map_scratch, Register hash_scratch,
                                 Label* index_string, Label* not_unique) {
  __ TestAndBranchIfAllClear(hash_scratch,
                             Name::kContainsCachedArrayIndexMask,
                             index_string);
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register map,
                                                Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  __ JumpIfSmi(object, slow_case);
  __ JumpIfNotSmi(key, slow_case);

  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());

  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  static const int offset = /* ... */;
  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  __ SmiUntag(scratch1);
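// Background for the arguments lookups: a sloppy-mode arguments object backs
// its elements with a "parameter map". Slot 0 holds the context, slot 1 the
// backing FixedArray, and each following slot holds either the context index
// of an aliased (mapped) parameter or the hole. A hole entry means the
// argument is unmapped, which is why the code above branches to
// 'unmapped_case' on kTheHoleValueRootIndex; GenerateUnmappedArgumentsLookup,
// below, then indexes the backing FixedArray instead.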
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  Register backing_store = parameter_map;
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());

  __ Cmp(key, scratch);
  __ Add(backing_store, /* ... */);
  __ SmiUntag(scratch, key);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x0, x2, x3, x4, x5, x6);
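// Register protocol visible in the probe above (presumably from
// LoadIC::GenerateMegamorphic): the receiver arrives in x0 and the property
// name in x2, leaving x3-x6 as scratch registers for the stub cache probe.
// The normal-mode load below uses the same convention, dispatching through
// the receiver's NameDictionary instead of the stub cache.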
  GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);

  GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
  Isolate* isolate = masm->isolate();
  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
  Register result = x0;
  Register receiver = x1;
  Label miss, unmapped;

  Register map_scratch = x2;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
  __ Ldr(result, mapped_location);

  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
  __ Ldr(x2, unmapped_location);
  __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
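// KeyedLoadIC::GenerateSloppyArguments, above, first tries the mapped
// (aliased) parameter slot; if the mapped lookup bails out to 'unmapped', the
// element is fetched from the arguments backing store, and the load misses
// entirely if even that slot holds the hole.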
  Register receiver = x2;

  Register mapped1 = x4;
  Register mapped2 = x5;
  MemOperand mapped =
      GenerateMappedArgumentsLookup(masm, receiver, key, map,
                                    mapped1, mapped2, /* ... */);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);

  Register unmapped1 = map;
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ RecordWrite(unmapped.base(), x10, x11, /* ... */);
  Isolate* isolate = masm->isolate();
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
  Register receiver = x1;
  __ Push(receiver, key);
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                        Register receiver, Register scratch1,
                                        Register scratch2, Register scratch3,
                                        Register scratch4, Register scratch5,
                                        Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;

  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);

  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
  __ IncrementCounter(
      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);

  __ Bind(&check_number_dictionary);
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(
      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
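// Summary of the smi-key path above: after the receiver check, a key that
// hits a fast-elements backing store is served by GenerateFastArrayLoad;
// otherwise, if the elements are a hash table, the element is looked up in
// the receiver's NumberDictionary via LoadFromNumberDictionary. Anything
// else falls through to the slow label and the generic runtime path.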
static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
                                         Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;

  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);

  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  Register receiver_map = scratch1;
  __ And(scratch2, scratch2, mask);

  Label load_in_object_property;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ Mov(scratch3, cache_keys);
  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Cmp(receiver_map, scratch4);
  __ Cmp(key, scratch4);
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    __ Add(scratch2, scratch2, i);
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    __ B(&load_in_object_property);
  }
  // Load in-object property.
  __ Bind(&load_in_object_property);
  __ Add(scratch5, scratch5, scratch4);
  __ Sub(receiver, receiver, kHeapObjectTag);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);

  // Load property array property.
  __ Bind(&property_array_property);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);

  // Probe the receiver's dictionary if the lookup cache missed.
  __ Bind(&probe_dictionary);
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, scratch1, scratch2);
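// The name-key path above consults the isolate's keyed lookup cache before
// falling back to a dictionary probe. The cache maps (receiver map, name)
// pairs to field offsets and is organised in buckets of kEntriesPerBucket
// entries: cache_keys holds the (map, name) pairs and cache_field_offsets the
// corresponding offsets. In rough pseudocode the probe does:
//
//   index = hash(receiver_map, name) & mask;
//   for (i = 0; i < kEntriesPerBucket; i++) {
//     if (keys[index + i] == (receiver_map, name)) {
//       offset = field_offsets[index + i];
//       // In-object field if offset < in-object property count,
//       // otherwise a load from the property array.
//     }
//   }
//
// A miss, or a receiver whose properties are in dictionary mode
// (probe_dictionary), ends up in GenerateDictionaryLoad.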
  Label slow, check_name, index_smi, index_name;

  Register receiver = x1;

  __ JumpIfNotSmi(key, &check_name);
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  Register receiver = x1;
  Register result = x0;
  Register scratch = x3;

  StringCharAtGenerator char_at_generator(receiver, /* ... */);
  char_at_generator.GenerateFast(masm);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);
  Register receiver = x1;
  __ JumpIfSmi(receiver, &slow);

  __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2, 1);
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);

  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object, Label* fast_double, Label* slow, /* ... */
    Register value, Register key, Register receiver,
    Register receiver_map, Register elements_map, Register elements) {
  ASSERT(!AreAliased(
      value, key, receiver, receiver_map, elements_map, elements, x10, x11));

  Label finish_store;
  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  __ Bind(fast_object);

  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(ne, fast_double);

  Label holecheck_passed;
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ bind(&holecheck_passed);

  __ JumpIfSmi(value, &finish_store);
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);

  Register address = x11;

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);
  __ RecordWrite(elements, /* ... */);
  __ Bind(&dont_record_write);
  __ Bind(fast_double);

  __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);

  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, /* ... */
                                 &transition_double_elements);
  __ Bind(&transition_smi_elements);

  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  ASSERT(receiver_map.Is(x3));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  ASSERT(receiver_map.Is(x3));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  ASSERT(receiver_map.Is(x3));
  __ B(&finish_store);
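// The helper above handles three element-kind transitions when the incoming
// value does not match the current backing store: smi elements receiving a
// heap number transition to double elements (and re-enter the store at
// fast_double_without_map_check), smi elements receiving any other heap
// object transition to plain object elements (non_double_value), and double
// elements receiving a non-number transition to object elements
// (transition_double_elements). Every path eventually funnels back into
// finish_store, which also decides whether a write barrier is needed.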
  Label fast_object_grow;
  Label fast_double_grow;

  Register value = x0;
  Register receiver = x2;
  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);

  __ TestAndBranchIfAnySet(/* ... */);

  Register instance_type = x10;

  __ B(hi, &fast_object);
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  /* ... */
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  /* ... */
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
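// Register protocol for the generic keyed store above: the value arrives in
// x0 and the receiver in x2 (the key, whose declaration is elided here, is by
// convention in x1), while x3-x5 hold the receiver map, the elements and the
// elements map. The two helper invocations share the same fast-object and
// fast-double code: the first covers in-bounds stores, the second the
// array-growing case via fast_object_grow and fast_double_grow.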
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x1, x2, x3, x4, x5, x6);

  __ Push(x1, x2, x0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  Register value = x0;
  Register receiver = x1;
  Register dictionary = x3;

  GenerateNameDictionaryReceiverCheck(
      masm, receiver, dictionary, x4, x5, &miss);

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);

  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  __ Push(x1, x2, x0);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);

  __ Push(x1, x2, x0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
    // (From CompareIC::ComputeCondition.)
    case Token::EQ_STRICT:


bool CompareIC::HasInlinedSmiCode(Address address) {
  return patch_info->IsInlineData();
}
  InlineSmiCheckInfo info(info_address);

  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  ASSERT(to_patch->IsTestBranch());
  ASSERT(to_patch->ImmTestBranchBit5() == 0);
  ASSERT(to_patch->ImmTestBranchBit40() == 0);

  int branch_imm = to_patch->ImmTestBranch();

  ASSERT(to_patch->Rt() == xzr.code());
  smi_reg = info.SmiRegister();

  ASSERT(to_patch->Rt() != xzr.code());

  patcher.tbnz(smi_reg, 0, branch_imm);

  patcher.tbz(smi_reg, 0, branch_imm);
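// How the smi-check patching above works: the inlined check is a single
// TBZ/TBNZ on bit 0 of a register (bit 0 is the smi tag bit, and a smi has
// tag 0, per kSmiTagMask). While the check is disabled the instruction tests
// xzr, so the branch is statically always or never taken; enabling it patches
// the tested register to the real value register recorded in the
// InlineSmiCheckInfo (and disabling it appears to patch it back to xzr). The
// re-emitted instruction keeps its original TBZ or TBNZ form, and the asserts
// verify that the target instruction really is a test-and-branch on bit 0
// before the PatchingAssembler overwrites it in place.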
#endif  // V8_TARGET_ARCH_ARM64