#if V8_TARGET_ARCH_X64

// ...

#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Jump to |global_object| if the instance type is one of the global
  // object types.
  // ...
}
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register r0,
                                                Register r1,
                                                Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);
  // ...
  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
  // ...
  // Check that the properties array is a dictionary.
  __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(not_equal, miss);
}
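// Note: receivers in dictionary (slow) mode keep their properties in a
// NameDictionary hash table rather than in-object or in a descriptor-indexed
// array, which is why the helpers below probe that table directly.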
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register r0,
                                   Register r1,
                                   Register result) {
  // Probe the dictionary (see NameDictionaryLookupStub::GeneratePositiveLookup);
  // on success r1 holds the index of the matching entry.
  // ...
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  // ...
  // Get the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  // ...
}
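// Note on the offsets above and below: a NameDictionary stores each entry as
// a (key, value, details) triple in its elements area, which begins at
// kElementsStartIndex. Hence the value lives one pointer after the key
// (kValueOffset) and the details smi two pointers after it (kDetailsOffset).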
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch0,
                                    Register scratch1) {
  // Probe the dictionary; on success scratch1 holds the entry index.
  // ...
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ Test(Operand(elements, scratch1, times_pointer_size,
                  kDetailsOffset - kHeapObjectTag),
          Smi::FromInt(kTypeAndReadOnlyMask));
  __ j(not_zero, miss);

  // Store the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
                            kValueOffset - kHeapObjectTag));
  __ movp(Operand(scratch1, 0), value);

  // Update write barrier. Make sure not to clobber the value.
  __ movp(scratch0, value);
  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
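// The value is copied into scratch0 before the write barrier because
// RecordWrite may clobber the registers it is handed; the copy keeps the
// stored value itself intact for the IC's caller.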
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // ...
  // Check bit field: bail out to the slow case if the receiver needs an
  // access check or has the relevant interceptor.
  __ testb(FieldOperand(map, Map::kBitFieldOffset),
           Immediate((1 << Map::kIsAccessCheckNeeded) |
                     (1 << interceptor_bit)));
  __ j(not_zero, slow);
  // ...
}
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // ...
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                   Heap::kFixedArrayMapRootIndex);
    __ j(not_equal, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // ...
  // In case the loaded value is the hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ j(equal, out_of_range);
  if (!result.is(scratch)) {
    __ movp(result, scratch);
  }
}
// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // ...
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // rax: key, rdx: receiver; return address on the stack.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  // Check that the key is a smi.
  __ JumpIfNotSmi(rax, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, Map::kHasIndexedInterceptor,
                                 &slow);
  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(rcx, &check_number_dictionary);

  GenerateFastArrayLoad(masm, rdx, rax, rcx, rbx, rax, NULL, &slow);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
  __ ret(0);
  __ bind(&check_number_dictionary);
  // ...
  // Check whether the elements array is a number dictionary.
  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(not_equal, &slow);
  // ...

  // Slow case: Jump to runtime.
  __ bind(&slow);
  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
  GenerateRuntimeGetProperty(masm);
  __ bind(&check_name);
  GenerateKeyNameCheck(masm, rax, rcx, rbx, &index_name, &slow);
  GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, Map::kHasNamedInterceptor,
                                 &slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary leaving result in rcx.
  __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                 Heap::kHashTableMapRootIndex);
  __ j(equal, &probe_dictionary);
  // Load the map of the receiver and compute the keyed lookup cache hash
  // from the map pointer and the string hash.
  // ...
  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
  __ andp(rcx, Immediate(mask));
  // Load the key (consisting of map and internalized string) from the cache
  // and check for a match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(masm->isolate());
  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // ...
    int off = kPointerSize * i * 2;
    // ...
    __ j(equal, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }
  // The last entry in the bucket falls through to the slow case on mismatch.
  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
  // ...
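// The KeyedLookupCache maps a (map, name) pair to a property field offset,
// letting this megamorphic keyed load skip a full descriptor or dictionary
// lookup. Each hash bucket holds kEntriesPerBucket entries; all but the last
// are probed with a fall-through to try_next_entry above, and a miss on the
// final entry jumps to the slow case.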
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
  // Hit on the nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    if (i != 0) {
      __ addl(rcx, Immediate(i));
    }
    // ...
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }
  // Load in-object property.
  __ bind(&load_in_object_property);
  // ...
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);
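// A cached field offset below the receiver's number of in-object properties
// refers to a slot stored inside the object itself (addressed relative to the
// instance size); larger offsets are rebased and loaded from the separate
// properties backing array handled just below.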
  // Load property array property.
  __ bind(&property_array_property);
  // ...
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);
  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ bind(&probe_dictionary);
  // ...
  GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
  // ...
  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
  __ ret(0);
  __ bind(&index_name);
  __ IndexFromHash(rbx, rax);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is on the stack.
  Label miss;

  Register receiver = rdx;
  Register index = rax;
  Register scratch = rcx;
  Register result = rax;

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ ret(0);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // Return address is on the stack.
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(rdx, &slow);

  // Check that the key is an array index, that is Uint32.
  __ JumpUnlessNonNegativeSmi(rax, &slow);
  // ...
  // Everything is fine, call runtime.
  __ PopReturnAddressTo(rcx);
  __ Push(rdx);  // receiver
  __ Push(rax);  // key
  __ PushReturnAddressFrom(rcx);

  // Perform tail call to the entry.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  if (check_map == kCheckMap) {
    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: guards "A[i] = V".
  Label holecheck_passed1;
  // ...
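// The (elided) hole check: if the target slot currently holds the_hole, the
// store must take the slow path, since the hole may shadow a setter or
// read-only property further up the prototype chain that a direct overwrite
// would bypass.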
  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(rax, &non_smi_value);
  // ...
  __ bind(&non_smi_value);
  // Writing a non-smi value; check whether the elements kind can hold it.
  __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
  __ CheckFastObjectElements(r9, &transition_smi_elements);

  __ bind(&finish_object_store);
  // ... (store the value and update the write barrier)
  __ ret(0);
  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
    __ j(not_equal, slow);
  }
  // ...
  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
                                 &transition_double_elements);
  // ...
  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &non_double_value);
  // Value is a double: transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
  // and complete the store.
  // ...
  __ jmp(&fast_double_without_map_check);
  __ bind(&non_double_value);
  // Value is not a double: transition FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  // ...
  __ jmp(&finish_object_store);
  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is not
  // a HeapNumber: transition FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS.
  // ...
  __ jmp(&finish_object_store);
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // Return address is on the stack.
  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Check that the object isn't a smi.
  __ JumpIfSmi(rdx, &slow_with_tagged_index);
  // ...
  // Check that the key is a smi.
  __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
  // ...
  // Slow case: call runtime.
  __ bind(&slow);
  __ Integer32ToSmi(rcx, rcx);
  __ bind(&slow_with_tagged_index);
  GenerateRuntimeSetProperty(masm, strict_mode);
  // Never returns to here.
  // ...
  // Grow path: check whether the elements array can simply grow.
  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &check_if_double_array);
  // ...
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ j(not_equal, &slow);
  // ...
  __ jmp(&fast_double_grow);
  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength);
}
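// The helper is emitted twice above: once for in-bounds stores (with an
// elements map check and without growing) and once for the grow case, which
// skips the map check already performed on the grow path and bumps the array
// length after the store.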
static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                             Register object,
                                             Register key,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* unmapped_case,
                                             Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // ...
  // Check that the elements have the sloppy arguments elements map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  // ...
  // Check if the key is within the mapped parameter range.
  __ cmpp(key, scratch2);
  __ j(greater_equal, unmapped_case);
  // ...
  // Load the mapped slot; the hole means the argument is unmapped.
  __ SmiToInteger64(scratch3, key);
  // ...
  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ j(equal, unmapped_case);
  // ...
  // Load the value from the context slot at the index found in the map.
  __ SmiToInteger64(scratch3, scratch2);
  // ...
}
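// Layout assumed above: a sloppy-arguments elements array holds the context
// and a pointer to the arguments backing store in its first two slots,
// followed by one slot per mapped parameter. A slot containing a context
// index means the argument aliases a context slot; the_hole means it must be
// looked up in the backing store (the unmapped case below).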
static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                               Register key,
                                               Register parameter_map,
                                               Register scratch,
                                               Label* slow_case) {
  // The element is in the backing store, which is the second element of the
  // parameter map; read it from there.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  // Check that the key is within the backing store's length.
  __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
  __ cmpp(key, scratch);
  __ j(greater_equal, slow_case);
  __ SmiToInteger64(scratch, key);
  return FieldOperand(backing_store, scratch, times_pointer_size,
                      FixedArray::kHeaderSize);
}
void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // The return address is on the stack.
  Label slow, notin;
  Operand mapped_location = GenerateMappedArgumentsLookup(
      masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
  __ movp(rax, mapped_location);
  __ Ret();

  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in rbx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
  __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
  __ j(equal, &slow);
  __ movp(rax, unmapped_location);
  __ Ret();

  __ bind(&slow);
  GenerateMiss(masm);
}
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // Return address is on the stack.
  Label slow, notin;
  Operand mapped_location = GenerateMappedArgumentsLookup(
      masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
  __ movp(mapped_location, rax);
  __ leap(r9, mapped_location);
  __ movp(r8, rax);
  __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                 INLINE_SMI_CHECK);
  __ Ret();

  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in rbx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
  __ movp(unmapped_location, rax);
  __ leap(r9, unmapped_location);
  __ movp(r8, rax);
  __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                 INLINE_SMI_CHECK);
  __ Ret();

  __ bind(&slow);
  GenerateMiss(masm);
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // Probe the stub cache (receiver in rax, name in rcx).
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
                                               rdx);
  GenerateMiss(masm);  // Cache miss: jump to runtime.
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
  // ... (search the dictionary, placing the result in rax)
  __ bind(&miss);
  GenerateMiss(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is on the stack.

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->load_miss(), 1);

  __ PopReturnAddressTo(rbx);
  __ Push(rax);  // receiver
  __ Push(rcx);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rax);  // receiver
  __ Push(rcx);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is on the stack.

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_load_miss(), 1);

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rax);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rax);  // name
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // Probe the stub cache (receiver in rdx, name in rcx, value in rax).
  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
                                               no_reg);
  GenerateMiss(masm);  // Cache miss: jump to runtime.
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // Return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);

  GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1);
  __ ret(0);

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1);
  GenerateMiss(masm);
}
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  // Return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
  __ Push(Smi::FromInt(strict_mode));
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  // Return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
  __ Push(Smi::FromInt(strict_mode));
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // Return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // Return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Return address is on the stack.

  __ PopReturnAddressTo(rbx);
  __ Push(rdx);  // receiver
  __ Push(rcx);  // name
  __ Push(rax);  // value
  __ PushReturnAddressFrom(rbx);

  // Do tail-call to runtime routine.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    // ...
    default:
      UNREACHABLE();
      return no_condition;
  }
}
bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a test al, nothing
  // was inlined.
  return *test_instruction_address == Assembler::kTestAlByte;
}
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a test al, nothing
  // was inlined.
  if (*test_instruction_address != Assembler::kTestAlByte) {
    ASSERT(*test_instruction_address == Assembler::kNopByte);
    return;
  }

  // Extract the encoded delta from the test instruction.
  Address delta_address = test_instruction_address + 1;
  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, test=%p, delta=%d\n",
           address, test_instruction_address, delta);
  }

  // Patch with a short conditional jump. Enabling means switching from a
  // short jump-if-carry/not-carry to jump-if-zero/not-zero, and disabling is
  // the reverse of that.
  Address jmp_address = test_instruction_address - delta;
  // ...
}
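// How the patching works: the full code generator emits "test al, <delta>"
// right after the IC call site, where the one-byte immediate records the
// distance back to a short conditional jump around the inlined smi code.
// Since TEST always clears the carry flag, encoding that jump as jc/jnc makes
// it never/always taken while the inlined check is disabled; patching flips
// the opcode byte to jz/jnz so the branch starts honoring the smi-tag test
// once type feedback indicates smi operands are common.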
#endif  // V8_TARGET_ARCH_X64