#if V8_TARGET_ARCH_IA32

#define __ ACCESS_MASM(masm)
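// Convention used throughout this file: each `__ insn(...)` line expands to
// `masm->insn(...)` via ACCESS_MASM and emits one ia32 instruction, so the
// stub generators below read like annotated assembly.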
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Jumps to |global_object| when |type| holds one of the global-object
  // instance types; falls through otherwise.
  // ...
}
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register r0,
                                                Register r1,
                                                Label* miss) {
  __ JumpIfSmi(receiver, miss);
  // ... (instance-type, global-object, and interceptor checks)
  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
  // ...
  __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
              DONT_DO_SMI_CHECK);
}
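// On fall-through r0 holds the receiver's property dictionary. The elided
// checks (a sketch inferred from the call sites, not verbatim) reject smis,
// instance types below the JS-object range, global objects, and maps with
// the access-check or named-interceptor bits set, then load the properties
// backing store into r0 for the CheckMap above.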
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register r0, Register r1,
                                   Register result) {
  // ... (probe the dictionary; on success r0 holds the scaled entry index)
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
  // ... (non-normal properties jump to |miss|)
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ mov(result,
         Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
}
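// NameDictionary layout assumed by the offsets above: after the fixed
// header, entries are (key, value, details) triples, so the value lives one
// pointer past an entry's key and the details word (a smi) two pointers
// past it. Testing TypeField::kMask, shifted left by the smi tag size,
// rejects anything but a plain data property.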
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
                                    Register elements, Register name,
                                    Register value, Register r0,
                                    Register r1) {
  // ... (probe the dictionary as in GenerateDictionaryLoad)
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
          Immediate(kTypeAndReadOnlyMask));
  // ... (read-only or non-normal properties jump to |miss_label|)
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
  __ mov(Operand(r0, 0), value);
  // ... (write barrier)
}
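// kTypeAndReadOnlyMask folds two refusals into a single test: the store
// must hit a plain data property (TypeField) that is not READ_ONLY
// (AttributesField). The lea/mov pair leaves the slot address in r0, which
// the elided write barrier (a RecordWrite in the full source) then uses.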
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           int interceptor_bit, Label* slow) {
  __ JumpIfSmi(receiver, slow);
  // ... (map and instance-type checks; access-checked receivers and ones
  // with the given interceptor bit go to |slow|)
}
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register scratch,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  // ... (load the elements array into |scratch|)
  if (not_fast_array != NULL) {
    __ CheckMap(scratch, masm->isolate()->factory()->fixed_array_map(),
                not_fast_array, DONT_DO_SMI_CHECK);
  } else {
    __ AssertFastElements(scratch);
  }
  // ... (bounds check against the array length, then the element load)
  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
  __ j(equal, out_of_range);
  if (!result.is(scratch)) {
    __ mov(result, scratch);
  }
}
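// The the-hole comparison is the subtle step: a hole in fast elements means
// "absent", and the correct answer may live on the prototype chain, so the
// stub treats a hole exactly like an out-of-range index and bails out
// instead of handing the hole sentinel back to JS code.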
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // ...
}
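// A sketch of what the elided body decides, inferred from the call site
// below: the key must be a unique name (a symbol or an internalized
// string); a string whose hash field caches an array index branches to
// |index_string| so the caller can retry it as an element access, and
// anything else goes to |not_unique|, i.e. the slow path.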
static Operand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Label* unmapped_case, Label* slow_case) {
  Heap* heap = masm->isolate()->heap();
  Factory* factory = masm->isolate()->factory();

  __ JumpIfSmi(object, slow_case);
  // ... (instance-type check)
  // Check that the key is a positive smi.
  __ test(key, Immediate(0x80000001));
  __ j(not_zero, slow_case);
  // ... (load the elements array and verify its map)
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  // ... (range check: keys past the mapped parameter count are unmapped)
  __ cmp(key, scratch2);
  __ j(above_equal, unmapped_case);
  // ... (load the context slot index for this parameter)
  __ cmp(scratch2, factory->the_hole_value());
  __ j(equal, unmapped_case);
  // ... (return an Operand addressing the context slot)
}
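// Why testing 0x80000001 works: with smi tag 0 in bit 0, the test is
// non-zero for heap objects (tag bit set) and for negative smis (sign bit
// set), so one test-and-branch admits exactly the non-negative smi keys.
// The sloppy-arguments elements array probed afterwards has the shape
// [context, backing store, slot_0, ..., slot_n-1]: a non-hole slot_i is a
// context index for an aliased parameter, while a hole means the value
// lives only in the backing store, i.e. the unmapped case.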
static Operand GenerateUnmappedArgumentsLookup(
    MacroAssembler* masm, Register key, Register parameter_map,
    Register scratch, Label* slow_case) {
  // The backing store sits in the second slot of the parameter map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  // ... (load the backing store length into |scratch|)
  __ cmp(key, scratch);
  __ j(greater_equal, slow_case);
  // ... (return an Operand addressing the backing-store element)
}
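// Note the deliberate aliasing: |backing_store| reuses |parameter_map|'s
// register, which is safe because the parameter map is dead once the
// backing store has been loaded out of it. The length comparison above is
// the only range check the unmapped path needs.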
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // Register use on entry: edx holds the receiver, ecx the key.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;
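  // The labels sketch the dispatch: smi keys try fast elements, then a
  // number dictionary; name keys try the keyed lookup cache, then the
  // receiver's property dictionary; strings that spell array indices are
  // converted and re-dispatched through index_name/index_smi.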
  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &check_name);
  __ bind(&index_smi);
  GenerateKeyedLoadReceiverCheck(
      masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
  __ CheckFastElements(eax, &check_number_dictionary);
  // ... (fast element load; holes and out-of-range indices go to &slow)
  Isolate* isolate = masm->isolate();
  Counters* counters = isolate->counters();
  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
  __ ret(0);
  __ bind(&check_number_dictionary);
  // ... (untag the key, load the elements array into eax)
  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
              DONT_DO_SMI_CHECK);
  Label slow_pop_receiver;
  // ... (push the receiver to free a register, probe the number dictionary)
  __ bind(&slow_pop_receiver);
  // ... (pop the receiver, fall through)
  __ bind(&slow);
  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
  GenerateRuntimeGetProperty(masm);
  __ bind(&check_name);
  GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, edx, eax, Map::kHasNamedInterceptor, &slow);

  // Receivers with dictionary properties skip straight to the dictionary
  // probe; everything else tries the keyed lookup cache first.
  // ...
  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
         Immediate(isolate->factory()->hash_table_map()));
  __ j(equal, &probe_dictionary);

  if (FLAG_debug_code) {
    __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
    __ Check(equal, kMapIsNoLongerInEax);
  }
  // ... (hash the map pointer with the name hash, mask to cache capacity)
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(masm->isolate());

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // ... (edi = bucket hash scaled to a byte offset into the cache)
    __ add(edi, Immediate(kPointerSize * i * 2));
    // ... (compare the cached map; a mismatch falls to try_next_entry)
    __ add(edi, Immediate(kPointerSize));
    // ... (compare the cached name)
    __ j(equal, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  // Last entry: a mismatch here goes to the slow path, not the next entry.
  // ...
  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
  // ... (map comparison)
  __ add(edi, Immediate(kPointerSize));
  // ... (name comparison; either mismatch jumps to &slow)
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());

  // Hit on the nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ add(eax, Immediate(i));
    // ... (fetch the cached field offset; offsets past the in-object
    // fields divert to &property_array_property)
    __ jmp(&load_in_object_property);
  }

  __ bind(&load_in_object_property);
  // ... (load the field at instance_size + offset from the receiver)
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);

  __ bind(&property_array_property);
  // ... (load the field from the properties backing array)
  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
  __ ret(0);
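  // The KeyedLookupCache probed above is a fixed-size table outside the JS
  // heap: cache_keys holds (map, name) pairs and cache_field_offsets holds
  // the matching field indices. Buckets of kEntriesPerBucket entries are
  // scanned linearly, and a hit yields the property's offset without ever
  // touching the descriptor array; eax carries the entry index between the
  // two parallel tables.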
  // Probe the receiver's property dictionary.
  __ bind(&probe_dictionary);
  // ... (load the receiver's instance type into eax)
  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
  GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
  __ ret(0);

  __ bind(&index_name);
  __ IndexFromHash(ebx, ecx);
  __ jmp(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  Label miss;
  Register receiver = edx;
  Register index = ecx;
  Register scratch = ebx;
  Register result = eax;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ ret(0);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}
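// StringCharAtGenerator splits the work: GenerateFast handles flat strings
// and smi indices inline, while GenerateSlow, driven by the
// StubRuntimeCallHelper, calls into the runtime for cons/sliced strings and
// other awkward cases. All three miss labels funnel into the generic
// keyed-load miss handler.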
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  Label slow;
  __ JumpIfSmi(edx, &slow);
  // ... (key and receiver-map checks; arguments re-pushed for the call)
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}
void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ecx : key, edx : receiver
  Label slow, notin;
  Factory* factory = masm->isolate()->factory();
  Operand mapped_location =
      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
  __ mov(eax, mapped_location);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects the parameter map in ebx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
  __ cmp(unmapped_location, factory->the_hole_value());
  __ j(equal, &slow);
  __ mov(eax, unmapped_location);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}
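// Two-level lookup for sloppy 'arguments' objects: the mapped case reads an
// aliased parameter straight out of the function context via the Operand
// returned above, while the unmapped case falls back to the backing
// FixedArray, where a hole means the element was deleted and the IC must
// miss.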
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // eax : value, ecx : key, edx : receiver
  Label slow, notin;
  Operand mapped_location =
      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
  __ mov(mapped_location, eax);
  __ lea(ecx, mapped_location);
  __ mov(edx, eax);
  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects the parameter map in ebx.
  Operand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
  __ mov(unmapped_location, eax);
  __ lea(edi, unmapped_location);
  __ mov(edx, eax);
  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}
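// The lea after each store exists for the write barrier: RecordWrite wants
// the address of the slot just written rather than the Operand, and ebx
// still holds the array that owns the slot. The value is copied to a
// scratch register first because RecordWrite may clobber its arguments.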
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;
  // Fast case: the receiver has fast (tagged object) elements.
  __ bind(fast_object);
  if (check_map == kCheckMap) {
    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
    __ j(not_equal, fast_double);
  }

  // HOLECHECK: storing over a hole must consult the prototype chain, since
  // a dictionary-mode prototype could carry a setter for this index.
  Label holecheck_passed1;
  __ cmp(FixedArrayElementOperand(ebx, ecx),
         masm->isolate()->factory()->the_hole_value());
  __ j(not_equal, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
  // ... (reload the elements array clobbered by the prototype walk)
  __ bind(&holecheck_passed1);

  // Smi values can be stored without a write barrier.
  __ JumpIfNotSmi(eax, &non_smi_value);
  // ... (store the smi, bump the length if growing, return)
  __ bind(&non_smi_value);
  // A heap object is being stored: the elements kind must allow it.
  __ CheckFastObjectElements(edi, &transition_smi_elements);
  __ bind(&finish_object_store);
  // ... (store, write barrier, optional length bump, return)
  // Fast case: the receiver has fast double elements.
  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Anything but a FixedDoubleArray goes to the runtime.
    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
    __ j(not_equal, slow);
  }

  // HOLECHECK: the same prototype-chain guard as above, for the NaN-encoded
  // hole of a FixedDoubleArray.
  // ...
  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
  // ...
  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
                                 &transition_double_elements, false);
  // ... (optional length bump, return)
  __ bind(&transition_smi_elements);
  // The transition depends on the incoming value: heap numbers send the
  // array to FAST_DOUBLE_ELEMENTS, anything else to FAST_ELEMENTS.
  __ CheckMap(eax, masm->isolate()->factory()->heap_number_map(),
              &non_double_value, DONT_DO_SMI_CHECK);
  // ... (map transition, ElementsTransitionGenerator::GenerateSmiToDouble)
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // ... (map transition, GenerateMapChangeElementsTransition)
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // ... (map transition, ElementsTransitionGenerator::GenerateDoubleToObject)
  __ jmp(&finish_object_store);
}
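// GenerateGeneric below instantiates this helper twice, so the labels above
// are shared machinery: one copy handles in-bounds stores, the other the
// grow-by-one store at the array length, selected by check_map and
// increment_length.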
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // eax : value, ecx : key, edx : receiver
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Check that the object isn't a smi.
  __ JumpIfSmi(edx, &slow);
  // ... (map loaded; access-checked or observed receivers go to &slow)
  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &slow);
  // ... (bounds checks; a store one past the end of an array lands in
  // &extra, the grow case)
  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
  __ j(not_equal, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
  __ j(not_equal, &slow);
  __ jmp(&fast_double_grow);

  // ... (&array: in-bounds array stores dispatch to fast_object/fast_double)
  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength);
}
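// Emitting the helper twice trades code size for simpler dispatch: the grow
// variant skips the elements-map re-check (the map was examined on the way
// in) and bumps the JSArray length after a successful store, while the
// non-grow variant re-checks the map and leaves the length alone.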
  // In LoadIC::GenerateMegamorphic: probe the stub cache for a handler
  // keyed on the receiver's map and the name, then miss to the runtime.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, edx, ecx, ebx, eax);
  // In LoadIC::GenerateNormal: receiver in edx, name in ecx.
  GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
  // In LoadIC::GenerateMiss:
  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
  // ... (receiver and name re-pushed below the return address)
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);
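// TailCallExternalReference's trailing arguments are the number of stack
// arguments the runtime entry pops (2 here: receiver and name) and the
// result size in machine words (1). The same pattern recurs below: 3
// arguments for store misses (receiver, name, value) and 5 for the
// strict-mode set-property calls.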
  // In LoadIC::GenerateRuntimeGetProperty:
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
  // In KeyedLoadIC::GenerateMiss:
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
  // ...
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);
  // In KeyedLoadIC::GenerateRuntimeGetProperty:
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
  // In StoreIC::GenerateMegamorphic (the register arguments below are
  // recalled from the surrounding conventions, not verbatim):
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, edx, ecx, ebx, no_reg);
  // In StoreIC::GenerateMiss:
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss, restore_miss;

  GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);

  // Registers are tight for a dictionary store: park the receiver on the
  // stack so edx can be used as a scratch register.
  __ push(edx);
  GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
  __ Drop(1);  // Discard the saved receiver; the value is still in eax.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1);
  __ ret(0);

  __ bind(&restore_miss);
  __ pop(edx);
  __ IncrementCounter(counters->store_normal_miss(), 1);
  GenerateMiss(masm);
  // ... (&miss is bound here and also counts store_normal_miss)
}
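// restore_miss exists because the receiver was pushed before the dictionary
// store: that failure path must pop the receiver back into edx before
// falling into the shared miss handler, whereas &miss is reached with the
// stack already balanced.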
  // In StoreIC::GenerateRuntimeSetProperty: receiver, name, and value are
  // pushed along with smis for the attributes (NONE) and the strict mode.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);

  // In KeyedStoreIC::GenerateRuntimeSetProperty, with the same five
  // arguments:
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
  // In KeyedStoreIC::GenerateMiss:
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  // In StoreIC::GenerateSlow:
  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  // In KeyedStoreIC::GenerateSlow:
  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  // In CompareIC::ComputeCondition: strict and sloppy equality map to the
  // same machine condition.
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;
  // If the instruction following the call is not a test al, nothing was
  // inlined.
  return *test_instruction_address == Assembler::kTestAlByte;
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kCallTargetAddressOffset;
  // ... (bail out unless a test al marker follows the call)
  // The test instruction's immediate byte holds the delta back to the
  // inlined jump that is about to be repurposed.
  Address delta_address = test_instruction_address + 1;
  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, test=%p, delta=%d\n",
           address, test_instruction_address, delta);
  }
  Address jmp_address = test_instruction_address - delta;
  // ... (switch the short jump at jmp_address from jc/jnc to jz/jnz when
  // enabling the inlined smi check, and back when disabling it)
}
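// The patch protocol, pieced together from the assembler constants this
// code leans on (kTestAlByte, kJccShortPrefix, kJc/kJnc/kJz/kJnzShortOpcode):
// every full compare-stub call site is followed by a one-byte "test al,
// <delta>" marker whose immediate encodes the distance back to the inlined
// smi-check jump. Toggling the check just rewrites that short jump's
// condition byte in place, so no code object ever has to be regenerated.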
#endif  // V8_TARGET_ARCH_IA32