#if V8_TARGET_ARCH_MIPS

#define __ ACCESS_MASM(masm)
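
// Branches to |global_object| when the instance type in |type| is a global
// object type; the callers below pass their miss/slow label here so that
// global receivers never reach the dictionary fast paths.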
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
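
// Receiver check shared by the normal (dictionary) load and store ICs below:
// smis, global objects, and receivers whose bit field or properties map fail
// the surviving checks all jump to |miss|; on success |elements| is expected
// to hold the receiver's NameDictionary.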
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  __ JumpIfSmi(receiver, miss);
  __ GetObjectType(receiver, scratch0, scratch1);

  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);

  __ Branch(miss, ne, scratch1, Operand(zero_reg));

  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
  __ Branch(miss, ne, scratch1, Operand(scratch0));
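
// Probes |elements| (a NameDictionary) for |name| and, on a hit, loads the
// property value into |result|. The details-word test below rejects entries
// whose PropertyDetails type field is non-zero, i.e. anything that is not a
// normal data property.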
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ And(at, scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));
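
// Store counterpart of GenerateDictionaryLoad: probes the NameDictionary for
// |name| and writes |value| into the entry. kTypeAndReadOnlyMask makes the
// details check stricter than the load case, so read-only properties also
// fall through to |miss|.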
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ mov(scratch1, value);
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  __ JumpIfSmi(receiver, slow);
  __ Branch(slow, ne, at, Operand(zero_reg));
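
// Loads an element from a fast-elements backing store: optionally checks for
// the FixedArray map (only when |not_fast_array| is non-NULL), bounds-checks
// |key| against the length, and jumps to |out_of_range| when the slot holds
// the hole; otherwise the element ends up in |result|.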
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  if (not_fast_array != NULL) {
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  __ Branch(out_of_range, hs, key, Operand(scratch1));

  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addu(at, at, scratch1);

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
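
// Classifies a non-smi key: jumps to |index_string| when the name's hash
// field contains a cached array index, and to |not_unique| when the key is
// not a unique name; unique names fall through to the property lookup paths.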
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  __ GetObjectType(key, map, hash);

  __ Branch(index_string, eq, at, Operand(zero_reg));

  __ Branch(not_unique, ne, at, Operand(zero_reg));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a0, a2, a3, t0, t1, t2);
  GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);

  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);

  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
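
// Helpers for loads/stores on sloppy arguments objects. The mapped lookup
// checks that the key is a positive smi, that the receiver's elements carry
// the sloppy_arguments_elements map, and then indexes the parameter map; a
// hole entry means the argument is unmapped and the lookup continues in the
// FixedArray backing store handled by GenerateUnmappedArgumentsLookup.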
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  __ JumpIfSmi(object, slow_case);
  __ GetObjectType(object, scratch1, scratch2);

  // Check that the key is a positive smi.
  __ And(scratch1, key, Operand(0x80000001));
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1,
              scratch2,
              arguments_map,
              slow_case,
              DONT_DO_SMI_CHECK);

  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, key, scratch3);
  __ Addu(scratch3, scratch3, Operand(kOffset));

  __ Addu(scratch2, scratch1, scratch3);
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, scratch2, scratch3);
  __ Addu(scratch2, scratch1, scratch3);
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  Register backing_store = parameter_map;
  __ CheckMap(backing_store,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  __ li(scratch, Operand(kPointerSize >> 1));
  __ Mul(scratch, key, scratch);
  __ Addu(scratch, backing_store, scratch);
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
  __ lw(v0, mapped_location);

  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
  __ lw(a2, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a2, Operand(a3));
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
  __ sw(a0, mapped_location);
  __ RecordWrite(a3, mapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);

  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
  __ sw(a0, unmapped_location);
  ASSERT_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
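
// KeyedLoadIC::GenerateGeneric dispatches on the key: smi keys take the fast
// array path (or the number dictionary when the elements are in dictionary
// mode), name keys first try the keyed lookup cache and then the receiver's
// property dictionary, and names with a cached array index are rerouted to
// the smi path via index_name below.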
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = a0;
  Register receiver = a1;

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a2, a3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);

  __ bind(&check_number_dictionary);
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);

  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1, a2, a3);
  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  __ And(a3, a3, Operand(mask));
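
  // Probe the keyed lookup cache. The cache is a table of (receiver map,
  // name) pairs; a3 now holds the bucket index derived from the receiver map
  // and the name hash, and each bucket holds kEntriesPerBucket entries that
  // are compared in turn below (a2 = map, a0 = key).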
  Label load_in_object_property;
  Label hit_on_nth_entry[kEntriesPerBucket];

  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(t0, Operand(cache_keys));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ Branch(&try_next_entry, ne, a2, Operand(t1));
    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
    __ bind(&try_next_entry);
  }

  // Last entry: compare the cached map (a2) and key (a0).
  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a2, Operand(t1));
  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, a0, Operand(t1));
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(t0, Operand(cache_field_offsets));
    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
    __ Branch(&load_in_object_property);
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ Subu(a1, a1, Operand(kHeapObjectTag));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, a2, a3);

  // Load property array property.
  __ bind(&property_array_property);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, a2, a3);
  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ bind(&probe_dictionary);
  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, a2, a3);

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
  Register receiver = a1;
  Register index = a0;
  Register scratch = a3;
  Register result = v0;

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss, &miss, &miss,
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
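
// Shared tail of KeyedStoreIC::GenerateGeneric. |fast_object| stores into a
// FixedArray backing store (with a write barrier for non-smi values),
// |fast_double| stores into a FixedDoubleArray, and the transition labels
// below convert the elements kind (smi -> double, smi -> object,
// double -> object) before retrying the store.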
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = t0;
  Register address = t1;

  __ Branch(fast_double, ne, elements_map,
            Operand(masm->isolate()->factory()->fixed_array_map()));

  // HOLECHECK: guards "A[i] = V".
  Label holecheck_passed1;
  __ addu(address, address, at);
  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);
  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  __ JumpIfNotSmi(value, &non_smi_value);
  __ Addu(address, address, scratch_value);

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  __ Addu(address, address, scratch_value);
  __ mov(scratch_value, value);
  __ RecordWrite(elements,
  __ bind(fast_double);
  // Check for fast double array case. If this fails, call through to the
  // runtime.
  __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
  __ Branch(slow, ne, elements_map, Operand(at));

  // HOLECHECK: guards "A[i] double hole?"
  __ Addu(address, elements,
  __ addu(address, address, at);
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 &transition_double_elements);

  __ bind(&transition_smi_elements);
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, t0, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS.
  ASSERT(receiver_map.is(a3));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double. Transition FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  ASSERT(receiver_map.is(a3));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are double, but the value is an object: go to FAST_ELEMENTS.
  ASSERT(receiver_map.is(a3));
  __ jmp(&finish_object_store);
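
// KeyedStoreIC::GenerateGeneric itself only performs the guard checks (smi
// key, heap-object receiver, a receiver that passes the bit-field test, array
// bounds) and then funnels into the helper above twice: once for in-bounds
// stores and once for stores that grow the backing store by one element.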
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = a0;
  Register key = a1;
  Register receiver = a2;
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  __ Branch(&slow, ne, t0, Operand(zero_reg));

  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Branch(&fast_object, lo, key, Operand(t0));

  __ Branch(&slow, ne, key, Operand(t0));
  __ Branch(&slow, hs, key, Operand(t0));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  __ JumpIfSmi(a1, &slow);
  __ Branch(&slow, ne, t0, Operand(zero_reg));

  __ TailCallExternalReference(ExternalReference(
      IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
  __ Push(a2, a1, a0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  __ Push(a1, a2, a0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  __ Push(a2, a1, a0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);
  __ Push(a1, a2, a0);

  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
                                            masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
  GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);

  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);

  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
  __ Push(a1, a2, a0);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
    case Token::EQ_STRICT:
bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;
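
// PatchInlinedSmiCode toggles the smi check emitted inline at a CompareIC
// call site: it locates the andi/branch pair relative to the call, rewrites
// the andi immediate (0 to disable the check, presumably kSmiTagMask to
// re-enable it), and flips the following branch between eq and ne so the
// patched code keeps the intended control flow.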
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
           address, andi_instruction_address, delta);
  }

  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);

  CodePatcher patcher(patch_address, 2);
  patcher.masm()->andi(at, reg, 0);

  patcher.ChangeBranchCondition(ne);

  patcher.ChangeBranchCondition(eq);
#endif  // V8_TARGET_ARCH_MIPS