#ifndef V8_MIPS_CODE_STUBS_ARM_H_
#define V8_MIPS_CODE_STUBS_ARM_H_

#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);

class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) {}

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};

class StringHelper : public AllStatic {
 public:
  // Generates code that copies a large number of characters; copying of
  // overlapping regions is not supported.
  static void GenerateCopyCharactersLong(MacroAssembler* masm,
                                         Register dest, Register src,
                                         Register count, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, int flags);

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm, Register hash,
                               Register character);
  static void GenerateHashAddCharacter(MacroAssembler* masm, Register hash,
                                       Register character);
  static void GenerateHashGetHash(MacroAssembler* masm, Register hash);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};

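// The three hash helpers above emit assembly that computes a string hash
// incrementally. As an illustration only (HashAddCharacter, HashGetHash and
// HashString below are hypothetical names, not part of this header), the
// sketch shows the add/shift/xor mixing the generated code performs, assuming
// the one-at-a-time style hash V8's StringHasher uses; the real stubs also
// fold in a hash seed and mask the result to the hash field width.
#if 0  // Standalone sketch, not part of the original header.
#include <cstddef>
#include <cstdint>

static inline uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;  // mix in the next character
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

static inline uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;  // final avalanche steps
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}

static uint32_t HashString(const uint8_t* chars, size_t length,
                           uint32_t seed) {
  uint32_t hash = seed;  // GenerateHashInit folds the seed into the state
  for (size_t i = 0; i < length; ++i) {
    hash = HashAddCharacter(hash, chars[i]);
  }
  return HashGetHash(hash);
}
#endif
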
class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};

class StoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
      : save_doubles_(with_fp) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  Major MajorKey() { return StoreRegistersState; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
  SaveFPRegsMode save_doubles_;

  void Generate(MacroAssembler* masm);
};

class RestoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
      : save_doubles_(with_fp) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  Major MajorKey() { return RestoreRegistersState; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
  SaveFPRegsMode save_doubles_;

  void Generate(MacroAssembler* masm);
};

class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns the result in v0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns the result
  // in v0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left, Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left, Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Label* chars_not_equal);
};

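// The comparison helpers above emit the flat-ASCII loops in generated code.
// Purely as an illustration (the function names below are hypothetical and
// the data is assumed to be raw one-byte strings), this is the logic the
// emitted code follows: ordering compares the common prefix character by
// character and falls back to comparing lengths; equality can bail out early
// on a length mismatch.
#if 0  // Standalone sketch, not part of the original header.
#include <cstddef>
#include <cstdint>

static int CompareFlatAsciiStrings(const uint8_t* left, size_t left_len,
                                   const uint8_t* right, size_t right_len) {
  size_t min_len = left_len < right_len ? left_len : right_len;
  for (size_t i = 0; i < min_len; ++i) {
    // The first differing character decides the ordering.
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;  // shorter prefix sorts first
}

static bool FlatAsciiStringEquals(const uint8_t* left, size_t left_len,
                                  const uint8_t* right, size_t right_len) {
  if (left_len != right_len) return false;  // cheap length check first
  return CompareFlatAsciiStrings(left, left_len, right, right_len) == 0;
}
#endif
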
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub,
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 public:
  WriteInt32ToHeapNumberStub(Register the_int,
                             Register the_heap_number,
                             Register scratch,
                             Register scratch2)
      : the_int_(the_int),
        the_heap_number_(the_heap_number),
        scratch_(scratch),
        sign_(scratch2) {
    ASSERT(IntRegisterBits::is_valid(the_int_.code()));
    ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
    ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
    ASSERT(SignRegisterBits::is_valid(sign_.code()));
  }

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);

 private:
  Register the_int_;
  Register the_heap_number_;
  Register scratch_;
  Register sign_;

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};
  class SignRegisterBits: public BitField<int, 12, 4> {};

  Major MajorKey() { return WriteInt32ToHeapNumber; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return IntRegisterBits::encode(the_int_.code())
           | HeapNumberRegisterBits::encode(the_heap_number_.code())
           | ScratchRegisterBits::encode(scratch_.code())
           | SignRegisterBits::encode(sign_.code());
  }

  void Generate(MacroAssembler* masm);
};

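// The four BitField specializations above pack the four 4-bit register codes
// into a single 16-bit minor key, so each distinct register assignment gets
// its own cached stub. A minimal standalone sketch of the same encode/decode
// scheme (the Field template and register codes below are illustrative, not
// V8's BitField):
#if 0  // Standalone sketch, not part of the original header.
#include <cstdint>

template <int shift, int size>
struct Field {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static constexpr uint32_t encode(uint32_t value) { return value << shift; }
  static constexpr uint32_t decode(uint32_t key) {
    return (key & kMask) >> shift;
  }
};

using IntReg        = Field<0, 4>;   // mirrors IntRegisterBits
using HeapNumberReg = Field<4, 4>;   // mirrors HeapNumberRegisterBits
using ScratchReg    = Field<8, 4>;   // mirrors ScratchRegisterBits
using SignReg       = Field<12, 4>;  // mirrors SignRegisterBits

// OR-ing the encoded fields yields the key; decode recovers each code.
constexpr uint32_t kKey = IntReg::encode(2) | HeapNumberReg::encode(3) |
                          ScratchReg::encode(4) | SignReg::encode(5);
static_assert(IntReg::decode(kKey) == 2 && SignReg::decode(kKey) == 5,
              "each register code round-trips through the key");
#endif
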
class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static void PatchBranchIntoNop(MacroAssembler* masm, int pos);
  static void PatchNopIntoBranch(MacroAssembler* masm, int pos);
  static Mode GetMode(Code* stub);
  static void Patch(Code* stub, Mode mode);

 private:
  // Helper class that frees up three scratch registers: two input registers
  // that must be preserved plus one scratch register provided by the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object, Register address, Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      // scratch0_ was handed in as a scratch register, so only scratch1_
      // needs to be preserved here.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // When calling into C, all caller-saved registers that were not already
    // preserved must be saved and restored around the call.
    inline void SaveCallerSaveRegisters(MacroAssembler* masm,
                                        SaveFPRegsMode mode);
    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode);

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return ObjectBits::encode(object_.code()) |
           ValueBits::encode(value_.code()) |
           AddressBits::encode(address_.code()) |
           RememberedSetActionBits::encode(remembered_set_action_) |
           SaveFPRegsModeBits::encode(save_fp_regs_mode_);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  RegisterAllocation regs_;
};

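// GetMode() and Patch(), declared above, switch the stub between its three
// modes by rewriting the branch instructions at the start of the generated
// code rather than by compiling a new stub. A hedged sketch of the decision
// logic (GetModeSketch is a hypothetical helper; the layout description is an
// assumption based on PatchBranchIntoNop/PatchNopIntoBranch above):
#if 0  // Standalone sketch, not part of the original header.
// Assumed layout: the stub begins with two patchable branches on zero_reg.
// A bne zero_reg, zero_reg is never taken and acts as a nop; patching it to
// beq zero_reg, zero_reg makes it always taken, which routes execution to
// one of the incremental-marking paths.
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

static Mode GetModeSketch(bool first_is_beq, bool second_is_beq) {
  if (first_is_beq) return INCREMENTAL;              // first jump is live
  if (second_is_beq) return INCREMENTAL_COMPACTION;  // second jump is live
  return STORE_BUFFER_ONLY;                          // both jumps disabled
}
#endif
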
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects), the code
// that called into native must stay pinned in memory, so this stub is
// generated early enough that it can never be moved by the GC.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};

class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
                                     Label* done, Register receiver,
                                     Register properties, Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
                                     Label* done, Register elements,
                                     Register name, Register r0, Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() { return LookupModeBits::encode(mode_); }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};


} }  // namespace v8::internal

#endif  // V8_MIPS_CODE_STUBS_ARM_H_