#ifndef V8_IA32_CODE_STUBS_IA32_H_
#define V8_IA32_CODE_STUBS_IA32_H_

#include "macro-assembler.h"
#include "ic-inl.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm,
                     Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) {
    ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || save_fp == kDontSaveFPRegs);
  }

  void Generate(MacroAssembler* masm);

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic {
 public:
  // Generate code for copying characters using the rep movs instruction.
  // Copies ecx characters from esi to edi. Copying of overlapping regions is
  // not supported.
  static void GenerateCopyCharactersREP(MacroAssembler* masm,
                                        Register dest,     // Must be edi.
                                        Register src,      // Must be esi.
                                        Register count,    // Must be ecx.
                                        Register scratch,  // Neither of above.
                                        bool ascii);

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);
  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);
  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
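// The three hash helpers above emit V8's incremental string hash one
// character at a time.  A sketch of the computation they are expected to
// generate (illustrative; the exact shift amounts are an assumption based
// on the runtime's StringHasher):
//
//   hash = seed + character;   // GenerateHashInit
//   hash += hash << 10;
//   hash ^= hash >> 6;
//                              // GenerateHashAddCharacter repeats the same
//                              // two mixing steps for each further character.
//   hash += hash << 3;         // GenerateHashGetHash: final avalanche.
//   hash ^= hash >> 11;
//   hash += hash << 15;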
class SubStringStub: public PlatformCodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns result in eax.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns result
  // in eax.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(
      MacroAssembler* masm,
      Register left,
      Register right,
      Register length,
      Register scratch,
      Label* chars_not_equal,
      Label::Distance chars_not_equal_near = Label::kFar);
};
class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Register dictionary,
                           Register result,
                           Register index,
                           LookupMode mode)
      : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return NameDictionaryLookup; }

  int MinorKey() {
    return DictionaryBits::encode(dictionary_.code()) |
        ResultBits::encode(result_.code()) |
        IndexBits::encode(index_.code()) |
        LookupModeBits::encode(mode_);
  }
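  // Resulting key layout (a sketch, assuming the usual ia32 register codes
  // eax = 0 ... edi = 7): bits 0-2 hold the dictionary register, bits 3-5
  // the result register, bits 6-8 the index register, and bit 9 the lookup
  // mode.  E.g. dictionary = edx (2), result = eax (0), index = edi (7),
  // NEGATIVE_LOOKUP (1) encodes as
  //   2 | (0 << 3) | (7 << 6) | (1 << 9) == 0x3c2.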
  class DictionaryBits: public BitField<int, 0, 3> {};
  class ResultBits: public BitField<int, 3, 3> {};
  class IndexBits: public BitField<int, 6, 3> {};
  class LookupModeBits: public BitField<LookupMode, 9, 1> {};

  Register dictionary_;
  Register result_;
  Register index_;
  LookupMode mode_;
};
class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || fp_mode == kDontSaveFPRegs);
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static const byte kTwoByteNopInstruction = 0x3c;   // Cmpb al, #imm8.
  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.

  static const byte kFiveByteNopInstruction = 0x3d;   // Cmpl eax, #imm32.
  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.

  static Mode GetMode(Code* stub);
  static void Patch(Code* stub, Mode mode);
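  // How the mode patching is expected to work (a sketch; the offsets follow
  // from the instruction constants above): the stub starts with a two-byte
  // instruction at offset 0 followed by a five-byte instruction at offset 2,
  // both nops in STORE_BUFFER_ONLY mode.  GetMode() can therefore classify a
  // stub from two bytes:
  //
  //   byte b0 = stub->instruction_start()[0];
  //   byte b2 = stub->instruction_start()[2];
  //   if (b0 == kTwoByteJumpInstruction) return INCREMENTAL;
  //   if (b2 == kFiveByteJumpInstruction) return INCREMENTAL_COMPACTION;
  //   return STORE_BUFFER_ONLY;  // b0 == 0x3c, b2 == 0x3d.
  //
  // Patch() rewrites those bytes to switch a compiled stub between modes.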
 private:
  // This is a helper class for freeing up three scratch registers, where the
  // third is always ecx (needed for shift operations).  The input is two
  // registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_orig_(object),
          address_orig_(address),
          scratch0_orig_(scratch0),
          object_(object),
          address_(address),
          scratch0_(scratch0) {
      ASSERT(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
      if (scratch0.is(ecx)) {
        scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
      }
      if (object.is(ecx)) {
        object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
      }
      if (address.is(ecx)) {
        address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
      }
      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
    }
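    // Illustration of the remapping above: for RegisterAllocation(ecx, edx,
    // ebx) the object register aliases ecx, so object_ is redirected to an
    // allocatable register distinct from ecx/edx/ebx, and Save()/Restore()
    // below shuffle the original value in and out of the replacement.  Since
    // the three inputs may not alias each other, at most one remap is needed.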
    void Save(MacroAssembler* masm) {
      ASSERT(!address_orig_.is(object_));
      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
      // We don't have to save scratch0_orig_ because it was given to us as
      // a scratch register.  But if we had to switch to a different reg then
      // we should save the new scratch0_.
      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
      if (!ecx.is(scratch0_orig_) &&
          !ecx.is(object_orig_) &&
          !ecx.is(address_orig_)) {
        masm->push(ecx);
      }
      masm->push(scratch1_);
      if (!address_.is(address_orig_)) {
        masm->push(address_);
        masm->mov(address_, address_orig_);
      }
      if (!object_.is(object_orig_)) {
        masm->push(object_);
        masm->mov(object_, object_orig_);
      }
    }
    void Restore(MacroAssembler* masm) {
      // The registers have been preserved the entire time, so we just move
      // them back.  Only one of object_ and address_ can differ from its
      // orig_ register, since only one of them could have aliased ecx.
      if (!object_.is(object_orig_)) {
        masm->mov(object_orig_, object_);
        masm->pop(object_);
      }
      if (!address_.is(address_orig_)) {
        masm->mov(address_orig_, address_);
        masm->pop(address_);
      }
      masm->pop(scratch1_);
      if (!ecx.is(scratch0_orig_) &&
          !ecx.is(object_orig_) &&
          !ecx.is(address_orig_)) {
        masm->pop(ecx);
      }
      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
    }
    // If we have to call into C then we need to save and restore all
    // caller-saved registers that were not already preserved.  The caller-
    // saved registers are eax, ecx and edx.  The three scratch registers
    // (incl. ecx) will be restored by other means so we don't bother pushing
    // them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
      if (mode == kSaveFPRegs) {
        CpuFeatureScope scope(masm, SSE2);
        masm->sub(esp,
                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
        // Save all XMM registers except XMM0.
        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
          XMMRegister reg = XMMRegister::from_code(i);
          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
        }
      }
    }
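    // With kSaveFPRegs the stack after SaveCallerSaveRegisters() holds, from
    // the original top downwards: eax and edx (each only if not already a
    // scratch), then XMMRegister::kNumRegisters - 1 doubles for xmm1..xmm7
    // (xmm0 is treated as scratch and is not preserved).  An illustration of
    // the expected layout, not a guarantee.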
    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        CpuFeatureScope scope(masm, SSE2);
        // Restore all XMM registers except XMM0.
        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
          XMMRegister reg = XMMRegister::from_code(i);
          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
        }
        masm->add(esp,
                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
      }
      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
    }
    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }
   private:
    Register object_orig_;
    Register address_orig_;
    Register scratch0_orig_;
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
    // The third scratch register is always ecx.

    Register GetRegThatIsNotEcxOr(Register r1,
                                  Register r2,
                                  Register r3) {
      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
        Register candidate = Register::FromAllocationIndex(i);
        if (candidate.is(ecx)) continue;
        if (candidate.is(r1)) continue;
        if (candidate.is(r2)) continue;
        if (candidate.is(r3)) continue;
        return candidate;
      }
      UNREACHABLE();
      return no_reg;
    }
    friend class RecordWriteStub;
  };
  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);
  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return ObjectBits::encode(object_.code()) |
        ValueBits::encode(value_.code()) |
        AddressBits::encode(address_.code()) |
        RememberedSetActionBits::encode(remembered_set_action_) |
        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
  }
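  // Resulting key layout (a sketch, assuming ia32 register codes eax = 0 ...
  // edi = 7): bits 0-2 object register, bits 3-5 value register, bits 6-8
  // address register, bit 9 the remembered-set action, bit 10 the FP-save
  // mode.  E.g. object = ebx (3), value = eax (0), address = esi (6),
  // EMIT_REMEMBERED_SET (0), kSaveFPRegs (1) encodes as
  //   3 | (0 << 3) | (6 << 6) | (0 << 9) | (1 << 10) == 0x583.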
  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 3> {};
  class ValueBits: public BitField<int, 3, 3> {};
  class AddressBits: public BitField<int, 6, 3> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  RegisterAllocation regs_;
};

} }  // namespace v8::internal

#endif  // V8_IA32_CODE_STUBS_IA32_H_