28 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_
29 #define V8_ARM64_ASSEMBLER_ARM64_H_
// X-macro helper: expand R(code) once for each of the 32 general register
// codes (0..31).  Used below to stamp out per-register declarations.
// (The stray "48"/"49"/... tokens were line-number residue from a bad
// extraction and have been removed; with them the macro is ill-formed.)
#define REGISTER_CODE_LIST(R)                                          \
R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)                         \
R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15)                        \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)                        \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
79 unsigned code()
const;
213 const char*
const names[] = {
214 "x0",
"x1",
"x2",
"x3",
"x4",
215 "x5",
"x6",
"x7",
"x8",
"x9",
216 "x10",
"x11",
"x12",
"x13",
"x14",
217 "x15",
"x18",
"x19",
"x20",
"x21",
218 "x22",
"x23",
"x24",
"x27",
320 const char*
const names[] = {
321 "d0",
"d1",
"d2",
"d3",
"d4",
"d5",
"d6",
"d7",
322 "d8",
"d9",
"d10",
"d11",
"d12",
"d13",
"d14",
323 "d16",
"d17",
"d18",
"d19",
"d20",
"d21",
"d22",
"d23",
324 "d24",
"d25",
"d26",
"d27",
"d28",
"d29"
#if defined(ARM64_DEFINE_REG_STATICS)
// The one translation unit that defines ARM64_DEFINE_REG_STATICS emits the
// actual storage for each register constant (as a plain CPURegister
// aggregate) and exposes it through a typed reference, so all other code can
// use e.g. `x0` as a `const Register&`.
#define INITIALIZE_REGISTER(register_class, name, code, size, type)      \
  const CPURegister init_##register_class##_##name = {code, size, type}; \
  const register_class& name = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#define ALIAS_REGISTER(register_class, alias, name)                       \
  const register_class& alias = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#else
// Every other translation unit only sees extern declarations of the same
// references.  NOTE(review): the `#else` here was missing from the mangled
// source (it sits in the gap between original lines 357 and 359); without it
// both macro pairs were defined unconditionally, which is a redefinition
// error.  Restored to the conventional define-statics pattern.
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
  extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
  extern const register_class& alias
#endif  // defined(ARM64_DEFINE_REG_STATICS)
375 #define DEFINE_REGISTERS(N) \
376 INITIALIZE_REGISTER(Register, w##N, N, \
377 kWRegSizeInBits, CPURegister::kRegister); \
378 INITIALIZE_REGISTER(Register, x##N, N, \
379 kXRegSizeInBits, CPURegister::kRegister);
381 #undef DEFINE_REGISTERS
388 #define DEFINE_FPREGISTERS(N) \
389 INITIALIZE_REGISTER(FPRegister, s##N, N, \
390 kSRegSizeInBits, CPURegister::kFPRegister); \
391 INITIALIZE_REGISTER(FPRegister, d##N, N, \
392 kDRegSizeInBits, CPURegister::kFPRegister);
394 #undef DEFINE_FPREGISTERS
396 #undef INITIALIZE_REGISTER
427 #undef ALIAS_REGISTER
431 Register reg2 = NoReg,
432 Register reg3 = NoReg,
433 Register reg4 = NoReg);
439 const CPURegister& reg2,
440 const CPURegister& reg3 = NoReg,
441 const CPURegister& reg4 = NoReg,
442 const CPURegister& reg5 = NoReg,
443 const CPURegister& reg6 = NoReg,
444 const CPURegister& reg7 = NoReg,
445 const CPURegister& reg8 = NoReg);
452 const CPURegister& reg2,
453 const CPURegister& reg3 = NoCPUReg,
454 const CPURegister& reg4 = NoCPUReg,
455 const CPURegister& reg5 = NoCPUReg,
456 const CPURegister& reg6 = NoCPUReg,
457 const CPURegister& reg7 = NoCPUReg,
458 const CPURegister& reg8 = NoCPUReg);
472 : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
473 size_(reg1.SizeInBits()), type_(reg1.
type()) {
479 : list_(list), size_(size), type_(type) {
484 unsigned first_reg,
unsigned last_reg)
485 : size_(size), type_(type) {
490 ASSERT(last_reg >= first_reg);
491 list_ = (1UL << (last_reg + 1)) - 1;
492 list_ &= ~((1UL << first_reg) - 1);
562 if (!other1.
IsNone() && (other1.
type() == type_)) list |= other1.
Bit();
563 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
564 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
565 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
566 return (list_ & list) != 0;
590 bool IsValid()
const {
591 const RegList kValidRegisters = 0x8000000ffffffff;
592 const RegList kValidFPRegisters = 0x0000000ffffffff;
595 return (list_ & kValidRegisters) == list_;
597 return (list_ & kValidFPRegisters) == list_;
// Convenience macros for the standard register lists.  They are macros rather
// than constants so each use re-evaluates the CPURegList getter (the lists
// are consumed destructively via Pop*Index elsewhere).
// NOTE(review): stray leading line-number tokens from the extraction removed.
#define kCalleeSaved CPURegList::GetCalleeSaved()
#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()
652 template<
typename int_t>
658 inline bool IsZero()
const;
671 RelocInfo::Mode
rmode()
const {
return rmode_; }
685 unsigned shift_amount_;
686 RelocInfo::Mode rmode_;
710 ptrdiff_t
offset()
const {
return offset_; }
731 unsigned shift_amount_;
738 class Assembler :
public AssemblerBase {
758 num_pending_reloc_info_ = 0;
788 void bind(Label* label);
794 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
851 ASSERT(label->is_bound());
916 return veneer_pool_blocked_nesting_ > 0;
974 void b(Label* label);
986 void bl(Label* label);
998 void tbz(
const Register& rt,
unsigned bit_pos, Label* label);
999 void tbz(
const Register& rt,
unsigned bit_pos,
int imm14);
1002 void tbnz(
const Register& rt,
unsigned bit_pos, Label* label);
1003 void tbnz(
const Register& rt,
unsigned bit_pos,
int imm14);
1159 bfm(rd, rn, lsb, lsb + width - 1);
1186 sbfm(rd, rn, lsb, lsb + width - 1);
1196 sbfm(rd, rn, 0, 15);
1201 sbfm(rd, rn, 0, 31);
1208 ASSERT(shift < reg_size);
1209 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
1235 ubfm(rd, rn, lsb, lsb + width - 1);
1245 ubfm(rd, rn, 0, 15);
1250 ubfm(rd, rn, 0, 31);
1300 extr(rd, rs, rs, shift);
1449 MoveWide(rd, imm, shift,
MOVK);
1454 MoveWide(rd, imm, shift,
MOVN);
1459 MoveWide(rd, imm, shift,
MOVZ);
1652 void dc8(uint8_t data) { EmitData(&data,
sizeof(data)); }
1655 void dc32(uint32_t data) { EmitData(&data,
sizeof(data)); }
1658 void dc64(uint64_t data) { EmitData(&data,
sizeof(data)); }
1664 size_t len = strlen(
string) + 1;
1666 EmitData(
string, len);
1668 const char pad[] = {
'\0',
'\0',
'\0',
'\0'};
1671 EmitData(&pad, next_pc -
pc_);
1694 return rd.
code() << Rd_offset;
1699 return rn.
code() << Rn_offset;
1704 return rm.
code() << Rm_offset;
1709 return ra.
code() << Ra_offset;
1714 return rt.
code() << Rt_offset;
1719 return rt2.
code() << Rt2_offset;
1751 inline static Instr ImmS(
unsigned imms,
unsigned reg_size);
1752 inline static Instr ImmR(
unsigned immr,
unsigned reg_size);
1756 inline static Instr BitN(
unsigned bitn,
unsigned reg_size);
1830 void EmitVeneers(
bool force_emit,
bool need_protection,
1905 unsigned left_shift);
1944 void DataProcShiftedRegister(
const Register& rd,
1949 void DataProcExtendedRegister(
const Register& rd,
1958 void LoadStorePairNonTemporal(
const CPURegister& rt,
1967 void ConditionalSelect(
const Register& rd,
1972 void DataProcessing1Source(
const Register& rd,
1975 void DataProcessing3Source(
const Register& rd,
1980 void FPDataProcessing1Source(
const FPRegister& fd,
1983 void FPDataProcessing2Source(
const FPRegister& fd,
1987 void FPDataProcessing3Source(
const FPRegister& fd,
1996 int LinkAndGetByteOffsetTo(Label* label);
2000 inline int LinkAndGetInstructionOffsetTo(Label* label);
2002 static const int kStartOfLabelLinkChain = 0;
2005 void CheckLabelLinkChain(Label
const * label);
2007 void RecordLiteral(int64_t imm,
unsigned size);
2014 void Emit(
Instr instruction) {
2019 memcpy(
pc_, &instruction,
sizeof(instruction));
2020 pc_ +=
sizeof(instruction);
2025 void EmitData(
void const * data,
unsigned size) {
2031 memcpy(
pc_, data, size);
2040 int next_constant_pool_check_;
2056 static const int kCheckConstPoolIntervalInst = 128;
2057 static const int kCheckConstPoolInterval =
2063 static const int kMaxDistToConstPool = 4 *
KB;
2064 static const int kMaxNumPendingRelocInfo =
2073 static const int kAvgDistToConstPool =
2074 kMaxDistToConstPool - kCheckConstPoolInterval;
2077 int const_pool_blocked_nesting_;
2078 int no_const_pool_before_;
2082 int first_const_pool_use_;
2085 int veneer_pool_blocked_nesting_;
2089 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
2090 RelocInfoWriter reloc_info_writer;
2100 RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
2102 int num_pending_reloc_info_;
2126 static const int kGap = 128;
2178 void DeleteUnresolvedBranchInfoForLabel(Label* label);
2198 reinterpret_cast<
byte*>(start),
2219 CPU::FlushICache(
buffer_, length);
2227 assembler->CheckBuffer();
2233 #endif // V8_ARM64_ASSEMBLER_ARM64_H_
void RecordVeneerPool(int location_offset, int size)
static Instr ImmPCRelAddress(int imm21)
static CPURegList GetCallerSavedFP(unsigned size=kDRegSizeInBits)
void cmp(Register src1, const Operand &src2, Condition cond=al)
void cbnz(const Register &rt, Label *label)
void sbfx(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
void lsl(const Register &rd, const Register &rn, unsigned shift)
void EmitExtendShift(const Register &rd, const Register &rn, Extend extend, unsigned left_shift)
void ldrsb(Register dst, const MemOperand &src, Condition cond=al)
void sdiv(Register dst, Register src1, Register src2, Condition cond=al)
const Register & base() const
static FPRegister Create(unsigned code, unsigned size)
static Instr ImmSystemRegister(int imm15)
void fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void ClearRecordedAstId()
static const int kDebugBreakSlotInstructions
const Register & AppropriateZeroRegFor(const CPURegister ®) const
static CPURegList GetCalleeSavedFP(unsigned size=kDRegSizeInBits)
void tbz(const Register &rt, unsigned bit_pos, Label *label)
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size)
static const unsigned kAllocatableContext
void uxtb(const Register &rd, const Register &rn)
static Instr Cond(Condition cond)
static const unsigned kAllocatableHighRangeBegin
void LoadRelocated(const CPURegister &rt, const Operand &operand)
static Instr FPScale(unsigned scale)
void ccmn(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
Register(const CPURegister &r)
void ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void PopulateConstantPool(ConstantPoolArray *constant_pool)
unsigned shift_amount() const
void sxtw(const Register &rd, const Register &rn)
void set_rmode(RelocInfo::Mode rmode)
static const int kCallSizeWithoutRelocation
void strh(Register src, const MemOperand &dst, Condition cond=al)
void bic(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
bool IsValidRegister() const
void sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
static CPURegList GetCallerSaved(unsigned size=kXRegSizeInBits)
void mrs(Register dst, SRegister s, Condition cond=al)
static const int kAllocatableRangeGapSize
static const RegList kAllocatableFPRegisters
void sbfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
bool IsRegisterOffset() const
static Instr ImmCmpBranch(int imm19)
static const int kCallSizeWithRelocation
static Instr ImmTestBranch(int imm14)
static bool IsImmLSUnscaled(ptrdiff_t offset)
bool IsExtendedRegister() const
RegisterType type() const
const unsigned kDRegSizeInBits
void negs(const Register &rd, const Operand &operand)
void sbc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
int next_veneer_pool_check_
static const int kMaxVeneerCodeSize
unsigned shift_amount() const
static Register FromAllocationIndex(unsigned index)
static Instr ShiftDP(Shift shift)
bool IsShiftedRegister() const
void bfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
static int NumAllocatableRegisters()
static const int kVeneerDistanceCheckMargin
void tbnz(const Register &rt, unsigned bit_pos, Label *label)
void ConditionalCompare(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
static const int kPatchDebugBreakSlotReturnOffset
int SizeOfCodeGeneratedSince(Label *label)
void rev16(const Register &rd, const Register &rn)
void fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
static const int kMaxNumAllocatableRegisters
static LSDataSize CalcLSDataSize(LoadStoreOp op)
void adcs(const Register &rd, const Register &rn, const Operand &operand)
void msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
static FPRegister SRegFromCode(unsigned code)
const unsigned kXRegSizeInBits
static Instr ExtendMode(Extend extend)
Operand OffsetAsOperand() const
static CPURegList GetCalleeSaved(unsigned size=kXRegSizeInBits)
void csetm(const Register &rd, Condition cond)
static const unsigned kAllocatableHighRangeEnd
void bics(const Register &rd, const Register &rn, const Operand &operand)
static int NumAllocatableRegisters()
void b(int branch_offset, Condition cond=al)
static Instr ImmShiftLS(unsigned shift_amount)
static Register Create(unsigned code, unsigned size)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage message
static Instr ImmLS(int imm9)
void cmn(Register src1, const Operand &src2, Condition cond=al)
void ldrb(Register dst, const MemOperand &src, Condition cond=al)
void smull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
static Instr ImmBarrierType(int imm2)
void clz(Register dst, Register src, Condition cond=al)
static bool IsConstantPoolAt(Instruction *instr)
bool IsFPRegister() const
bool is(const CPURegister &other) const
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
void fcvtns(const Register &rd, const FPRegister &fn)
void orn(const Register &rd, const Register &rn, const Operand &operand)
void frintz(const FPRegister &fd, const FPRegister &fn)
static const int kMaxNumRegisters
void bfi(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void RecordConstPool(int size)
void fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
static Instr RdSP(Register rd)
void Logical(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void rbit(const Register &rd, const Register &rn)
const LowDwVfpRegister d15
TypeFeedbackId RecordedAstId()
FPDataProcessing2SourceOp
#define ASSERT(condition)
static const int kPatchReturnSequenceAddressOffset
friend class BlockConstPoolScope
static CPURegister Create(unsigned code, unsigned size, RegisterType type)
void SetRecordedAstId(TypeFeedbackId ast_id)
CPURegister PopHighestIndex()
void ands(const Register &rd, const Register &rn, const Operand &operand)
void frintn(const FPRegister &fd, const FPRegister &fn)
unsigned RegisterSizeInBytes() const
static Instr Flags(FlagsUpdate S)
void eon(const Register &rd, const Register &rn, const Operand &operand)
static Instr ImmFP32(float imm)
void smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void nop(NopMarkerTypes n)
bool ShouldEmitVeneer(int max_reachable_pc, int margin=kVeneerDistanceMargin)
void csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static LoadStoreOp LoadOpFor(const CPURegister &rt)
void fneg(const FPRegister &fd, const FPRegister &fn)
void movk(const Register &rd, uint64_t imm, int shift=-1)
void fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void cinv(const Register &rd, const Register &rn, Condition cond)
void RemoveBranchFromLabelLinkChain(Instruction *branch, Label *label, Instruction *label_veneer=NULL)
#define DEFINE_REGISTERS(N)
static Operand UntagSmiAndScale(Register smi, int scale)
bool AreSameSizeAndType(const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoCPUReg, const CPURegister ®4=NoCPUReg, const CPURegister ®5=NoCPUReg, const CPURegister ®6=NoCPUReg, const CPURegister ®7=NoCPUReg, const CPURegister ®8=NoCPUReg)
CPURegister PopLowestIndex()
void strb(Register src, const MemOperand &dst, Condition cond=al)
PatchingAssembler(Instruction *start, unsigned count)
static Instr Rt2(CPURegister rt2)
void extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
void udiv(const Register &rd, const Register &rn, const Register &rm)
MemOperand(Register base, ptrdiff_t offset=0, AddrMode addrmode=Offset)
void fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void ldrh(Register dst, const MemOperand &src, Condition cond=al)
void BlockConstPoolFor(int instructions)
void asr(const Register &rd, const Register &rn, unsigned shift)
static const int kNumRegisters
void rev32(const Register &rd, const Register &rn)
EnsureSpace(Assembler *assembler)
void mvn(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
bool IsAllocatable() const
const uint64_t kSmiShiftMask
int CountSetBits(uint64_t value, int width)
uint64_t SizeOfCodeGeneratedSince(const Label *label)
static const char * AllocationIndexToString(int index)
const unsigned kWRegSizeInBits
void ngcs(const Register &rd, const Operand &operand)
static Instr Nzcv(StatusFlags nzcv)
static Instr ImmCondCmp(unsigned imm)
void ret(const Register &xn=lr)
bool IsValidOrNone() const
static void deserialization_set_special_target_at(Address constant_pool_entry, Code *code, Address target)
bool ShouldEmitVeneers(int margin=kVeneerDistanceMargin)
void stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
static Instr ImmUncondBranch(int imm26)
static bool IsImmFP64(double imm)
static const unsigned kAllocatableHighRangeEnd
void cneg(const Register &rd, const Register &rn, Condition cond)
static LoadStorePairOp StorePairOpFor(const CPURegister &rt, const CPURegister &rt2)
static const int kMaxNumAllocatableRegisters
void sxtb(const Register &rd, const Register &rn)
void neg(const Register &rd, const Operand &operand)
static Instr ImmMoveWide(uint64_t imm)
int InstructionsGeneratedSince(const Label *label)
static Instr ImmException(int imm16)
void FPConvertToInt(const Register &rd, const FPRegister &fn, FPIntegerConvertOp op)
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
uint64_t SizeOfGeneratedCode() const
void fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void uxth(const Register &rd, const Register &rn)
static Instr ImmTestBranchBit(unsigned bit_pos)
void fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void cbz(const Register &rt, Label *label)
static Instr SF(Register rd)
static Instr ImmCondBranch(int imm19)
bool IsAllocatable() const
void sbcs(const Register &rd, const Register &rn, const Operand &operand)
void LoadStore(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
void ubfx(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void smulh(const Register &rd, const Register &rn, const Register &rm)
static const char * AllocationIndexToString(int index)
void br(const Register &xn)
void EmitVeneers(bool force_emit, bool need_protection, int margin=kVeneerDistanceMargin)
static const int kSpecialTargetSize
AddrMode addrmode() const
void AddSub(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void rev(const Register &rd, const Register &rn)
void GetCode(CodeDesc *desc)
static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(const CPURegister &rt, const CPURegister &rt2)
ALIAS_REGISTER(Register, ip0, x16)
static bool IsImmAddSub(int64_t immediate)
void csel(const Register &rd, const Register &rn, const Register &rm, Condition cond)
PatchingAssembler(byte *start, unsigned count)
static const int kVeneerDistanceMargin
const Register & regoffset() const
const unsigned kInstructionSize
static bool IsImmConditionalCompare(int64_t immediate)
BlockPoolsScope(Assembler *assem)
void fmov(FPRegister fd, double imm)
static FPRegister FromAllocationIndex(unsigned int index)
static Instr ImmExtendShift(unsigned left_shift)
FPRegister(const CPURegister &r)
bool IsSameSizeAndType(const CPURegister &other) const
void umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
Instruction * InstructionAt(int offset) const
static Register WRegFromCode(unsigned code)
bool is_veneer_pool_blocked() const
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void StartBlockConstPool()
FPRegister(const FPRegister &r)
void mneg(const Register &rd, const Register &rn, const Register &rm)
static const int kJSRetSequenceInstructions
static Instr Rd(CPURegister rd)
void blr(const Register &xn)
void lslv(const Register &rd, const Register &rn, const Register &rm)
const unsigned kSPRegInternalCode
void ror(const Register &rd, const Register &rs, unsigned shift)
void stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
static Register from_code(int code)
static Instr Rn(CPURegister rn)
T RoundUp(T x, intptr_t m)
#define REGISTER_CODE_LIST(R)
static const int kVeneerNoProtectionFactor
void adds(const Register &rd, const Register &rn, const Operand &operand)
void ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void StartBlockVeneerPool()
MaybeObject * AllocateConstantPool(Heap *heap)
void fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void str(Register src, const MemOperand &dst, Condition cond=al)
LoadStorePairNonTemporalOp
void fcvtau(const Register &rd, const FPRegister &fn)
void CheckConstPool(bool force_emit, bool require_jump)
T AlignUp(T pointer, size_t alignment)
const unsigned kNumberOfFPRegisters
void movz(const Register &rd, uint64_t imm, int shift=-1)
static const unsigned kAllocatableHighRangeBegin
static Address target_pointer_address_at(Address pc)
void ldrsw(const Register &rt, const MemOperand &src)
void debug(const char *message, uint32_t code, Instr params=BREAK)
void fcvtnu(const Register &rd, const FPRegister &fn)
void sxth(const Register &rd, const Register &rn)
void fcvtzu(const Register &rd, const FPRegister &fn)
void ngc(const Register &rd, const Operand &operand)
void eor(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void cinc(const Register &rd, const Register &rn, Condition cond)
bool IncludesAliasOf(const CPURegister &other1, const CPURegister &other2=NoCPUReg, const CPURegister &other3=NoCPUReg, const CPURegister &other4=NoCPUReg) const
FarBranchInfo(int offset, Label *label)
static const int kDebugBreakSlotLength
void set_list(RegList new_list)
static Instr Ra(CPURegister ra)
static Instr ShiftMoveWide(int64_t shift)
static Instr ImmRotate(unsigned immr, unsigned reg_size)
#define T(name, string, precedence)
void fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
FPDataProcessing1SourceOp
void AddSubWithCarry(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void RecordDebugBreakSlot()
bool NeedsRelocation() const
void ldr(Register dst, const MemOperand &src, Condition cond=al)
unsigned RegisterSizeInBits() const
static int ToAllocationIndex(FPRegister reg)
const unsigned kNumberOfRegisters
static int NumAllocatableRegisters()
const unsigned kRegCodeMask
void Combine(const CPURegList &other)
void ConstantPoolMarker(uint32_t size)
static bool IsImmFP32(float imm)
static Instr Rt(CPURegister rt)
void cset(const Register &rd, Condition cond)
static Instr ImmLLiteral(int imm19)
unsigned num_pending_reloc_info() const
void fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
static LoadStoreOp StoreOpFor(const CPURegister &rt)
void ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2=NoReg, Register reg3=NoReg, Register reg4=NoReg)
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
int unresolved_branches_first_limit() const
Handle< T > handle(T *t, Isolate *isolate)
static Instr Rm(CPURegister rm)
static const unsigned kAllocatableLowRangeEnd
void RecordComment(const char *msg)
void bl(int branch_offset, Condition cond=al)
void dmb(BarrierDomain domain, BarrierType type)
void ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
CPURegList(CPURegister::RegisterType type, unsigned size, unsigned first_reg, unsigned last_reg)
static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(const CPURegister &rt, const CPURegister &rt2)
void hint(SystemHint code)
static FPRegister DRegFromCode(unsigned code)
void fsqrt(const FPRegister &fd, const FPRegister &fn)
void fcvtas(const Register &rd, const FPRegister &fn)
void asrv(const Register &rd, const Register &rn, const Register &rm)
static Instr ImmBarrierDomain(int imm2)
static Address target_address_from_return_address(Address pc)
bool is_const_pool_blocked() const
friend class PositionsRecorder
static const unsigned kAllocatableLowRangeBegin
void EmitStringData(const char *string)
static Instr ImmLSPair(int imm7, LSDataSize size)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
static Register XRegFromCode(unsigned code)
std::multimap< int, FarBranchInfo > unresolved_branches_
void dsb(BarrierDomain domain, BarrierType type)
CPURegister::RegisterType type() const
static Instr ImmHint(int imm7)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
FPDataProcessing3SourceOp
BlockConstPoolScope(Assembler *assem)
void umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Remove(const CPURegList &other)
void ldrsh(Register dst, const MemOperand &src, Condition cond=al)
void fcvtmu(const Register &rd, const FPRegister &fn)
void lsrv(const Register &rd, const Register &rn, const Register &rm)
void fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
static int NumRegisters()
static Instr ImmR(unsigned immr, unsigned reg_size)
void orr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static Address return_address_from_call_start(Address pc)
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
void cls(const Register &rd, const Register &rn)
PositionsRecorder * positions_recorder()
void movn(const Register &rd, uint64_t imm, int shift=-1)
static Instr ImmS(unsigned imms, unsigned reg_size)
#define DEFINE_FPREGISTERS(N)
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target)
static const unsigned kAllocatableLowRangeBegin
void AssertSizeOfCodeGeneratedSince(const Label *label, ptrdiff_t size)
void lsr(const Register &rd, const Register &rn, unsigned shift)
void ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
static Instr ImmDPShift(unsigned amount)
void ubfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
static Instr RnSP(Register rn)
virtual void AbortedCodeGeneration()
void ldpsw(const Register &rt, const Register &rt2, const MemOperand &src)
static Instr ImmFP64(double imm)
void fcmp(const FPRegister &fn, const FPRegister &fm)
void fcvtzs(const Register &rd, const FPRegister &fn)
void CheckVeneerPool(bool force_emit, bool require_jump, int margin=kVeneerDistanceMargin)
static int ToAllocationIndex(Register reg)
bool Is(const CPURegister &other) const
Register(const Register &r)
void rorv(const Register &rd, const Register &rn, const Register &rm)
static const unsigned kAllocatableLowRangeEnd
void fcvt(const FPRegister &fd, const FPRegister &fn)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
CPURegList(CPURegister reg1, CPURegister reg2=NoCPUReg, CPURegister reg3=NoCPUReg, CPURegister reg4=NoCPUReg)
void uxtw(const Register &rd, const Register &rn)
static bool IsImmLogical(uint64_t value, unsigned width, unsigned *n, unsigned *imm_s, unsigned *imm_r)
void adr(const Register &rd, Label *label)
bool IsValidFPRegister() const
static Instr BitN(unsigned bitn, unsigned reg_size)
static Operand UntagSmi(Register smi)
void msr(SRegisterFieldMask fields, const Operand &src, Condition cond=al)
void madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
int64_t immediate() const
static const int kAllocatableRangeGapSize
static Instr FPType(FPRegister fd)
void LogicalImmediate(const Register &rd, const Register &rn, unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
static CPURegList GetSafepointSavedRegisters()
void frinta(const FPRegister &fd, const FPRegister &fn)
void EmitShift(const Register &rd, const Register &rn, Shift shift, unsigned amount)
static const int kPatchDebugBreakSlotAddressOffset
static Instr ImmAddSub(int64_t imm)
static Instr ImmLSUnsigned(int imm12)
Operand ToExtendedRegister() const
void fcvtms(const Register &rd, const FPRegister &fn)
TypeFeedbackId recorded_ast_id_
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static int ConstantPoolSizeAt(Instruction *instr)
void fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void tst(Register src1, const Operand &src2, Condition cond=al)
static LoadStorePairOp LoadPairOpFor(const CPURegister &rt, const CPURegister &rt2)
RelocInfo::Mode rmode() const
static FPRegister from_code(int code)
void subs(const Register &rd, const Register &rn, const Operand &operand)
bool IsImmediateOffset() const
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister)
void LoadLiteral(const CPURegister &rt, int offset_from_pc)
void smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
static Instr ImmSetBits(unsigned imms, unsigned reg_size)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
bool AreAliased(const CPURegister ®1, const CPURegister ®2, const CPURegister ®3=NoReg, const CPURegister ®4=NoReg, const CPURegister ®5=NoReg, const CPURegister ®6=NoReg, const CPURegister ®7=NoReg, const CPURegister ®8=NoReg)
unsigned SizeInBits() const
void EndBlockVeneerPool()
void adc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)