#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_

// Includes assumed from the surrounding V8 tree.
#include "arm64/assembler-arm64.h"
#include "cpu.h"
#include "debug.h"

namespace v8 {
namespace internal {
void RelocInfo::apply(intptr_t delta) {
  UNREACHABLE();
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target);
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}
void CPURegList::Combine(const CPURegList& other) {
  ASSERT(other.type() == type_);
  ASSERT(other.RegisterSizeInBits() == size_);
  list_ |= other.list();
}


void CPURegList::Remove(const CPURegList& other) {
  if (other.type() == type_) {
    list_ &= ~other.list();
  }
}


void CPURegList::Combine(int code) {
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ |= (1UL << code);
}


void CPURegList::Remove(int code) {
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ &= ~(1UL << code);
}
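// A CPURegList is a 64-bit mask with one bit per register code, so Combine
// and Remove are plain bitwise OR / AND-NOT. A minimal sketch of the effect
// (register names illustrative):
//
//   CPURegList list(x0, x1);   // list() == (1UL << 0) | (1UL << 1)
//   list.Combine(2);           // sets bit 2 (x2)
//   list.Remove(1);            // clears bit 1; x0 and x2 remain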
// Handle<T> operands record the handle so it can be relocated later.
template<typename T>
Operand::Operand(Handle<T> value) : reg_(NoReg) {
  initialize_handle(value);
}
// Default initializer is used for plain integer types.
template<typename int_t>
struct OperandInitializer {
  static const bool kIsIntType = true;
  static inline RelocInfo::Mode rmode_for(int_t) {
    return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
  }
  static inline int64_t immediate_for(int_t t) {
    STATIC_ASSERT(sizeof(int_t) <= 8);
    return t;
  }
};


template<>
struct OperandInitializer<Smi*> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(Smi* t) {
    return RelocInfo::NONE64;
  }
  static inline int64_t immediate_for(Smi* t) {
    return reinterpret_cast<int64_t>(t);
  }
};


template<>
struct OperandInitializer<ExternalReference> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return reinterpret_cast<int64_t>(t.address());
  }
};
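// These traits let the templated Operand constructor derive both the
// relocation mode and the raw 64-bit immediate from the argument's type.
// A minimal sketch of the intended behaviour (values illustrative):
//
//   Operand(0x1234);                 // int: NONE32, immediate used as-is.
//   Operand(Smi::FromInt(7));        // Smi*: NONE64, tagged bits as immediate.
//   Operand(ExternalReference(f));   // EXTERNAL_REFERENCE, address bits.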
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount),
      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount),
      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
  ASSERT(reg.IsValid());
  ASSERT(shift_amount <= 4);
  ASSERT(!reg.IsSP());
  // Extend modes SXTX and UXTX require a 64-bit register.
  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
bool Operand::IsImmediate() const {
  return reg_.Is(NoReg);
}
Operand Operand::ToExtendedRegister() const {
  ASSERT(IsShiftedRegister());
  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}


unsigned Operand::shift_amount() const {
  return shift_amount_;
}
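// ToExtendedRegister() is used when an instruction needs the extended-register
// form but the operand was written as a shifted register. Only "LSL #n" with
// n <= 4 can be rewritten this way; e.g. Operand(x1, LSL, 2) becomes
// Operand(x1, UXTX, 2), which encodes identically in an extended-register slot.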
MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
  ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(!regoffset.IsSP());
  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
  // SXTX extend mode requires a 64-bit offset register.
  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
  : base_(base), addrmode_(addrmode) {
  ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.immediate();
    regoffset_ = NoReg;
  } else if (offset.IsShiftedRegister()) {
    ASSERT(addrmode == Offset);
    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();
    extend_ = NO_EXTEND;
    offset_ = 0;
  } else {
    ASSERT(offset.IsExtendedRegister());
    ASSERT(addrmode == Offset);
    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();
    shift_ = NO_SHIFT;
    offset_ = 0;
  }
}


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
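// The constructors above cover the A64 addressing modes. A usage sketch
// (register names illustrative):
//
//   MemOperand(x0, 8);                 // immediate offset: [x0, #8]
//   MemOperand(x0, 8, PreIndex);       // pre-index:        [x0, #8]!
//   MemOperand(x0, 8, PostIndex);      // post-index:       [x0], #8
//   MemOperand(x0, x1, LSL, 3);        // register offset:  [x0, x1, lsl #3]
//   MemOperand(x0, w1, SXTW);          // extended offset:  [x0, w1, sxtw]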
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return Memory::Address_at(target_pointer_address_at(pc));
}
Address Assembler::return_address_from_call_start(Address pc) {
  // The call, generated by MacroAssembler::Call, is one of two possible
  // sequences, and in both the return address is the instruction immediately
  // after the final blr.
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsMovz()) {
    // Verify the instruction sequence.
    ASSERT(instr->following(1)->IsMovk());
    ASSERT(instr->following(2)->IsMovk());
    ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithoutRelocation;
  } else {
    // Verify the instruction sequence.
    ASSERT(instr->IsLdrLiteralX());
    ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithRelocation;
  }
}
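// For reference, the two call sequences distinguished above look like this
// (temp register and target bits illustrative):
//
//   Without relocation:                 With relocation:
//     movz temp, #target[15:0]            ldr  temp, =target
//     movk temp, #target[31:16]           blr  temp
//     movk temp, #target[47:32]
//     blr  temp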
void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target) {
  Memory::Address_at(target_pointer_address_at(pc)) = target;
  // Only the constant pool contents change here; the instruction that loads
  // from it is unmodified, so no instruction cache flush is required.
}
int RelocInfo::target_address_size() {
  return kPointerSize;
}
Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
         rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_pointer_address_at(pc_);
}
Address RelocInfo::constant_pool_entry_address() {
  ASSERT(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}
Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}
Address RelocInfo::target_reference() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
  UNIMPLEMENTED();
  Cell* null_cell = NULL;
  return Handle<Cell>(null_cell);
}
Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();  // This should never be reached on ARM64.
  return Handle<Object>();
}
Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  // Read the stub entry point from the code age sequence.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}
void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  // Overwrite the stub entry point in the code age sequence. This is loaded
  // as a literal, so there is no need to call FlushICache here.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  Memory::Address_at(stub_entry_address) = stub->instruction_start();
}
Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // For the above sequences the RelocInfo points to the load literal that
  // loads the call address.
  return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}
void RelocInfo::WipeOut() {
  ASSERT(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}
bool RelocInfo::IsPatchedReturnSequence() {
  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
  Instruction* i2 = i1->following();
  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
         i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
}
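// A patched return sequence is therefore exactly:
//   ldr ip0, [pc, #offset]   // Load the call target from a literal.
//   blr ip0                  // Call it.
// which is what the debugger writes over a JS return site.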
bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
  // An unpatched debug break slot still holds the marker nop.
  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
}
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}
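// The templated overload below performs the same dispatch, but resolves the
// visitor calls statically through the StaticVisitor template parameter.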
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}
LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}
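// Opcode selection is driven entirely by the register passed in. A sketch
// (register names illustrative):
//
//   LoadOpFor(x0);   // LDR_x (64-bit integer)
//   LoadOpFor(w0);   // LDR_w (32-bit integer)
//   LoadOpFor(d0);   // LDR_d (double-precision FP)
//   StoreOpFor(s0);  // STR_s (single-precision FP)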
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  ASSERT(kStartOfLabelLinkChain == 0);
  int offset = LinkAndGetByteOffsetTo(label);
  ASSERT(IsAligned(offset, kInstructionSize));
  return offset >> kInstructionSizeLog2;
}
Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
  return 0;
}
Instr Assembler::Cond(Condition cond) {
  return cond << Condition_offset;
}
Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
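// ADR's 21-bit signed offset is split across the instruction: the low two
// bits go in the immlo field (bits 30:29) and the remaining 19 bits in the
// immhi field (bits 23:5), which is why the immediate is reassembled from
// two masked fields above.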
Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}
Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}
Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}
Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}
Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  ASSERT(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
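// TBZ/TBNZ encode the tested bit position in two fields: bit 5 of bit_pos
// lands in the b5 field (it also selects W versus X operand width) and
// bits 4:0 land in the b40 field, hence the two masked shifts above.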
Instr Assembler::ImmAddSub(int64_t imm) {
  ASSERT(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    return imm << ImmAddSub_offset;
  } else {
    return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
}
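// Add/sub immediates are 12 bits, optionally shifted left by 12. For example
// (illustrative): #42 encodes directly, while #(42 << 12) encodes as
// imm12 = 42 with the ShiftAddSub bit set; any other value must first be
// materialized into a register.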
Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}


Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmR_offset;
}


Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(is_uint6(imms));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}


Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}
Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}
Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}


Instr Assembler::ShiftDP(Shift shift) {
  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}


Instr Assembler::ImmDPShift(unsigned amount) {
  ASSERT(is_uint6(amount));
  return amount << ImmDPShift_offset;
}


Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}


Instr Assembler::ImmExtendShift(unsigned left_shift) {
  ASSERT(left_shift <= 4);
  return left_shift << ImmExtendShift_offset;
}


Instr Assembler::ImmCondCmp(unsigned imm) {
  ASSERT(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}


Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}
Instr Assembler::ImmLSUnsigned(int imm12) {
  ASSERT(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}


Instr Assembler::ImmLS(int imm9) {
  ASSERT(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
  ASSERT(((imm7 >> size) << size) == imm7);
  int scaled_imm7 = imm7 >> size;
  ASSERT(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
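// Load/store-pair offsets are encoded scaled by the access size: the first
// assertion checks that the byte offset is a multiple of the transfer size,
// so e.g. an LDP of two X registers (size = 3) encodes #16 as scaled_imm7 = 2.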
Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  ASSERT(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}


Instr Assembler::ImmException(int imm16) {
  ASSERT(is_uint16(imm16));
  return imm16 << ImmException_offset;
}


Instr Assembler::ImmSystemRegister(int imm15) {
  ASSERT(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}


Instr Assembler::ImmHint(int imm7) {
  ASSERT(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}


Instr Assembler::ImmBarrierDomain(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}


Instr Assembler::ImmBarrierType(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}
LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
  return static_cast<LSDataSize>(op >> SizeLS_offset);
}
Instr Assembler::ImmMoveWide(uint64_t imm) {
  ASSERT(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}


Instr Assembler::ShiftMoveWide(int64_t shift) {
  ASSERT(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}
Instr Assembler::FPScale(unsigned scale) {
  ASSERT(is_uint6(scale));
  return scale << FPScale_offset;
}
void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
  LoadRelocatedValue(rt, operand, LDR_x_lit);
}
inline void Assembler::CheckBuffer() {
  ASSERT(pc_ < (buffer_ + buffer_size_));
  if (buffer_space() < kGap) {
    GrowBuffer();
  }
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  if (pc_offset() >= next_constant_pool_check_) {
    CheckConstPool(false, true);
  }
}
} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_INL_H_