#if V8_TARGET_ARCH_ARM

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}
// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) {
    answer |= 1u << ARMv7;
  }
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) {
    answer |= 1u << VFP3 | 1u << ARMv7;
  }
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) {
    answer |= 1u << VFP32DREGS;
  }
#endif  // CAN_USE_VFP32DREGS
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }
  return answer;
}
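// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: how the feature
// bitmask above behaves. The enum values are local stand-ins for V8's
// CpuFeature constants; only the bit arithmetic is the point.
#include <cstdint>
#include <cassert>

namespace feature_mask_demo {

enum DemoFeature { kARMv7 = 0, kVFP3 = 1, kNEON = 2, kSUDIV = 3 };

inline bool IsSet(uint64_t mask, DemoFeature f) {
  return (mask & (static_cast<uint64_t>(1) << f)) != 0;
}

inline void Demo() {
  uint64_t compiler_implied = 1u << kARMv7;                // e.g. -march=armv7-a
  uint64_t runtime_probed = (1u << kVFP3) | (1u << kNEON);
  uint64_t supported = compiler_implied | runtime_probed;  // mirrors supported_
  assert(IsSet(supported, kVFP3));
  assert(!IsSet(supported, kSUDIV));
}

}  // namespace feature_mask_demo
// ---------------------------------------------------------------------------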
void CpuFeatures::Probe() {
  uint64_t standard_features = static_cast<unsigned>(
      OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;
#ifndef __arm__
  // For the simulator build, use whatever the flags specify.
  if (FLAG_enable_vfp3) {
    supported_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }
  if (FLAG_enable_neon) {
    supported_ |= 1u << NEON;
  }
  if (FLAG_enable_armv7) {
    supported_ |= static_cast<uint64_t>(1) << ARMv7;
  }
  if (FLAG_enable_sudiv) {
    supported_ |= static_cast<uint64_t>(1) << SUDIV;
  }
  if (FLAG_enable_movw_movt) {
    supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }
  if (FLAG_enable_32dregs) {
    supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }
  if (FLAG_enable_unaligned_accesses) {
    supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }
#else  // __arm__
  // Probe for additional features not already known to be available.
  CPU cpu;
  if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime detection of VFP
    // returns true. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
    found_by_runtime_probing_only_ |= 1u << NEON;
  }

  if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
      && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

  // Use movw/movt on QUALCOMM ARMv7 cores.
  if (cpu.implementer() == CPU::QUALCOMM &&
      cpu.architecture() >= 7 &&
      FLAG_enable_movw_movt) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == CPU::ARM &&
      (cpu.part() == CPU::ARM_CORTEX_A5 ||
       cpu.part() == CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << VFP32DREGS;
  }

  supported_ |= found_by_runtime_probing_only_;
#endif
}
void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_test = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#ifdef __arm__
  // Flags set by the compiler.
# if defined __ARM_NEON__
  arm_fpu = " neon";
# elif defined CAN_USE_VFP3_INSTRUCTIONS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp2";
# endif
# if (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
# endif
  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
#else  // __arm__
  arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
#  if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp32";
#  else
  arm_fpu = " vfp3-d16";
#  endif
# else
  arm_fpu = " vfp2";
# endif
# if USE_EABI_HARDFLOAT == 1
  arm_float_abi = "hard";
# else
  arm_float_abi = "softfp";
# endif
#endif  // __arm__

  printf("target%s %s%s%s %s\n",
         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
}


void CpuFeatures::PrintFeatures() {
  printf(
      "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
      "MOVW_MOVT_IMMEDIATE_LOADS=%d",
      CpuFeatures::IsSupported(ARMv7),
      CpuFeatures::IsSupported(VFP3),
      CpuFeatures::IsSupported(VFP32DREGS),
      CpuFeatures::IsSupported(NEON),
      CpuFeatures::IsSupported(SUDIV),
      CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
      CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
  bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.
  // Being specially coded on ARM means that it is a movw/movt instruction, or
  // is an out-of-line constant pool entry. These only occur if
  // FLAG_enable_ool_constant_pool is true.
  return FLAG_enable_ool_constant_pool;
}


bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_ool_constant_pool) {
    return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
  } else {
    return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
  }
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }
  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
// Implementation of Operand and MemOperand.
// See assembler-arm-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));

  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;

  if ((shift_op == ROR) && (shift_imm == 0)) {
    // ROR #0 is functionally equivalent to LSL #0, which allows us to encode
    // RRX as ROR with shift_imm == 0 (see below).
    shift_op_ = LSL;
  } else if (shift_op == RRX) {
    // Encoded as ROR with shift_imm == 0.
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  ASSERT((am == Offset) || (am == PostIndex));
  rn_ = rn;
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}


void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:
      align_ = 0;
      break;
    case 64:
      align_ = 1;
      break;
    case 128:
      align_ = 2;
      break;
    case 256:
      align_ = 3;
      break;
    default:
      UNREACHABLE();
      align_ = 0;
      break;
  }
}


NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  switch (registers_count) {
    case 1:
      type_ = nlt_1;
      break;
    case 2:
      type_ = nlt_2;
      break;
    case 3:
      type_ = nlt_3;
      break;
    case 4:
      type_ = nlt_4;
      break;
    default:
      UNREACHABLE();
      type_ = nlt_1;
      break;
  }
}
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  constant_pool_available_ = !FLAG_enable_ool_constant_pool;
  constant_pool_full_ = false;
  ClearRecordedAstId();
}
Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
}


void Assembler::GetCode(CodeDesc* desc) {
  if (!FLAG_enable_ool_constant_pool) {
    // Emit constant pool if necessary.
    CheckConstPool(true, false);
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}
bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  // Check the instruction is indeed a vldr<cond> <Dd>, [<Rn> +/- offset_10].
  return (instr & (15*B24 | 3*B20 | 15*B8)) == (13*B24 | B20 | 11*B8);
}
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = (instr & kOff8Mask) << 2;  // Zero extended offset.
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}
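// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the Set*ImmediateOffset
// helpers above all store a 12-bit (or 10-bit) magnitude plus a direction
// bit U (bit 23); a negative offset flips U and stores |offset|. kUBit and
// kOff12 are local stand-ins for B23 and kOff12Mask.
#include <cstdint>
#include <cassert>

namespace offset_field_demo {

inline uint32_t SetLdrOffset(uint32_t instr, int offset) {
  const uint32_t kUBit = 1u << 23;
  const uint32_t kOff12 = 0xfffu;
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  assert(static_cast<uint32_t>(offset) <= kOff12);           // fits in 12 bits
  instr = (instr & ~kUBit) | (positive ? kUBit : 0);         // direction
  return (instr & ~kOff12) | static_cast<uint32_t>(offset);  // magnitude
}

inline void Demo() {
  // ldr r0, [r0, #0] is 0xE5900000; rewriting with offset -4 clears U and
  // stores magnitude 4, giving ldr r0, [r0, #-4] == 0xE5100004.
  assert(SetLdrOffset(0xE5900000u, -4) == 0xE5100004u);
}

}  // namespace offset_field_demo
// ---------------------------------------------------------------------------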
bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;
}
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26.
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}


void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    ASSERT(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch: load the position of the
    // label relative to the generated code object pointer into a register.
    // The destination register is encoded in the nop that follows the link.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    ASSERT(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  ASSERT((imm26 & 3) == 0);
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, (instr & ~kImm24Mask) | (imm24 & kImm24Mask));
}
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction: this is the end of the
    // link chain.
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}
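// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the linked-label scheme
// that bind_to()/next() implement. An unbound label heads a chain threaded
// through the unresolved branch slots themselves; binding walks the chain
// and patches every slot. Here a plain int array stands in for the code
// buffer and its branch-offset fields, and -1 terminates the chain.
#include <vector>
#include <cassert>

namespace label_chain_demo {

inline void Demo() {
  std::vector<int> slot(16, -1);
  int label_pos = -1;            // unbound label, empty chain
  for (int use : {2, 5, 9}) {    // three forward branches to the label
    slot[use] = label_pos;       // slot remembers the previous use
    label_pos = use;             // label now points at the newest use
  }
  const int target = 12;         // bind_to(label, 12)
  while (label_pos != -1) {
    int next = slot[label_pos];  // like next(L): read link before patching
    slot[label_pos] = target;    // like target_at_put(fixup_pos, pos)
    label_pos = next;
  }
  for (int use : {2, 5, 9}) assert(slot[use] == target);
}

}  // namespace label_chain_demo
// ---------------------------------------------------------------------------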
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8,
                         NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
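// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the core rotation test of
// fits_shifter() in isolation. An ARM operand-2 immediate is any value equal
// to an 8-bit constant rotated right by an even amount; the loop tries all
// sixteen rotations. (The rot == 0 case is guarded here because a 32-bit
// shift by 32 is undefined behaviour in portable C++.)
#include <cstdint>
#include <cassert>

namespace rotated_imm_demo {

inline bool EncodeOperand2(uint32_t imm32, uint32_t* rotate, uint32_t* imm8) {
  for (uint32_t rot = 0; rot < 16; rot++) {
    uint32_t v = (rot == 0)
        ? imm32
        : ((imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot)));
    if (v <= 0xff) {
      *rotate = rot;  // instruction field: rotate right by 2*rot
      *imm8 = v;
      return true;
    }
  }
  return false;
}

inline void Demo() {
  uint32_t rot, imm8;
  // 0xff000000 is 0xff rotated right by 8, so the rotate field is 4.
  assert(EncodeOperand2(0xff000000u, &rot, &imm8) && rot == 4 && imm8 == 0xff);
  assert(!EncodeOperand2(0x101u, &rot, &imm8));  // needs 9 significant bits
}

}  // namespace rotated_imm_demo
// ---------------------------------------------------------------------------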
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return Serializer::enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}
static bool use_mov_immediate_load(const Operand& x,
                                   const Assembler* assembler) {
  if (assembler != NULL && !assembler->can_use_constant_pool()) {
    // If there is no constant pool available, we must use a mov immediate
    // load.
    return true;
  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
             (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to the constant pool if it is more efficient on the
    // CPU.
    return true;
  } else if (x.must_output_reloc_info(assembler)) {
    // Prefer the constant pool if data is likely to be patched.
    return false;
  } else {
    // Otherwise, use an immediate load if movw / movt is available.
    return CpuFeatures::IsSupported(ARMv7);
  }
}
bool Operand::is_single_instruction(const Assembler* assembler,
                                    Instr instr) const {
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // the constant pool is required. For a mov instruction not setting the
    // condition code, additional instruction conventions can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      return !use_mov_immediate_load(*this, assembler);
    } else {
      // If this is not a mov or mvn instruction there will always be an
      // additional instruction: either mov or ldr.
      return false;
    }
  } else {
    // No use of the constant pool and the immediate operand can be encoded as
    // a shifter operand.
    return true;
  }
}
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  if (use_mov_immediate_load(x, this)) {
    Register target = rd.code() == pc.code() ? ip : rd;
    ASSERT(CpuFeatures::IsSupported(ARMv7));
    if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
      // Make sure the movw/movt pair doesn't get separated.
      BlockConstPoolFor(2);
    }
    emit(cond | 0x30*B20 | target.code()*B12 |
         EncodeMovwImmediate(x.imm32_ & 0xffff));
    movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    ASSERT(can_use_constant_pool());
    ConstantPoolAddEntry(rinfo);
    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
  }
}
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use
      // ip. However, if the original instruction is a 'mov rd, x' (not
      // setting the condition code), then replace it with a load.
      CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip.
      // rn (and rd in a load) should never be ip, or they will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure both are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip.
      // rn (and rd in a load) should never be ip, or they will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load the index first.
    // rn (and rd in a load) should never be ip, or they will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different from addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to the previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // The first entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction
  // must be emitted at the pc offset recorded by the label.
  BlockConstPoolFor(1);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | 5*B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}


void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | 5*B25 | B24 | (imm24 & kImm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | 5*B25 | h | (imm24 & kImm24Mask));
}
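// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the 24-bit branch offset
// arithmetic used by b()/bl() above. The byte offset must be word aligned;
// the instruction stores offset >> 2 in its low 24 bits, and decoding
// sign-extends it back.
#include <cstdint>
#include <cassert>

namespace branch_imm24_demo {

inline uint32_t EncodeImm24(int32_t byte_offset) {
  assert((byte_offset & 3) == 0);
  int32_t imm24 = byte_offset >> 2;
  assert(imm24 >= -(1 << 23) && imm24 < (1 << 23));  // signed 24-bit range
  return static_cast<uint32_t>(imm24) & 0x00ffffffu;
}

inline int32_t DecodeImm24(uint32_t instr) {
  int32_t imm24 = static_cast<int32_t>(instr << 8) >> 8;  // sign-extend 24 bits
  return imm24 << 2;
}

inline void Demo() {
  assert(DecodeImm24(EncodeImm24(-8)) == -8);
  assert(DecodeImm24(EncodeImm24(1024)) == 1024);
}

}  // namespace branch_imm24_demo
// ---------------------------------------------------------------------------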
// Data-processing instructions.

void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream, followed by extra nop
    // instructions. If the label is not linked, start a new link chain by
    // linking it to itself, emitting the pc offset.
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions are patched with a
    // sequence of movw/movt or mov/orr/orr instructions (see target_at_put).
    // The destination register is recovered from the nop that follows.
    ASSERT(is_uint24(link));
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      nop(dst.code());
    }
  }
}
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  // May use movw if supported, but on unsupported platforms will try to use
  // an equivalent rotated immed_8 value and other tricks before falling back
  // to a constant pool load.
  mov(reg, Operand(immediate), LeaveCC, cond);
}


void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
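// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: how a full 32-bit constant
// becomes a movw/movt pair. Each instruction carries a 16-bit immediate
// split into a 4-bit field at bits 19:16 and a 12-bit field at bits 11:0,
// which is what EncodeMovwImmediate() packs; EncodeField16 mirrors it here.
#include <cstdint>
#include <cassert>

namespace movw_movt_demo {

inline uint32_t EncodeField16(uint32_t imm16) {
  assert(imm16 < 0x10000u);
  return ((imm16 & 0xf000u) << 4) | (imm16 & 0x0fffu);
}

inline void Demo() {
  uint32_t value = 0xdeadbeefu;
  uint32_t movw_field = EncodeField16(value & 0xffffu);  // low half first
  uint32_t movt_field = EncodeField16(value >> 16);      // then the high half
  assert(movw_field == ((0xbu << 16) | 0xeefu));
  assert(movt_field == ((0xdu << 16) | 0xeadu));
}

}  // namespace movw_movt_demo
// ---------------------------------------------------------------------------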
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf*B12 |
       src2.code()*B8 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction.
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
void Assembler::smlal(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL, Register dstH, Register src1,
                      Register src2, SBit s, Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}
// Saturating instructions.

// Unsigned saturate.
void Assembler::usat(Register dst, int satpos, const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
// Bitfield manipulation instructions.

// Unsigned bit field extract: extracts #width adjacent bits from position
// #lsb in a register and writes them to the low bits of the destination.
//   ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst, Register src, int lsb, int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Signed bit field extract: like ubfx, but the result is sign extended.
//   sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst, Register src, int lsb, int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Bit field clear: sets #width adjacent bits at position #lsb to zero.
//   bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}


// Bit field insert: inserts #width adjacent bits from the low bits of the
// source into position #lsb of the destination.
//   bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst, Register src, int lsb, int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  ASSERT(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}


void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.126.
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  ASSERT(src2.shift_op() == ASR);
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}


void Assembler::uxtb(Register dst,
                     const Operand& src,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  ASSERT(!dst.is(pc));
  ASSERT(!src.rm().is(pc));
  ASSERT(!src.rm().is(no_reg));
  ASSERT(src.rs().is(no_reg));
  ASSERT((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  ASSERT((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}


void Assembler::uxtab(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ == 0) ||
         (src2.shift_imm_ == 8) ||
         (src2.shift_imm_ == 16) ||
         (src2.shift_imm_ == 24));
  ASSERT((src2.shift_op() == ROR) ||
         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}


void Assembler::uxtb16(Register dst,
                       const Operand& src,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  ASSERT(!dst.is(pc));
  ASSERT(!src.rm().is(pc));
  ASSERT(!src.rm().is(no_reg));
  ASSERT(src.rs().is(no_reg));
  ASSERT((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  ASSERT((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_output_reloc_info(this) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      move_32_bit_immediate(ip, src);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm is allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);
}


void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
}


void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}


void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}


void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}


void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}


void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}


void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}


void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}


void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
  // pld only supports offset addressing with a register base.
  ASSERT(address.rm().is(no_reg));
  ASSERT(address.am() == Offset);
  int U = B23;
  int offset = address.offset();
  if (offset < 0) {
    offset = -offset;
    U = 0;
  }
  ASSERT(offset < 4096);
  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
       address.rn().code()*B16 | 0xf*B12 | offset);
}
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am, Register base, RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not
  // restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by
  // ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    CheckConstPool(true, true);
  }
}


void Assembler::stm(BlockAddrMode am, Register base, RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  ASSERT(code >= kDefaultStopCode);
  {
    // The simulator handles the stop instruction and gets the message
    // address; it expects to find the address just after the svc instruction.
    BlockConstPoolScope block_const_pool(this);
    if (code >= 0) {
      svc(kStopCode + code, cond);
    } else {
      svc(kStopCode + kMaxStopCode, cond);
    }
    emit(reinterpret_cast<Instr>(msg));
  }
#else  // def __arm__
  if (cond != al) {
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#endif  // def __arm__
}
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}


void Assembler::svc(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc, int opcode_1, CRegister crd,
                    CRegister crn, CRegister crm, int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}


void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
                     CRegister crn, CRegister crm, int opcode_2) {
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}


void Assembler::mcr(Coprocessor coproc, int opcode_1, Register rd,
                    CRegister crn, CRegister crm, int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mrc(Coprocessor coproc, int opcode_1, Register rd,
                    CRegister crn, CRegister crm, int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
                    LFlag l, Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}


void Assembler::ldc(Coprocessor coproc, CRegister crd, Register rn,
                    int option, LFlag l, Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
// Support for VFP.

void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int vd, d;
  dst.split_code(&vd, &d);

  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address in ip.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD*B24 | B23 | d*B22 | B20 | ip.code()*B16 | vd*B12 |
         0xB*B8);
  }
}


void Assembler::vldr(const DwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}


void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  ASSERT(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address in ip.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | B23 | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}


void Assembler::vldr(const SwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}


void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  ASSERT(offset >= 0);
  int vd, d;
  src.split_code(&vd, &d);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
         ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address in ip.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD*B24 | B23 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
  }
}


void Assembler::vstr(const DwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vstr(src, operand.rn(), operand.offset(), cond);
}


void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address in ip.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | B23 | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}


void Assembler::vstr(const SwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vstr(src, operand.rn(), operand.offset(), cond);
}
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  ASSERT(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}


void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  ASSERT(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}


void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}


void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  OS::MemCopy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}


// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t* encoding) {
  ASSERT(CpuFeatures::IsSupported(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two 4-bit
  // fields. For an 8-bit immediate of the form [abcdefgh], where a is the MSB
  // and h the LSB, an immediate 64-bit double can be created of the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //      00000000,00000000,00000000,00000000]
  //
  // where B = ~b.

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 62:55 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 62 must be NOT bit 61.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
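// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the same bit tests as
// FitsVMOVDoubleImmediate() above, runnable standalone. 1.0 touches only the
// sign/exponent pattern and the top four mantissa bits, so it is encodable
// (fields abcd = 0x7, efgh = 0x0); 1.1 has low mantissa bits set and is not.
#include <cstdint>
#include <cstring>
#include <cassert>

namespace vmov_imm_demo {

inline bool FitsVmovF64(double d, uint32_t* encoding) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // DoubleAsTwoUInt32, inlined
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  if ((lo != 0) || ((hi & 0xffff) != 0)) return false;
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }
  if (((hi ^ (hi << 1)) & 0x40000000) == 0) return false;
  *encoding  = (hi >> 16) & 0xf;
  *encoding |= (hi >> 4) & 0x70000;
  *encoding |= (hi >> 12) & 0x80000;
  return true;
}

inline void Demo() {
  uint32_t enc;
  assert(FitsVmovF64(1.0, &enc) && enc == 0x70000);
  assert(!FitsVmovF64(1.1, &enc));
}

}  // namespace vmov_imm_demo
// ---------------------------------------------------------------------------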
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Register scratch) {
  uint32_t enc;
  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
  } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
    // Load the double from the constant pool.
    RelocInfo rinfo(pc_, imm);
    ConstantPoolAddEntry(rinfo);
    vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
  } else {
    // Synthesise the double from ARM immediates.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (scratch.is(no_reg)) {
      if (dst.code() < 16) {
        const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
        // Move the low and high parts of the double into the two S registers
        // that make up D register dst.
        mov(ip, Operand(lo));
        vmov(loc.low(), ip);
        mov(ip, Operand(hi));
        vmov(loc.high(), ip);
      } else {
        // D16-D31 have no paired S registers, so move the halves directly
        // with vmov.32. This may be slower, so we only do it when we have to.
        mov(ip, Operand(lo));
        vmov(dst, VmovIndexLo, ip);
        mov(ip, Operand(hi));
        vmov(dst, VmovIndexHi, ip);
      }
    } else {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(ip, Operand(lo));
      mov(scratch, Operand(hi));
      vmov(dst, ip, scratch);
    }
  }
}
void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm.
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const VmovIndex index,
                     const Register src,
                     const Condition cond) {
  // Dd[index] = Rt.
  ASSERT(index.index == 0 || index.index == 1);
  int vd, d;
  dst.split_code(&vd, &d);
  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
       d*B7 | B4);
}


void Assembler::vmov(const Register dst,
                     const VmovIndex index,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Rt = Dn[index].
  ASSERT(index.index == 0 || index.index == 1);
  int vn, n;
  src.split_code(&vn, &n);
  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
       0xB*B8 | n*B7 | B4);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  ASSERT(!src1.is(pc) && !src2.is(pc));
  int vm, m;
  dst.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}


void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}


void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}


void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
// Type of data to read from or write to a VFP register.
// Used as a specifier in the generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };


static bool IsSignedVFPType(VFPType type) {
  switch (type) {
    case S32:
      return true;
    case U32:
      return false;
    default:
      UNREACHABLE();
      return false;
  }
}


static bool IsIntegerVFPType(VFPType type) {
  switch (type) {
    case S32:
    case U32:
      return true;
    case F32:
    case F64:
      return false;
    default:
      UNREACHABLE();
      return false;
  }
}


static bool IsDoubleVFPType(VFPType type) {
  switch (type) {
    case F32:
      return false;
    case F64:
      return true;
    default:
      UNREACHABLE();
      return false;
  }
}


// Split a five-bit reg_code based on the size of reg_type.
//  32-bit register codes are Vm:M
//  64-bit register codes are M:Vm
// where Vm is four bits and M is a single bit.
static void SplitRegCode(VFPType reg_type,
                         int reg_code,
                         int* vm,
                         int* m) {
  ASSERT((reg_code >= 0) && (reg_code <= 31));
  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
    // 32-bit type.
    *m = reg_code & 0x1;
    *vm = reg_code >> 1;
  } else {
    // 64-bit type.
    *m = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
}


// Encode a vcvt.src_type.dst_type instruction.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
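// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the register-code split
// performed by SplitRegCode(). A five-bit VFP register number is stored as a
// four-bit field plus a single extra bit; single-precision registers put
// their LOW bit in the extra field, doubles put their HIGH bit there.
#include <cassert>

namespace split_reg_demo {

inline void SplitSingle(int code, int* vm, int* m) {  // s0..s31
  *m = code & 0x1;
  *vm = code >> 1;
}

inline void SplitDouble(int code, int* vm, int* m) {  // d0..d31
  *m = (code & 0x10) >> 4;
  *vm = code & 0x0f;
}

inline void Demo() {
  int vm, m;
  SplitSingle(17, &vm, &m);  // s17 -> field 8, extra bit 1
  assert(vm == 8 && m == 1);
  SplitDouble(17, &vm, &m);  // d17 -> field 1, extra bit 1
  assert(vm == 1 && m == 1);
}

}  // namespace split_reg_demo
// ---------------------------------------------------------------------------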
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}


void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}


void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}


void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}


void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             int fraction_bits,
                             const Condition cond) {
  // Fixed-point to double conversion.
  ASSERT(fraction_bits > 0 && fraction_bits <= 32);
  ASSERT(CpuFeatures::IsSupported(VFP3));
  int vd, d;
  dst.split_code(&vd, &d);
  int i = ((32 - fraction_bits) >> 4) & 1;
  int imm4 = (32 - fraction_bits) & 0xf;
  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
       vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
void Assembler::vneg(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = -Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}


void Assembler::vabs(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = |Dm|.
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = Dn + Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | 0x3*B20 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}


void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = Dn - Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | 0x3*B20 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | B6 | m*B5 | vm);
}


void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = Dn * Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | 0x2*B20 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}


void Assembler::vmla(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd += Dn * Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}


void Assembler::vmls(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd -= Dn * Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | B6 | m*B5 | vm);
}


void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = Dn / Dm.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 |
       B6 | m*B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison.
  ASSERT(src2 == 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0xE*B24 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}


void Assembler::vmrs(Register dst, Condition cond) {
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}


void Assembler::vmsr(Register dst, Condition cond) {
  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Dd = sqrt(Dm).
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 |
       0x3*B6 | m*B5 | vm);
}
// Support for NEON.

void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}


void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 |
       src.type()*B8 | size*B6 | dst.align()*B4 | dst.rm().code());
}
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
       (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
// Pseudo instructions.
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes some
  // of the CPU's pipeline. Older ARM chips simply use MOV Rx, Rx as NOP, and
  // it performs better even on newer CPUs. We therefore use MOV Rx, Rx even
  // on newer CPUs, and use Rx to encode a marker type.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop
  emit(al | 13*B21 | type*B12 | type);
}


bool Assembler::IsMovT(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions.
             ((kNumRegisters-1)*B12) |            // Mask out the register.
             EncodeMovwImmediate(0xFFFF));        // Mask out the immediate.
  return instr == 0x34*B20;
}


bool Assembler::IsMovW(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions.
             ((kNumRegisters-1)*B12) |            // Mask out the destination.
             EncodeMovwImmediate(0xFFFF));        // Mask out the immediate.
  return instr == 0x30*B20;
}


bool Assembler::IsNop(Instr instr, int type) {
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop
  // Check for mov rx, rx where x == type.
  return instr == (al | 13*B21 | type*B12 | type);
}
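// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the marking-nop encoding
// that nop(type)/IsNop() agree on. "mov rN, rN" with N == type; type 0 is
// the canonical ARM nop 0xE1A00000.
#include <cstdint>
#include <cassert>

namespace marking_nop_demo {

inline uint32_t MarkingNop(uint32_t type) {
  const uint32_t al = 0xEu << 28;  // "always" condition field
  return al | (13u << 21) | (type << 12) | type;  // mov r<type>, r<type>
}

inline void Demo() {
  assert(MarkingNop(0) == 0xE1A00000u);  // plain nop
  assert(MarkingNop(3) == 0xE1A03003u);  // mov r3, r3 carries marker 3
}

}  // namespace marking_nop_demo
// ---------------------------------------------------------------------------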
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}


bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
  return is_uint12(abs(imm32));
}
// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
#ifdef ENABLE_DEBUGGER_SUPPORT
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
    ASSERT(rinfo.rmode() == RelocInfo::NONE64);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
  constant_pool_builder_.Relocate(pc_delta);
}
void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used to write
  // pure data with no pointers, and the constant pool should be emitted
  // before using db.
  ASSERT(num_pending_32_bit_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used to write
  // pure data with no pointers, and the constant pool should be emitted
  // before using dd.
  ASSERT(num_pending_32_bit_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data, NULL);
  RecordRelocInfo(rinfo);
}


void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(rinfo.pc(),
                                       rinfo.rmode(),
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
  if (FLAG_enable_ool_constant_pool) {
    constant_pool_builder_.AddEntry(this, rinfo);
  } else {
    if (rinfo.rmode() == RelocInfo::NONE64) {
      ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
      if (num_pending_64_bit_reloc_info_ == 0) {
        first_const_pool_64_use_ = pc_offset();
      }
      pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
    } else {
      ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
      if (num_pending_32_bit_reloc_info_ == 0) {
        first_const_pool_32_use_ = pc_offset();
      }
      pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
    }
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }
}
void Assembler::BlockConstPoolFor(int instructions) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
#ifdef DEBUG
    // Max pool start (if we need a jump and an alignment).
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
    ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
           (start - first_const_pool_32_use_ +
            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
    ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if ((num_pending_32_bit_reloc_info_ == 0) &&
      (num_pending_64_bit_reloc_info_ == 0)) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (this includes the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  bool require_64_bit_align = false;
  if (has_fp_values) {
    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
    if (require_64_bit_align) {
      size_after_marker += kInstrSize;
    }
    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
  }

  int size = size_up_to_marker + size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next time
  //    the pool is checked.
  //  * the instruction doesn't require a jump after itself to skip the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    ASSERT((first_const_pool_32_use_ >= 0) ||
           (first_const_pool_64_use_ >= 0));
    bool need_emit = false;
    if (has_fp_values) {
      int dist64 = pc_offset() +
                   size -
                   num_pending_32_bit_reloc_info_ * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    int dist32 =
        pc_offset() + size - first_const_pool_32_use_;
    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
      need_emit = true;
    }
    if (!need_emit) return;
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit a jump over the constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down the constant pool marker "Undefined instruction".
    // The data size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // that of 32-bit entries.
    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];

      ASSERT(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // The instruction to patch must be 'vldr rd, [pc, #offset]' with
      // offset == 0.
      ASSERT((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      ASSERT(is_uint10(delta));

      bool found = false;
      uint64_t value = rinfo.raw_data64();
      for (int j = 0; j < i; j++) {
        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
        if (value == rinfo2.raw_data64()) {
          found = true;
          ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
          Instr instr2 = instr_at(rinfo2.pc());
          ASSERT(IsVldrDPcImmediateOffset(instr2));
          delta = GetVldrDRegisterImmediateOffset(instr2);
          delta += rinfo2.pc() - rinfo.pc();
          break;
        }
      }

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      if (!found) {
        uint64_t uint_data = rinfo.raw_data64();
        emit(uint_data & 0xFFFFFFFF);
        emit(uint_data >> 32);
      }
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL &&
             rinfo.rmode() != RelocInfo::NONE64);

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      ASSERT(!IsVldrDPcImmediateOffset(instr));

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
        ASSERT(is_uint12(delta));

        // Try to merge entries which won't be patched.
        bool found = false;
        if (!Serializer::enabled() && rinfo.rmode() >= RelocInfo::CELL) {
          for (int j = 0; j < i; j++) {
            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];

            if ((rinfo2.data() == rinfo.data()) &&
                (rinfo2.rmode() == rinfo.rmode())) {
              Instr instr2 = instr_at(rinfo2.pc());
              if (IsLdrPcImmediateOffset(instr2)) {
                delta = GetLdrRegisterImmediateOffset(instr2);
                delta += rinfo2.pc() - rinfo.pc();
                found = true;
                break;
              }
            }
          }
        }

        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));

        if (!found) {
          emit(rinfo.data());
        }
      } else {
        ASSERT(IsMovW(instr));
      }
    }

    num_pending_32_bit_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
  ASSERT(FLAG_enable_ool_constant_pool);
  return constant_pool_builder_.Allocate(heap);
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  ASSERT(FLAG_enable_ool_constant_pool);
  constant_pool_builder_.Populate(this, constant_pool);
}
ConstantPoolBuilder::ConstantPoolBuilder()
    : entries_(),
      merged_indexes_(),
      count_of_64bit_(0),
      count_of_code_ptr_(0),
      count_of_heap_ptr_(0),
      count_of_32bit_(0) { }


bool ConstantPoolBuilder::IsEmpty() {
  return entries_.size() == 0;
}


bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
  return rmode == RelocInfo::NONE64;
}


bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
  return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
}


bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
  return RelocInfo::IsCodeTarget(rmode);
}


bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
  return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
}
void ConstantPoolBuilder::AddEntry(Assembler* assm,
                                   const RelocInfo& rinfo) {
  RelocInfo::Mode rmode = rinfo.rmode();
  ASSERT(rmode != RelocInfo::COMMENT &&
         rmode != RelocInfo::POSITION &&
         rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);

  // Try to merge entries which won't be patched.
  int merged_index = -1;
  if (RelocInfo::IsNone(rmode) ||
      (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
    size_t i;
    std::vector<RelocInfo>::const_iterator it;
    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
      if (RelocInfo::IsEqual(rinfo, *it)) {
        merged_index = i;
        break;
      }
    }
  }

  entries_.push_back(rinfo);
  merged_indexes_.push_back(merged_index);

  if (merged_index == -1) {
    // Not merged, so update the appropriate count.
    if (Is64BitEntry(rmode)) {
      count_of_64bit_++;
    } else if (Is32BitEntry(rmode)) {
      count_of_32bit_++;
    } else if (IsCodePtrEntry(rmode)) {
      count_of_code_ptr_++;
    } else {
      ASSERT(IsHeapPtrEntry(rmode));
      count_of_heap_ptr_++;
    }
  }

  // Check if we still have room for another entry given ARM's ldr and vldr
  // immediate offset range.
  if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
                                             count_of_code_ptr_,
                                             count_of_heap_ptr_,
                                             count_of_32bit_)))) {
    assm->set_constant_pool_full();
  }
}
void ConstantPoolBuilder::Relocate(int pc_delta) {
  for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
       rinfo != entries_.end(); rinfo++) {
    ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
    rinfo->set_pc(rinfo->pc() + pc_delta);
  }
}
MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
  if (IsEmpty()) {
    return heap->empty_constant_pool_array();
  } else {
    return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
                                           count_of_heap_ptr_,
                                           count_of_32bit_);
  }
}
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
  ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
  ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
  ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
  ASSERT(entries_.size() == merged_indexes_.size());

  int index_64bit = 0;
  int index_code_ptr = count_of_64bit_;
  int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;

  size_t i;
  std::vector<RelocInfo>::const_iterator rinfo;
  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end();
       rinfo++, i++) {
    RelocInfo::Mode rmode = rinfo->rmode();

    // Update the constant pool if necessary and get the entry's offset.
    int offset;
    if (merged_indexes_[i] == -1) {
      if (Is64BitEntry(rmode)) {
        offset = constant_pool->OffsetOfElementAt(index_64bit) -
            kHeapObjectTag;
        constant_pool->set(index_64bit++, rinfo->data64());
      } else if (Is32BitEntry(rmode)) {
        offset = constant_pool->OffsetOfElementAt(index_32bit) -
            kHeapObjectTag;
        constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
      } else if (IsCodePtrEntry(rmode)) {
        offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
            kHeapObjectTag;
        constant_pool->set(index_code_ptr++,
                           reinterpret_cast<Object*>(rinfo->data()));
      } else {
        ASSERT(IsHeapPtrEntry(rmode));
        offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
            kHeapObjectTag;
        constant_pool->set(index_heap_ptr++,
                           reinterpret_cast<Object*>(rinfo->data()));
      }
      merged_indexes_[i] = offset;  // Stash the offset for merged entries.
    } else {
      size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
      ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
      offset = merged_indexes_[merged_index];
    }

    // Patch the vldr/ldr instruction with the correct offset.
    Instr instr = assm->instr_at(rinfo->pc());
    if (Is64BitEntry(rmode)) {
      // The instruction to patch must be 'vldr rd, [pp, #0]'.
      ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
      ASSERT(is_uint10(offset));
      assm->instr_at_put(rinfo->pc(),
          Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
    } else {
      // The instruction to patch must be 'ldr rd, [pp, #0]'.
      ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
      ASSERT(is_uint12(offset));
      assm->instr_at_put(rinfo->pc(),
          Assembler::SetLdrRegisterImmediateOffset(instr, offset));
    }
  }

  ASSERT((index_64bit == count_of_64bit_) &&
         (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
         (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
         (index_32bit == (index_heap_ptr + count_of_32bit_)));
}
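// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the assembler: the section layout that
// Populate() walks. Entries live in four contiguous sections in a fixed
// order (64-bit values, code pointers, heap pointers, 32-bit values); each
// section's first index is the running sum of the earlier counts.
#include <cassert>

namespace pool_layout_demo {

struct Layout {
  int first_64bit, first_code_ptr, first_heap_ptr, first_32bit;
};

inline Layout LayoutFor(int n64, int ncode, int nheap) {
  Layout l;
  l.first_64bit = 0;
  l.first_code_ptr = n64;
  l.first_heap_ptr = n64 + ncode;
  l.first_32bit = n64 + ncode + nheap;
  return l;
}

inline void Demo() {
  Layout l = LayoutFor(2, 3, 4);  // counts of 64-bit, code-ptr, heap-ptr
  assert(l.first_code_ptr == 2 && l.first_heap_ptr == 5 && l.first_32bit == 9);
}

}  // namespace pool_layout_demo
// ---------------------------------------------------------------------------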
3741 #endif // V8_TARGET_ARCH_ARM