#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS

ExternalReference ExternalReference::cpu_features() {
  return ExternalReference(&CpuFeatures::supported_);
}
  ASSERT((1 << index) & list_);

  index = kRegListSizeInBits - 1 - index;
  ASSERT((1 << index) & list_);
const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {


bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);  // Assumed declaration; 'pc' is used below.
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }
}
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
                                               Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);

    if (regs.IncludesAliasOf(candidate)) continue;
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;
  RegList unique_regs = 0;    // Assumed declarations: these masks are
  RegList unique_fpregs = 0;  // accumulated below but were not declared here.

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      ASSERT(!regs[i].IsValid());
    }
  }

  // Assumed completion: count the distinct encodings via CountSetBits.
  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  ASSERT(number_of_valid_regs >= number_of_unique_regs);
  ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
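// Illustration (assuming the standard register definitions): AreAliased(x0, x1)
// is false, but AreAliased(x0, w0) is true, because x0 and w0 share register
// code 0 and therefore set the same bit in the mask.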
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  bool match = true;  // Assumed initializer; each clause below ANDs into it.
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}
void Operand::initialize_handle(Handle<Object> handle) {
  Object* obj = *handle;  // Assumed dereference; 'obj' is used below.

  if (obj->IsHeapObject()) {
    immediate_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    immediate_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {

  return !RelocInfo::IsNone(rmode_);
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;


  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(const_pool_blocked_nesting_ == 0);
  ASSERT(veneer_pool_blocked_nesting_ == 0);


  ASSERT(const_pool_blocked_nesting_ == 0);
  ASSERT(veneer_pool_blocked_nesting_ == 0);

                               reinterpret_cast<byte*>(pc_));
  num_pending_reloc_info_ = 0;
  next_constant_pool_check_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_use_ = -1;


  ASSERT(num_pending_reloc_info_ == 0);

  reloc_info_writer.pos();
void Assembler::CheckLabelLinkChain(Label const * label) {
  if (label->is_linked()) {
    int linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      Instruction* link = InstructionAt(linkoffset);  // Assumed; 'link' is used below.
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
}
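// Link-chain note (inferred from the walk above): every branch to an unbound
// label stores, in its own offset field, the position of the next unresolved
// branch on the same label, so following ImmPCOffset() visits each of them.
// The chain terminates when a branch points at itself
// (linkoffset == prevlinkoffset).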
void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  ASSERT(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());  // Assumed; 'link' is walked below.
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();  // Assumed chain-walking steps.
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    if (branch == next_link) {

      label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);

  } else if (branch == next_link) {
    prev_link->SetImmPCOffsetTarget(prev_link);

    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(next_link);
    } else if (label_veneer != NULL) {
      prev_link->SetImmPCOffsetTarget(prev_link);

      end_of_chain = false;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(label_veneer);
        link = next_link;  // Assumed advance.
      }

      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));

  CheckLabelLinkChain(label);
void Assembler::bind(Label* label) {
  ASSERT(!label->is_near_linked());
  ASSERT(!label->is_bound());

  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);  // Assumed; 'link' is used below.
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    ASSERT((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    ASSERT(prevlinkoffset >= 0);

    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {

    label->link_to(prevlinkoffset);

  ASSERT(label->is_bound());
  ASSERT(!label->is_linked());

  DeleteUnresolvedBranchInfoForLabel(label);
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  CheckLabelLinkChain(label);

  if (label->is_bound()) {

  if (label->is_linked()) {

    ASSERT(offset != kStartOfLabelLinkChain);

    offset = kStartOfLabelLinkChain;


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  std::multimap<int, FarBranchInfo>::iterator it_tmp, it;

    if (it_tmp->second.label_ == label) {
void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks by pushing the next check out of reach.
    next_constant_pool_check_ = kMaxInt;


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool has not been blocked for too long.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));

    next_constant_pool_check_ = no_const_pool_before_;


  return (const_pool_blocked_nesting_ > 0) ||


  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
  ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));
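// Inferred from the checks above: a constant pool is marked in the instruction
// stream by "ldr xzr, #<pool size>" (a literal load targeting the zero
// register), followed by a "blr xzr" guard so that control falling through
// into the pool data faults immediately.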
      reinterpret_cast<const char*>(

  return instr->ImmLLiteral();


  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  ASSERT(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());


void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}


void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
// Label overloads of the branch instructions (heads restored from their
// declarations): each links the label and forwards to the offset-based form.
void Assembler::b(Label* label) {
  b(LinkAndGetInstructionOffsetTo(label));
}

void Assembler::b(Label* label, Condition cond) {
  b(LinkAndGetInstructionOffsetTo(label), cond);
}

void Assembler::bl(Label* label) {
  bl(LinkAndGetInstructionOffsetTo(label));
}

void Assembler::cbz(const Register& rt, Label* label) {
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::cbnz(const Register& rt, Label* label) {
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}
                    const Operand& operand) {

                    const Operand& operand) {

// cmn is an add that only sets flags, writing the zero register.
                    const Operand& operand) {
  adds(zr, rn, operand);

                    const Operand& operand) {

                    const Operand& operand) {

// cmp is a subtract that only sets flags, writing the zero register.
  subs(zr, rn, operand);

// neg/negs subtract the operand from the zero register.
  sub(rd, zr, operand);

  subs(rd, zr, operand);
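// Illustration of the aliases above (inferred from the bodies): cmp(x0, Operand(1))
// assembles as "subs xzr, x0, #1" and neg(w0, Operand(w1)) as "sub w0, wzr, w1";
// a flags-only compare is simply a subtract whose result goes to the zero register.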
                    const Operand& operand) {

                     const Operand& operand) {

                    const Operand& operand) {

                     const Operand& operand) {

  sbc(rd, zr, operand);

  sbcs(rd, zr, operand);

                     const Operand& operand) {

                      const Operand& operand) {

                     const Operand& operand) {

                    const Operand& operand) {

                     const Operand& operand) {

                    const Operand& operand) {

                    const Operand& operand) {

                    const Operand& operand) {

                    const Operand& operand) {
                      const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());

                      const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());

                      const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());

                      const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |

  ASSERT(rd.Is64Bits() || rn.Is32Bits());
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |

  ASSERT(rd.SizeInBits() == rn.SizeInBits());
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |

  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
  ConditionalSelect(rd, rn, rm, cond, CSEL);

  ConditionalSelect(rd, rn, rm, cond, CSINC);

  ConditionalSelect(rd, rn, rm, cond, CSINV);

  ConditionalSelect(rd, rn, rm, cond, CSNEG);
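// Inferred from the opcodes above: these are the bodies of csel, csinc, csinv
// and csneg. The conditional aliases are built on them, e.g. cset(rd, cond) is
// csinc(rd, zr, zr, InvertCondition(cond)) and cinc(rd, rn, cond) is
// csinc(rd, rn, rn, InvertCondition(cond)).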
void Assembler::ConditionalSelect(const Register& rd,
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
                     const Operand& operand,

                     const Operand& operand,


void Assembler::DataProcessing3Source(const Register& rd,
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
                    const Register& rm) {
  DataProcessing3Source(rd, rn, rm, zr, MADD);

                     const Register& ra) {
  DataProcessing3Source(rd, rn, rm, ra, MADD);

                     const Register& rm) {
  DataProcessing3Source(rd, rn, rm, zr, MSUB);

                     const Register& ra) {
  DataProcessing3Source(rd, rn, rm, ra, MSUB);

                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);

                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);

                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);

                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);

                      const Register& rm) {
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);

                      const Register& rm) {
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
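// Note (inferred from the dispatch above): the multiply aliases reuse the
// three-source encodings with a zero-register accumulator, e.g. mul(x0, x1, x2)
// emits "madd x0, x1, x2, xzr" and smull(x0, w1, w2) emits
// "smaddl x0, w1, w2, xzr".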
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());

                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);

                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);

                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV);

                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);

                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);

                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
                    const CPURegister& rt2,

                    const CPURegister& rt2,

                      const Register& rt2,
  LoadStorePair(rt, rt2, src, LDPSW_x);


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,

  if (addr.IsImmediateOffset()) {

    ASSERT(!rt.Is(addr.base()));
    ASSERT(!rt2.Is(addr.base()));
    ASSERT(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      ASSERT(addr.IsPostIndex());

  Emit(addrmodeop | memop);
                     const CPURegister& rt2,
  LoadStorePairNonTemporal(rt, rt2, src,

                     const CPURegister& rt2,
  LoadStorePairNonTemporal(rt, rt2, dst,


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
  ASSERT(addr.IsImmediateOffset());
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |


  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);

  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
  if (rd.IsSP() || rm.IsSP()) {
    // Moves involving the stack pointer cannot use orr: encoding 31 means the
    // zero register in the logical forms, so such moves go through add.


void Assembler::mvn(const Register& rd, const Operand& operand) {


  ASSERT(rd.SizeInBits() == fn.SizeInBits());
  Emit(op | Rd(rd) | Rn(fn));

  ASSERT(fd.SizeInBits() == rn.SizeInBits());
  Emit(op | Rd(fd) | Rn(rn));

  ASSERT(fd.SizeInBits() == fn.SizeInBits());
                   const FPRegister& fn,
                   const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);

                   const FPRegister& fn,
                   const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);

                   const FPRegister& fn,
                   const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);

                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);

                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);

                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);

                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);

                   const FPRegister& fn,
                   const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);

                   const FPRegister& fn,
                   const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);

                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);

                   const FPRegister& fn,
                   const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);

                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
                    const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);

                    const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);

                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);

                      const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);

                      const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);

                      const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
                    const FPRegister& fm) {
  ASSERT(fn.SizeInBits() == fm.SizeInBits());

                     const FPRegister& fm,
  ASSERT(fn.SizeInBits() == fm.SizeInBits());

                     const FPRegister& fn,
                     const FPRegister& fm,
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  ASSERT(fd.SizeInBits() == fm.SizeInBits());

                              const FPRegister& fn,

                    const FPRegister& fn) {
  if (fd.Is64Bits()) {
    FPDataProcessing1Source(fd, fn, FCVT_ds);
  } else {
    FPDataProcessing1Source(fd, fn, FCVT_sd);
Instr Assembler::ImmFP32(float imm) {
  // The 8-bit immediate keeps the sign bit, one exponent bit and the top six
  // mantissa bits of the IEEE-754 single (heads and comments restored from
  // the declarations; the statements below are original).
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}


Instr Assembler::ImmFP64(double imm) {
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
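// Worked example (illustrative): 1.0f has raw bits 0x3f800000, so bit7 = 0,
// bit6 = 1 and bit5_to_0 = 0x30, giving imm8 = 0x70. The double 1.0
// (0x3ff0000000000000) produces the same imm8 through the 64-bit path.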
void Assembler::MoveWide(const Register& rd,
  ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
  ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));

    if ((imm & ~0xffffUL) == 0) {

    } else if ((imm & ~(0xffffUL << 16)) == 0) {

    } else if ((imm & ~(0xffffUL << 32)) == 0) {

    } else if ((imm & ~(0xffffUL << 48)) == 0) {
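// Illustration of the shift deduction above: with no explicit shift, an
// immediate such as 0x12340000 matches the second clause, so it is encoded as
// imm16 = 0x1234 with a 16-bit left shift (i.e. "movz x0, #0x1234, lsl #16").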
                       const Operand& operand,
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(!operand.NeedsRelocation());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();

  } else if (operand.IsShiftedRegister()) {
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());

    if (rn.IsSP() || rd.IsSP()) {
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
  } else {
    ASSERT(operand.IsExtendedRegister());


                                const Operand& operand,
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
  ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  ASSERT(!operand.NeedsRelocation());
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
#ifdef USE_SIMULATOR
  BlockPoolsScope scope(this);

  if (params & BREAK) {
                        const Operand& operand,
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(!operand.NeedsRelocation());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.SizeInBits();

    ASSERT(rd.Is64Bits() || is_uint32(immediate));

    immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {

    ASSERT(operand.IsShiftedRegister());
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);


  unsigned reg_size = rd.SizeInBits();
                                   const Operand& operand,
  ASSERT(!operand.NeedsRelocation());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();

    ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));

  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));


void Assembler::DataProcessing1Source(const Register& rd,
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  ASSERT(fd.SizeInBits() == fm.SizeInBits());


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                          unsigned shift_amount) {
  // Dispatch on the shift type (switch structure assumed from the calls).
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
  }


                                unsigned left_shift) {
  ASSERT(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();

  unsigned high_bit = (8 << (extend & 0x3)) - 1;

  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {

      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;

      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;

        lsl(rd, rn_, left_shift);

    lsl(rd, rn_, left_shift);
void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Operand& operand,
  ASSERT(operand.IsShiftedRegister());
  ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  ASSERT(!operand.NeedsRelocation());
       Rm(operand.reg()) | Rn(rn) | Rd(rd));


void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Operand& operand,
  ASSERT(!operand.NeedsRelocation());
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       dest_reg | RnSP(rn));
bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}
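// Illustration: 0xabc fits in 12 bits and 0xabc000 is a 12-bit value shifted
// left by 12, so both are valid add/sub immediates; 0xabc001 is neither and
// would have to be materialized into a register first.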
  ptrdiff_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {

  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    ASSERT((shift_amount == 0) ||

    ASSERT(!rt.Is(addr.base()));

    if (addr.IsPreIndex()) {
      ASSERT(addr.IsPostIndex());


bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
  return is_int9(offset);
}


bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}
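// Illustration: for a 64-bit load (size = 3) an offset of 8 scales to 1 in the
// unsigned 12-bit field, so multiples of 8 from 0 to 32760 use the scaled
// form; an offset such as 1 or -16 falls back to the unscaled form, which
// accepts any 9-bit signed value in [-256, 255].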
void Assembler::LoadRelocatedValue(const CPURegister& rt,
                                   const Operand& operand,
  int64_t imm = operand.immediate();
  ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
  RecordRelocInfo(operand.rmode(), imm);
  if ((value == 0) || (value == 0xffffffffffffffffUL) ||

  int imm_s_mask = 0x3F;

    *imm_r = (value & 3) - 1;

  *n = (width == 64) ? 1 : 0;
  *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);

  if ((lead_zero + set_bits) == width) {

    *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;

  if (lead_zero + trail_zero + set_bits == width) {

  if (lead_one + trail_one + (width - set_bits) == width) {

  uint64_t mask = (1UL << (width >> 1)) - 1;
  if ((value & mask) == ((value >> (width >> 1)) & mask)) {
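// Logical-immediate note (inferred from the checks above): a value is
// encodable when it is a rotated run of contiguous set bits replicated across
// the register. For example 0x00ff00ff00ff00ff (a repeating 16-bit pattern)
// is encodable, while 0x1234567812345678 is not; the final test halves the
// width for as long as the pattern keeps repeating.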
  return is_uint5(immediate);


bool Assembler::IsImmFP32(float imm) {
  uint32_t bits = float_to_rawbits(imm);
  // The low 19 mantissa bits must be zero.
  if ((bits & 0x7ffff) != 0) {

  // bits[29..25] must be all set or all clear.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {

  // bit[30] and bit[29] must be opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {


bool Assembler::IsImmFP64(double imm) {
  uint64_t bits = double_to_rawbits(imm);
  // The low 48 mantissa bits must be zero.
  if ((bits & 0xffffffffffffL) != 0) {

  // bits[61..54] must be all set or all clear.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0) {

  // bit[62] and bit[61] must be opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
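// Consequence of the checks above: the encodable FP immediates are exactly
// +/- n/16 * 2^e with n in [16, 31] and e in [-3, 4]. For example 1.0, 0.5
// and 31.0 are encodable, while 0.0 and 0.1 are not (0.0 fails the exponent
// test and is materialized separately).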
void Assembler::GrowBuffer() {

    desc.buffer_size = 4 * KB;

  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);  // Assumed term, mirroring reloc_size.
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate the pending relocation entries.
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
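// Buffer layout note (inferred from the deltas above): machine code grows
// upward from the start of the buffer while relocation info is written
// downward from the end, so on growth the code block moves by pc_delta and
// the reloc block by rc_delta; the gap between the two is what buffer_space()
// reports.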
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {

  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      (rmode == RelocInfo::VENEER_POOL)) {
    // These modes do not need an entry in the constant pool.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || RelocInfo::IsVeneerPool(rmode));
  } else {  // Assumed branch structure: other modes are pooled.
    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
    if (num_pending_reloc_info_ == 0) {

    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
  }

  if (!RelocInfo::IsNone(rmode)) {

    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {

    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {

      reloc_info_writer.Write(&reloc_info_with_ast_id);

      reloc_info_writer.Write(&rinfo);
void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;  // Assumed; used below.
  if (no_const_pool_before_ < pc_limit) {

    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
    no_const_pool_before_ = pc_limit;
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {

  if (num_pending_reloc_info_ == 0) {
    // Calculate the offset of the next check.
    next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;

  ASSERT(first_const_pool_use_ >= 0);
  int dist = pc_offset() - first_const_pool_use_;
  if (!force_emit && dist < kAvgDistToConstPool &&
      (require_jump || (dist < (kMaxDistToConstPool / 2)))) {

  int jump_instr = require_jump ? kInstructionSize : 0;
  int pool_size = jump_instr + size_pool_marker + size_pool_guard +
  int needed_space = pool_size + kGap;
  BlockPoolsScope block_pools(this);

  // Emit the constant pool entries.
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
           rinfo.rmode() != RelocInfo::CONST_POOL &&
           rinfo.rmode() != RelocInfo::VENEER_POOL);

    Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
    // The instruction to patch must be a literal load with a zero offset.
    ASSERT(instr->IsLdrLiteral() &&
           instr->ImmLLiteral() == 0);
    instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
  }

  num_pending_reloc_info_ = 0;
  first_const_pool_use_ = -1;

  if (after_pool.is_linked()) {

  next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;

                  static_cast<unsigned>(pool_size));


  return pc_offset() > max_reachable_pc - margin - protection_offset -
#ifdef ENABLE_DEBUGGER_SUPPORT
                  RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
  reloc_info_writer.Write(&rinfo);


  BlockPoolsScope scope(this);

  int veneer_pool_relocinfo_loc = pc_offset();

  if (need_protection) {

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

    Label* label = it->second.label_;

    bind(&veneer_size_check);

    // Emit the veneer and retarget the original branch at it.
    Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
    branch->SetImmPCOffsetTarget(veneer);

    veneer_size_check.Unuse();

    it_to_delete = it++;
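// Veneer mechanism (inferred from the retargeting above): when a short-range
// branch (e.g. tbz, with only about +/-32KB of reach) might not reach its
// label, the assembler plants a nearby unconditional branch to the label and
// points the original branch at that veneer, trading one extra instruction
// for the extended range.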
  if (!require_jump) {


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));


  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);


  RecordRelocInfo(RelocInfo::JS_RETURN);

  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);


void Assembler::RecordConstPool(int size) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));


#endif  // V8_TARGET_ARCH_ARM64