30 #if V8_TARGET_ARCH_X64
// Static state of CpuFeatures: the set of CPU features the generated code
// may rely on. Populated by the runtime-probing code below (which ORs
// SSE2/SSE3/SSE4.1/CMOV/SAHF bits into supported_ based on cpuid results)
// and read via ExternalReference::cpu_features().
// NOTE(review): initialized_ appears to be a debug-only flag checked by
// ASSERTs (e.g. in cpu_features()); the surrounding #ifdef guard, if any,
// is not visible in this chunk — confirm against the full file.
43 bool CpuFeatures::initialized_ =
false;
// Bit set of features usable by generated code; starts at the
// architecture's compile-time default feature set.
45 uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
// Features found only by runtime probing, i.e. present on this CPU but not
// part of kDefaultCpuFeatures nor of the platform-implied set (see the
// probing code: probed_features & ~kDefaultCpuFeatures & ~platform_features).
46 uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
// Feature set assumed when cross-compiling (no runtime probing possible).
47 uint64_t CpuFeatures::cross_compile_ = 0;
49 ExternalReference ExternalReference::cpu_features() {
50 ASSERT(CpuFeatures::initialized_);
51 return ExternalReference(&CpuFeatures::supported_);
56 ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
60 supported_ = kDefaultCpuFeatures;
66 uint64_t probed_features = 0;
68 if (cpu.has_sse41()) {
69 probed_features |=
static_cast<uint64_t
>(1) <<
SSE4_1;
72 probed_features |=
static_cast<uint64_t
>(1) <<
SSE3;
77 probed_features |=
static_cast<uint64_t
>(1) <<
SSE2;
81 probed_features |=
static_cast<uint64_t
>(1) <<
CMOV;
85 probed_features |=
static_cast<uint64_t
>(1) <<
SAHF;
89 supported_ = probed_features | platform_features;
90 found_by_runtime_probing_only_
91 = probed_features & ~kDefaultCpuFeatures & ~platform_features;
100 void RelocInfo::PatchCodeWithCall(
Address target,
int guard_bytes) {
104 CodePatcher patcher(pc_, code_size);
108 Label check_codesize;
109 patcher.masm()->bind(&check_codesize);
119 patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
122 for (
int i = 0; i < guard_bytes; i++) {
123 patcher.masm()->int3();
128 void RelocInfo::PatchCode(
byte* instructions,
int instruction_count) {
130 for (
int i = 0; i < instruction_count; i++) {
131 *(pc_ + i) = *(instructions + i);
135 CPU::FlushICache(pc_, instruction_count);
143 Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
145 0, 3, 2, 1, 6, 7, 8, 9, 11, 14, 15
148 const int Register::kAllocationIndexByRegisterCode[
kNumRegisters] = {
149 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, -1, -1, 9, 10
158 if (base.is(
rsp) || base.is(
r12)) {
163 if (disp == 0 && !base.is(
rbp) && !base.is(
r13)) {
165 }
else if (is_int8(disp)) {
181 set_sib(scale, index, base);
182 if (disp == 0 && !base.is(
rbp) && !base.is(
r13)) {
186 }
else if (is_int8(disp)) {
202 set_sib(scale, index,
rbp);
208 ASSERT(operand.len_ >= 1);
210 byte modrm = operand.buf_[0];
212 bool has_sib = ((modrm & 0x07) == 0x04);
214 int disp_offset = has_sib ? 2 : 1;
215 int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
218 bool is_baseless = (mode == 0) && (base_reg == 0x05);
220 if (mode == 0x80 || is_baseless) {
222 disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
223 }
else if (mode == 0x40) {
225 disp_value =
static_cast<signed char>(operand.buf_[disp_offset]);
229 ASSERT(offset >= 0 ? disp_value + offset > disp_value
230 : disp_value + offset < disp_value);
231 disp_value += offset;
233 if (!is_int8(disp_value) || is_baseless) {
235 buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
236 len_ = disp_offset + 4;
238 }
else if (disp_value != 0 || (base_reg == 0x05)) {
240 buf_[0] = (modrm & 0x3f) | 0x40;
241 len_ = disp_offset + 1;
242 buf_[disp_offset] =
static_cast<byte>(disp_value);
245 buf_[0] = (modrm & 0x3f);
249 buf_[1] = operand.buf_[1];
254 bool Operand::AddressUsesRegister(Register reg)
const {
255 int code = reg.code();
256 ASSERT((buf_[0] & 0xC0) != 0xC0);
259 int base_code = buf_[0] & 0x07;
263 int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
265 if (index_code !=
rsp.
code() && index_code ==
code)
return true;
267 base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
269 if (base_code ==
rbp.
code() && ((buf_[0] & 0xC0) == 0))
return false;
270 return code == base_code;
274 if (base_code ==
rbp.
code() && ((buf_[0] & 0xC0) == 0))
return false;
275 base_code |= ((rex_ & 0x01) << 3);
276 return code == base_code;
284 #ifdef GENERATED_CODE_COVERAGE
285 static void InitCoverageLog();
289 : AssemblerBase(isolate, buffer, buffer_size),
291 positions_recorder_(this) {
304 #ifdef GENERATED_CODE_COVERAGE
318 ASSERT(desc->instr_size > 0);
327 int delta = (m - (
pc_offset() & (m - 1))) & (m - 1);
339 while (*a == 0x66) a++;
340 if (*a == 0x90)
return true;
341 if (a[0] == 0xf && a[1] == 0x1f)
return true;
346 void Assembler::bind_to(Label*
L,
int pos) {
349 if (L->is_linked()) {
350 int current = L->pos();
351 int next = long_at(current);
352 while (next != current) {
354 int imm32 = pos - (current +
sizeof(
int32_t));
355 long_at_put(current, imm32);
357 next = long_at(next);
360 int last_imm32 = pos - (current +
sizeof(
int32_t));
361 long_at_put(current, last_imm32);
363 while (L->is_near_linked()) {
364 int fixup_pos = L->near_link_pos();
366 static_cast<int>(*
reinterpret_cast<int8_t*
>(
addr_at(fixup_pos)));
367 ASSERT(offset_to_next <= 0);
368 int disp = pos - (fixup_pos +
sizeof(int8_t));
369 CHECK(is_int8(disp));
371 if (offset_to_next < 0) {
372 L->link_to(fixup_pos + offset_to_next, Label::kNear);
386 void Assembler::GrowBuffer() {
393 desc.buffer_size = 4*
KB;
400 (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
405 desc.buffer = NewArray<byte>(desc.buffer_size);
413 memset(desc.buffer, 0xCC, desc.buffer_size);
417 intptr_t pc_delta = desc.buffer -
buffer_;
418 intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
422 reloc_info_writer.pos(), desc.reloc_size);
433 buffer_size_ = desc.buffer_size;
435 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
436 reloc_info_writer.last_pc() + pc_delta);
439 for (RelocIterator it(desc); !it.done(); it.next()) {
440 RelocInfo::Mode rmode = it.rinfo()->rmode();
441 if (rmode == RelocInfo::INTERNAL_REFERENCE) {
442 intptr_t* p =
reinterpret_cast<intptr_t*
>(it.rinfo()->pc());
453 void Assembler::emit_operand(
int code,
const Operand& adr) {
455 const unsigned length = adr.len_;
459 ASSERT((adr.buf_[0] & 0x38) == 0);
460 pc_[0] = adr.buf_[0] | code << 3;
463 for (
unsigned i = 1; i < length; i++)
pc_[i] = adr.buf_[i];
470 void Assembler::arithmetic_op(
byte opcode, Register reg,
const Operand& op) {
472 emit_rex_64(reg, op);
474 emit_operand(reg, op);
478 void Assembler::arithmetic_op(
byte opcode, Register reg, Register rm_reg) {
480 ASSERT((opcode & 0xC6) == 2);
481 if (rm_reg.low_bits() == 4) {
483 emit_rex_64(rm_reg, reg);
485 emit_modrm(rm_reg, reg);
487 emit_rex_64(reg, rm_reg);
489 emit_modrm(reg, rm_reg);
494 void Assembler::arithmetic_op_16(
byte opcode, Register reg, Register rm_reg) {
496 ASSERT((opcode & 0xC6) == 2);
497 if (rm_reg.low_bits() == 4) {
500 emit_optional_rex_32(rm_reg, reg);
502 emit_modrm(rm_reg, reg);
505 emit_optional_rex_32(reg, rm_reg);
507 emit_modrm(reg, rm_reg);
512 void Assembler::arithmetic_op_16(
byte opcode,
514 const Operand& rm_reg) {
517 emit_optional_rex_32(reg, rm_reg);
519 emit_operand(reg, rm_reg);
523 void Assembler::arithmetic_op_32(
byte opcode, Register reg, Register rm_reg) {
525 ASSERT((opcode & 0xC6) == 2);
526 if (rm_reg.low_bits() == 4) {
528 emit_optional_rex_32(rm_reg, reg);
530 emit_modrm(rm_reg, reg);
532 emit_optional_rex_32(reg, rm_reg);
534 emit_modrm(reg, rm_reg);
539 void Assembler::arithmetic_op_32(
byte opcode,
541 const Operand& rm_reg) {
543 emit_optional_rex_32(reg, rm_reg);
545 emit_operand(reg, rm_reg);
549 void Assembler::immediate_arithmetic_op(
byte subcode,
554 if (is_int8(src.value_)) {
556 emit_modrm(subcode, dst);
558 }
else if (dst.is(
rax)) {
559 emit(0x05 | (subcode << 3));
563 emit_modrm(subcode, dst);
568 void Assembler::immediate_arithmetic_op(
byte subcode,
573 if (is_int8(src.value_)) {
575 emit_operand(subcode, dst);
579 emit_operand(subcode, dst);
585 void Assembler::immediate_arithmetic_op_16(
byte subcode,
590 emit_optional_rex_32(dst);
591 if (is_int8(src.value_)) {
593 emit_modrm(subcode, dst);
595 }
else if (dst.is(
rax)) {
596 emit(0x05 | (subcode << 3));
600 emit_modrm(subcode, dst);
606 void Assembler::immediate_arithmetic_op_16(
byte subcode,
611 emit_optional_rex_32(dst);
612 if (is_int8(src.value_)) {
614 emit_operand(subcode, dst);
618 emit_operand(subcode, dst);
624 void Assembler::immediate_arithmetic_op_32(
byte subcode,
628 emit_optional_rex_32(dst);
629 if (is_int8(src.value_)) {
631 emit_modrm(subcode, dst);
633 }
else if (dst.is(
rax)) {
634 emit(0x05 | (subcode << 3));
638 emit_modrm(subcode, dst);
644 void Assembler::immediate_arithmetic_op_32(
byte subcode,
648 emit_optional_rex_32(dst);
649 if (is_int8(src.value_)) {
651 emit_operand(subcode, dst);
655 emit_operand(subcode, dst);
661 void Assembler::immediate_arithmetic_op_8(
byte subcode,
665 emit_optional_rex_32(dst);
666 ASSERT(is_int8(src.value_) || is_uint8(src.value_));
668 emit_operand(subcode, dst);
673 void Assembler::immediate_arithmetic_op_8(
byte subcode,
677 if (!dst.is_byte_register()) {
681 ASSERT(is_int8(src.value_) || is_uint8(src.value_));
683 emit_modrm(subcode, dst);
688 void Assembler::shift(Register dst, Immediate shift_amount,
int subcode) {
690 ASSERT(is_uint6(shift_amount.value_));
691 if (shift_amount.value_ == 1) {
694 emit_modrm(subcode, dst);
698 emit_modrm(subcode, dst);
699 emit(shift_amount.value_);
704 void Assembler::shift(Register dst,
int subcode) {
708 emit_modrm(subcode, dst);
712 void Assembler::shift_32(Register dst,
int subcode) {
714 emit_optional_rex_32(dst);
716 emit_modrm(subcode, dst);
720 void Assembler::shift_32(Register dst, Immediate shift_amount,
int subcode) {
722 ASSERT(is_uint5(shift_amount.value_));
723 if (shift_amount.value_ == 1) {
724 emit_optional_rex_32(dst);
726 emit_modrm(subcode, dst);
728 emit_optional_rex_32(dst);
730 emit_modrm(subcode, dst);
731 emit(shift_amount.value_);
738 emit_rex_64(src, dst);
741 emit_operand(src, dst);
747 emit_rex_64(src, dst);
750 emit_operand(src, dst);
756 emit_optional_rex_32(dst, src);
759 emit_modrm(dst, src);
772 }
else if (L->is_linked()) {
785 ASSERT(RelocInfo::IsRuntimeEntry(rmode));
790 emit_runtime_entry(entry, rmode);
795 RelocInfo::Mode rmode,
796 TypeFeedbackId ast_id) {
801 emit_code_target(target, rmode, ast_id);
809 emit_optional_rex_32(adr);
811 emit_modrm(0x2, adr);
819 emit_optional_rex_32(op);
821 emit_operand(0x2, op);
835 intptr_t displacement = target - source;
836 ASSERT(is_int32(displacement));
837 emitl(static_cast<int32_t>(displacement));
862 }
else if (cc ==
never) {
870 emit_rex_64(dst, src);
873 emit_modrm(dst, src);
880 }
else if (cc ==
never) {
886 emit_rex_64(dst, src);
889 emit_operand(dst, src);
896 }
else if (cc ==
never) {
902 emit_optional_rex_32(dst, src);
905 emit_modrm(dst, src);
912 }
else if (cc ==
never) {
918 emit_optional_rex_32(dst, src);
921 emit_operand(dst, src);
926 ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
947 void Assembler::emit_dec(Register dst,
int size) {
951 emit_modrm(0x1, dst);
955 void Assembler::emit_dec(
const Operand& dst,
int size) {
959 emit_operand(1, dst);
965 if (!dst.is_byte_register()) {
970 emit_modrm(0x1, dst);
976 emit_optional_rex_32(dst);
978 emit_operand(1, dst);
996 void Assembler::emit_idiv(Register src,
int size) {
1000 emit_modrm(0x7, src);
1004 void Assembler::emit_imul(Register src,
int size) {
1006 emit_rex(src, size);
1008 emit_modrm(0x5, src);
1012 void Assembler::emit_imul(Register dst, Register src,
int size) {
1014 emit_rex(dst, src, size);
1017 emit_modrm(dst, src);
1021 void Assembler::emit_imul(Register dst,
const Operand& src,
int size) {
1023 emit_rex(dst, src, size);
1026 emit_operand(dst, src);
1030 void Assembler::emit_imul(Register dst, Register src, Immediate imm,
int size) {
1032 emit_rex(dst, src, size);
1033 if (is_int8(imm.value_)) {
1035 emit_modrm(dst, src);
1039 emit_modrm(dst, src);
1045 void Assembler::emit_inc(Register dst,
int size) {
1047 emit_rex(dst, size);
1049 emit_modrm(0x0, dst);
1053 void Assembler::emit_inc(
const Operand& dst,
int size) {
1055 emit_rex(dst, size);
1057 emit_operand(0, dst);
1071 }
else if (cc ==
never) {
1076 if (L->is_bound()) {
1077 const int short_size = 2;
1078 const int long_size = 6;
1093 emit((offs - short_size) & 0xFF);
1098 emitl(offs - long_size);
1100 }
else if (distance == Label::kNear) {
1104 if (L->is_near_linked()) {
1105 int offset = L->near_link_pos() -
pc_offset();
1107 disp =
static_cast<byte>(offset & 0xFF);
1111 }
else if (L->is_linked()) {
1123 L->link_to(current);
1129 ASSERT(RelocInfo::IsRuntimeEntry(rmode));
1134 emit_runtime_entry(entry, rmode);
1139 Handle<Code> target,
1140 RelocInfo::Mode rmode) {
1146 emit_code_target(target, rmode);
1152 const int short_size =
sizeof(int8_t);
1153 const int long_size =
sizeof(
int32_t);
1154 if (L->is_bound()) {
1160 emit((offs - short_size) & 0xFF);
1164 emitl(offs - long_size);
1166 }
else if (distance == Label::kNear) {
1169 if (L->is_near_linked()) {
1170 int offset = L->near_link_pos() -
pc_offset();
1172 disp =
static_cast<byte>(offset & 0xFF);
1176 }
else if (L->is_linked()) {
1187 L->link_to(current);
1192 void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
1196 emit_code_target(target, rmode);
1201 ASSERT(RelocInfo::IsRuntimeEntry(rmode));
1203 ASSERT(RelocInfo::IsRuntimeEntry(rmode));
1205 emit_runtime_entry(entry, rmode);
1212 emit_optional_rex_32(target);
1214 emit_modrm(0x4, target);
1221 emit_optional_rex_32(src);
1223 emit_operand(0x4, src);
1227 void Assembler::emit_lea(Register dst,
const Operand& src,
int size) {
1229 emit_rex(dst, src, size);
1231 emit_operand(dst, src);
1254 load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
1266 if (!dst.is_byte_register()) {
1268 emit_rex_32(dst, src);
1270 emit_optional_rex_32(dst, src);
1273 emit_operand(dst, src);
1279 if (!dst.is_byte_register()) {
1282 emit(0xB0 + dst.low_bits());
1289 if (!src.is_byte_register()) {
1290 emit_rex_32(src, dst);
1292 emit_optional_rex_32(src, dst);
1295 emit_operand(src, dst);
1301 emit_optional_rex_32(dst);
1303 emit_operand(0x0, dst);
1304 emit(static_cast<byte>(imm.value_));
1311 emit_optional_rex_32(dst, src);
1313 emit_operand(dst, src);
1320 emit_optional_rex_32(src, dst);
1322 emit_operand(src, dst);
1329 emit_optional_rex_32(dst);
1331 emit_operand(0x0, dst);
1332 emit(static_cast<byte>(imm.value_ & 0xff));
1333 emit(static_cast<byte>(imm.value_ >> 8));
1337 void Assembler::emit_mov(Register dst,
const Operand& src,
int size) {
1339 emit_rex(dst, src, size);
1341 emit_operand(dst, src);
1345 void Assembler::emit_mov(Register dst, Register src,
int size) {
1347 if (src.low_bits() == 4) {
1348 emit_rex(src, dst, size);
1350 emit_modrm(src, dst);
1352 emit_rex(dst, src, size);
1354 emit_modrm(dst, src);
1359 void Assembler::emit_mov(
const Operand& dst, Register src,
int size) {
1361 emit_rex(src, dst, size);
1363 emit_operand(src, dst);
1367 void Assembler::emit_mov(Register dst, Immediate value,
int size) {
1369 emit_rex(dst, size);
1372 emit_modrm(0x0, dst);
1375 emit(0xB8 + dst.low_bits());
1381 void Assembler::emit_mov(
const Operand& dst, Immediate value,
int size) {
1383 emit_rex(dst, size);
1385 emit_operand(0x0, dst);
1390 void Assembler::movp(Register dst,
void* value, RelocInfo::Mode rmode) {
1393 emit(0xB8 | dst.low_bits());
1394 emitp(value, rmode);
1401 emit(0xB8 | dst.low_bits());
1407 movq(dst, static_cast<int64_t>(value));
1415 emit_optional_rex_32(dst);
1417 emit_operand(0, dst);
1418 if (src->is_bound()) {
1422 }
else if (src->is_linked()) {
1426 ASSERT(src->is_unused());
1429 src->link_to(current);
1436 emit_rex_64(dst, src);
1439 emit_operand(dst, src);
1445 emit_rex_64(dst, src);
1448 emit_operand(dst, src);
1454 emit_rex_64(dst, src);
1456 emit_modrm(dst, src);
1462 emit_rex_64(dst, src);
1464 emit_operand(dst, src);
1468 void Assembler::emit_movzxb(Register dst,
const Operand& src,
int size) {
1472 emit_optional_rex_32(dst, src);
1475 emit_operand(dst, src);
1479 void Assembler::emit_movzxw(Register dst,
const Operand& src,
int size) {
1483 emit_optional_rex_32(dst, src);
1486 emit_operand(dst, src);
1490 void Assembler::emit_movzxw(Register dst, Register src,
int size) {
1494 emit_optional_rex_32(dst, src);
1497 emit_modrm(dst, src);
1516 void Assembler::emit_repmovs(
int size) {
1528 emit_modrm(0x4, src);
1532 void Assembler::emit_neg(Register dst,
int size) {
1534 emit_rex(dst, size);
1536 emit_modrm(0x3, dst);
1540 void Assembler::emit_neg(
const Operand& dst,
int size) {
1544 emit_operand(3, dst);
1554 void Assembler::emit_not(Register dst,
int size) {
1556 emit_rex(dst, size);
1558 emit_modrm(0x2, dst);
1562 void Assembler::emit_not(
const Operand& dst,
int size) {
1564 emit_rex(dst, size);
1566 emit_operand(2, dst);
1649 emit_optional_rex_32(dst);
1650 emit(0x58 | dst.low_bits());
1656 emit_optional_rex_32(dst);
1658 emit_operand(0, dst);
1670 emit_optional_rex_32(src);
1671 emit(0x50 | src.low_bits());
1677 emit_optional_rex_32(src);
1679 emit_operand(6, src);
1685 if (is_int8(value.value_)) {
1690 emitl(value.value_);
1710 ASSERT(is_uint16(imm16));
1716 emit((imm16 >> 8) & 0xFF);
1728 if (!reg.is_byte_register()) {
1733 emit_modrm(0x0, reg);
1739 emit_rex_64(src, dst);
1742 emit_modrm(src, dst);
1748 emit_rex_64(src, dst);
1751 emit_modrm(src, dst);
1755 void Assembler::emit_xchg(Register dst, Register src,
int size) {
1757 if (src.is(
rax) || dst.is(
rax)) {
1758 Register other = src.is(
rax) ? dst : src;
1759 emit_rex(other, size);
1760 emit(0x90 | other.low_bits());
1761 }
else if (dst.low_bits() == 4) {
1762 emit_rex(dst, src, size);
1764 emit_modrm(dst, src);
1766 emit_rex(src, dst, size);
1768 emit_modrm(src, dst);
1792 store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
1798 if (src.low_bits() == 4) {
1799 emit_rex_32(src, dst);
1801 emit_modrm(src, dst);
1803 if (!dst.is_byte_register() || !src.is_byte_register()) {
1805 emit_rex_32(dst, src);
1808 emit_modrm(dst, src);
1814 ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
1820 if (!reg.is_byte_register()) {
1825 emit_modrm(0x0, reg);
1832 ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
1834 emit_optional_rex_32(
rax, op);
1836 emit_operand(
rax, op);
1843 if (!reg.is_byte_register()) {
1845 emit_rex_32(reg, op);
1847 emit_optional_rex_32(reg, op);
1850 emit_operand(reg, op);
1854 void Assembler::emit_test(Register dst, Register src,
int size) {
1856 if (src.low_bits() == 4) {
1857 emit_rex(src, dst, size);
1859 emit_modrm(src, dst);
1861 emit_rex(dst, src, size);
1863 emit_modrm(dst, src);
1868 void Assembler::emit_test(Register reg, Immediate mask,
int size) {
1870 if (is_uint8(mask.value_)) {
1876 emit_rex(
rax, size);
1880 emit_rex(reg, size);
1882 emit_modrm(0x0, reg);
1888 void Assembler::emit_test(
const Operand& op, Immediate mask,
int size) {
1890 if (is_uint8(mask.value_)) {
1895 emit_rex(
rax, op, size);
1897 emit_operand(
rax, op);
1902 void Assembler::emit_test(
const Operand& op, Register reg,
int size) {
1904 emit_rex(reg, op, size);
1906 emit_operand(reg, op);
1915 emit_farith(0xD9, 0xC0, i);
1949 emit_optional_rex_32(adr);
1951 emit_operand(0, adr);
1957 emit_optional_rex_32(adr);
1959 emit_operand(0, adr);
1965 emit_optional_rex_32(adr);
1967 emit_operand(3, adr);
1973 emit_optional_rex_32(adr);
1975 emit_operand(3, adr);
1982 emit_farith(0xDD, 0xD8, index);
1988 emit_optional_rex_32(adr);
1990 emit_operand(0, adr);
1996 emit_optional_rex_32(adr);
1998 emit_operand(5, adr);
2004 emit_optional_rex_32(adr);
2006 emit_operand(3, adr);
2013 emit_optional_rex_32(adr);
2015 emit_operand(1, adr);
2022 emit_optional_rex_32(adr);
2024 emit_operand(1, adr);
2030 emit_optional_rex_32(adr);
2032 emit_operand(2, adr);
2038 emit_optional_rex_32(adr);
2040 emit_operand(7, adr);
2109 emit_farith(0xDC, 0xC0, i);
2115 emit_farith(0xDC, 0xE8, i);
2121 emit_optional_rex_32(adr);
2123 emit_operand(4, adr);
2129 emit_farith(0xDC, 0xC8, i);
2135 emit_farith(0xDC, 0xF8, i);
2141 emit_farith(0xDE, 0xC0, i);
2147 emit_farith(0xDE, 0xE8, i);
2153 emit_farith(0xDE, 0xE0, i);
2159 emit_farith(0xDE, 0xC8, i);
2165 emit_farith(0xDE, 0xF8, i);
2185 emit_farith(0xD9, 0xC8, i);
2198 emit_farith(0xDD, 0xC0, i);
2211 emit_farith(0xDD, 0xE8, i);
2278 void Assembler::emit_farith(
int b1,
int b2,
int i) {
2279 ASSERT(is_uint8(b1) && is_uint8(b2));
2290 emit_optional_rex_32(dst, src);
2299 emit_optional_rex_32(dst, src);
2308 emit_optional_rex_32(dst, src);
2317 emit_optional_rex_32(dst, src);
2326 emit_optional_rex_32(dst, src);
2335 emit_optional_rex_32(dst, src);
2344 emit_optional_rex_32(dst, src);
2353 emit_optional_rex_32(dst, src);
2362 emit_optional_rex_32(dst, src);
2371 emit_optional_rex_32(dst, src);
2380 emit_optional_rex_32(dst, src);
2389 emit_optional_rex_32(dst, src);
2398 emit_optional_rex_32(dst, src);
2407 emit_optional_rex_32(dst, src);
2419 emit_optional_rex_32(dst, src);
2429 emit_optional_rex_32(src, dst);
2439 emit_rex_64(dst, src);
2449 emit_rex_64(src, dst);
2458 if (dst.low_bits() == 4) {
2461 emit_optional_rex_32(dst, src);
2467 emit_optional_rex_32(src, dst);
2478 emit_rex_64(src, dst);
2488 emit_rex_64(dst, src);
2498 emit_rex_64(src, dst);
2508 emit_rex_64(dst, src);
2520 emit_optional_rex_32(src, dst);
2532 emit_optional_rex_32(src, dst);
2542 emit_optional_rex_32(dst, src);
2552 emit_optional_rex_32(dst, src);
2561 if (src.low_bits() == 4) {
2563 emit_optional_rex_32(src, dst);
2568 emit_optional_rex_32(dst, src);
2579 emit_optional_rex_32(src, dst);
2589 if (src.low_bits() == 4) {
2592 emit_optional_rex_32(src, dst);
2598 emit_optional_rex_32(dst, src);
2609 emit_optional_rex_32(dst, src);
2619 emit_optional_rex_32(dst, src);
2639 emit_optional_rex_32(dst, src);
2642 emit_operand(dst, src);
2649 emit_optional_rex_32(dst, src);
2659 emit_optional_rex_32(dst, src);
2662 emit_operand(dst, src);
2669 emit_optional_rex_32(dst, src);
2679 emit_rex_64(dst, src);
2689 emit_optional_rex_32(dst, src);
2699 emit_optional_rex_32(dst, src);
2709 emit_optional_rex_32(dst, src);
2719 emit_rex_64(dst, src);
2729 emit_optional_rex_32(dst, src);
2739 emit_optional_rex_32(dst, src);
2749 emit_optional_rex_32(dst, src);
2759 emit_optional_rex_32(dst, src);
2769 emit_rex_64(dst, src);
2779 emit_optional_rex_32(dst, src);
2789 emit_optional_rex_32(dst, src);
2799 emit_optional_rex_32(dst, src);
2809 emit_optional_rex_32(dst, src);
2819 emit_optional_rex_32(dst, src);
2829 emit_optional_rex_32(dst, src);
2839 emit_optional_rex_32(dst, src);
2849 emit_optional_rex_32(dst, src);
2859 emit_optional_rex_32(dst, src);
2869 emit_optional_rex_32(dst, src);
2879 emit_optional_rex_32(dst, src);
2889 emit_optional_rex_32(dst, src);
2899 emit_optional_rex_32(dst, src);
2912 emit_optional_rex_32(dst, src);
2918 emit(static_cast<byte>(mode) | 0x8);
2925 emit_optional_rex_32(dst, src);
2934 emit_optional_rex_32(dst, src);
2942 Register ireg = { reg.code() };
2943 emit_operand(ireg, adr);
2948 emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
2953 emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
2958 emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
2976 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2977 ASSERT(!RelocInfo::IsNone(rmode));
2978 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2988 }
else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
2993 reloc_info_writer.Write(&rinfo);
3000 RecordRelocInfo(RelocInfo::JS_RETURN);
3007 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
3012 if (FLAG_code_comments || force) {
3014 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
3032 const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
3034 1 << RelocInfo::INTERNAL_REFERENCE |
3035 1 << RelocInfo::CODE_AGE_SEQUENCE;
3038 bool RelocInfo::IsCodedSpecially() {
3042 return (1 << rmode_) & kApplyMask;
3046 bool RelocInfo::IsInConstantPool() {
3053 #endif // V8_TARGET_ARCH_X64
void psllq(XMMRegister reg, int8_t shift)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
static RelocInfo::Mode RelocInfoNone()
void cvtlsi2ss(XMMRegister dst, Register src)
void movapd(XMMRegister dst, XMMRegister src)
static const int kMaximalBufferSize
void pushq_imm32(int32_t imm32)
Isolate * isolate() const
void fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void load_rax(void *ptr, RelocInfo::Mode rmode)
void ucomisd(XMMRegister dst, XMMRegister src)
void cvttss2si(Register dst, const Operand &src)
void PopulateConstantPool(ConstantPoolArray *constant_pool)
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void bsrl(Register dst, Register src)
bool buffer_overflow() const
void mulsd(XMMRegister dst, XMMRegister src)
void addps(XMMRegister dst, const Operand &src)
void cvtsd2si(Register dst, XMMRegister src)
void movq(Register dst, int64_t value)
void orpd(XMMRegister dst, XMMRegister src)
void cvtss2sd(XMMRegister dst, XMMRegister src)
void sqrtsd(XMMRegister dst, XMMRegister src)
static const int kMinimalBufferSize
void mulps(XMMRegister dst, const Operand &src)
void andpd(XMMRegister dst, XMMRegister src)
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
#define ASSERT(condition)
void cvtlsi2sd(XMMRegister dst, const Operand &src)
void movsxlq(Register dst, Register src)
void xorpd(XMMRegister dst, XMMRegister src)
void fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void bt(const Operand &dst, Register src)
void cmovl(Condition cc, Register dst, Register src)
void testb(Register dst, Register src)
void fistp_s(const Operand &adr)
void addsd(XMMRegister dst, XMMRegister src)
void fld_d(const Operand &adr)
void cmpb_al(const Operand &op)
void pushq(Immediate value)
void fild_s(const Operand &adr)
void enter(const Immediate &size)
void ret(const Register &xn=lr)
void shld(Register dst, Register src)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
void andps(XMMRegister dst, const Operand &src)
void fisttp_d(const Operand &adr)
void movss(XMMRegister dst, const Operand &src)
void movb(Register dst, const Operand &src)
void set_byte_at(int pos, byte value)
void cvtsd2ss(XMMRegister dst, XMMRegister src)
void movp(Register dst, void *ptr, RelocInfo::Mode rmode)
bool predictable_code_size() const
void movsd(XMMRegister dst, XMMRegister src)
void GetCode(CodeDesc *desc)
void movdqa(XMMRegister dst, const Operand &src)
void movdqu(XMMRegister dst, const Operand &src)
void movsxbq(Register dst, const Operand &src)
static void TooLateToEnableNow()
void movmskpd(Register dst, XMMRegister src)
void fisttp_s(const Operand &adr)
static int32_t & int32_at(Address addr)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void orps(XMMRegister dst, const Operand &src)
static void MemMove(void *dest, const void *src, size_t size)
MaybeObject * AllocateConstantPool(Heap *heap)
void emit_sse_operand(XMMRegister reg, const Operand &adr)
void cvtqsi2sd(XMMRegister dst, const Operand &src)
void fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void movl(const Operand &dst, Label *src)
void setcc(Condition cc, Register reg)
void fld_s(const Operand &adr)
void fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void RecordDebugBreakSlot()
void fstp_d(const Operand &adr)
void movw(Register reg, uint32_t immediate, Condition cond=al)
static const int kCallSequenceLength
void store_rax(void *dst, RelocInfo::Mode mode)
void fistp_d(const Operand &adr)
void cvtsd2siq(Register dst, XMMRegister src)
void shrd(Register dst, Register src)
void movaps(XMMRegister dst, XMMRegister src)
void movmskps(Register dst, XMMRegister src)
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
void RecordComment(const char *msg)
bool emit_debug_code() const
void fstp_s(const Operand &adr)
void divsd(XMMRegister dst, XMMRegister src)
const Register kScratchRegister
void fild_d(const Operand &adr)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
void cmpltsd(XMMRegister dst, XMMRegister src)
#define ASSERT_EQ(v1, v2)
void divps(XMMRegister dst, const Operand &src)
void movd(XMMRegister dst, Register src)
void xorps(XMMRegister dst, const Operand &src)
PositionsRecorder * positions_recorder()
void subps(XMMRegister dst, const Operand &src)
void fisub_s(const Operand &adr)
void extractps(Register dst, XMMRegister src, byte imm8)
void shufps(XMMRegister dst, XMMRegister src, byte imm8)
static uint64_t CpuFeaturesImpliedByPlatform()
#define RUNTIME_ENTRY(name, nargs, ressize)
bool IsEnabled(CpuFeature f)
void fist_s(const Operand &adr)
void movsxwq(Register dst, const Operand &src)
void DeleteArray(T *array)
void bts(Register dst, Register src)
void cmovq(Condition cc, Register dst, Register src)
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
void subsd(XMMRegister dst, XMMRegister src)
void cvttsd2siq(Register dst, XMMRegister src)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void cvttsd2si(Register dst, const Operand &src)