#if defined(V8_TARGET_ARCH_X64)
bool CpuFeatures::initialized_ = false;
uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// From CpuFeatures::Probe(): detect CPU features at runtime by generating
// and executing a small CPUID stub in a fresh executable mapping.
ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
supported_ = kDefaultCpuFeatures;

const int kBufferSize = 4 * KB;
VirtualMemory* memory = new VirtualMemory(kBufferSize);
if (!memory->IsReserved()) {
  delete memory;
  return;
}
ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
if (!memory->Commit(memory->address(), kBufferSize, true)) {  // Executable.
  delete memory;
  return;
}

Assembler assm(NULL, memory->address(), kBufferSize);
// Check whether CPUID is available by toggling the ID bit (bit 21) of EFLAGS.
__ xor_(rax, Immediate(0x200000));
// Query CPUID leaf 1 for the feature-information bits.
__ movl(rax, Immediate(1));
supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
// Shift the ECX feature bits into the upper half of the 64-bit result.
__ shl(rcx, Immediate(32));
supported_ = kDefaultCpuFeatures;  // Reset until the probe has actually run.
__ movl(rax, Immediate(1));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
supported_ = probe();
found_by_runtime_probing_ = supported_;
found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
supported_ |= os_guarantees;
found_by_runtime_probing_ &= ~os_guarantees;
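// The stub assembled above runs exactly once, at startup; generated code
// then branches on the cached bits. A sketch of the consumer side
// (CpuFeatures::IsSupported is declared in the corresponding header):
//   CpuFeatures::Probe();
//   if (CpuFeatures::IsSupported(SSE3)) { /* emit SSE3 instructions */ }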
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch with a fixed-size call sequence through r10.
  static const int kCallCodeSize = 13;
  int code_size = kCallCodeSize + guard_bytes;

  CodePatcher patcher(pc_, code_size);

  // Add a label to check the size of the emitted call sequence.
  Label check_codesize;
  patcher.masm()->bind(&check_codesize);

  patcher.masm()->call(r10);

  ASSERT_EQ(kCallCodeSize,
            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));

  // Add the requested number of int3 guard bytes after the call.
  for (int i = 0; i < guard_bytes; i++) {
    patcher.masm()->int3();
  }
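// Size check: the 13-byte sequence is presumably a movabs into r10 (REX.W
// B8+r with a 64-bit immediate, 10 bytes; the load itself is not part of
// this extract) followed by call r10 (41 FF D2, 3 bytes). The int3 bytes pad
// out whatever instruction the patch overwrote.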
// From RelocInfo::PatchCode: copy raw instruction bytes over the code at pc_.
for (int i = 0; i < instruction_count; i++) {
  *(pc_ + i) = *(instructions + i);
}
const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
  // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
  0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};

// Inverse mapping; -1 marks registers that are never allocated
// (rsp, rbp, rsi, r10, r12, r13).
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
  0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
};
Operand::Operand(Register base, int32_t disp) : rex_(0) {
  // A base of rsp or r12 requires a SIB byte.
  if (base.is(rsp) || base.is(r12)) {
  // A base of rbp or r13 cannot use the displacement-free mode-0 encoding.
  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {

Operand::Operand(Register base,
                 Register index,
                 ScaleFactor scale,
                 int32_t disp) : rex_(0) {
  set_sib(scale, index, base);
  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {

Operand::Operand(Register index,
                 ScaleFactor scale,
                 int32_t disp) : rex_(0) {
  set_sib(scale, index, rbp);
Operand::Operand(const Operand& operand, int32_t offset) {
  ASSERT(operand.len_ >= 1);
  // An Operand encodes a ModRM byte, an optional SIB byte, and an optional
  // displacement of one or four bytes.
  byte modrm = operand.buf_[0];
  bool has_sib = ((modrm & 0x07) == 0x04);
  byte mode = modrm & 0xC0;
  int disp_offset = has_sib ? 2 : 1;
  int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
  // Mode 0 with a base of rbp/r13 means no base register, with a 32-bit
  // displacement.
  bool is_baseless = (mode == 0) && (base_reg == 0x05);
  int32_t disp_value = 0;
  if (mode == 0x80 || is_baseless) {
    // Mode 2, or mode 0 without a base: a 32-bit displacement is present.
    disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
  } else if (mode == 0x40) {
    // Mode 1: an 8-bit, sign-extended displacement is present.
    disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
  }
  // Adding the offset must not overflow the displacement.
  ASSERT(offset >= 0 ? disp_value + offset > disp_value
                     : disp_value + offset < disp_value);
  disp_value += offset;
  if (!is_int8(disp_value) || is_baseless) {
    // Need 32 bits of displacement: mode 2, or mode 0 if baseless.
    buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
    len_ = disp_offset + 4;
  } else if (disp_value != 0 || (base_reg == 0x05)) {
    // Need 8 bits of displacement: mode 1.
    buf_[0] = (modrm & 0x3f) | 0x40;
    len_ = disp_offset + 1;
    buf_[disp_offset] = static_cast<byte>(disp_value);
  } else {
    // No displacement needed: mode 0.
    buf_[0] = (modrm & 0x3f);
  }
  if (has_sib) {
    buf_[1] = operand.buf_[1];
  }
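// Example (sketch): if `op` encodes [rbx + 4] with an 8-bit displacement,
// Operand(op, 8) re-encodes the same registers as [rbx + 12], still 8-bit;
// Operand(op, 1000) widens the encoding to a 32-bit displacement.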
bool Operand::AddressUsesRegister(Register reg) const {
  int code = reg.code();
  ASSERT((buf_[0] & 0xC0) != 0xC0);  // Always a memory operand, never mode 3.
  // Start with only the low three bits of the base register; a value of 0x04
  // (rsp) in the ModRM byte signals that a SIB byte follows.
  int base_code = buf_[0] & 0x07;
  if (base_code == rsp.code()) {
    // SIB byte present in buf_[1]: check the index register first.
    int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
    // An index of rsp means "no index register".
    if (index_code != rsp.code() && index_code == code) return true;
    base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
    // A base of rbp with mode 0 means "no base register".
    if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
    return code == base_code;
  } else {
    // No SIB byte: the base register lives in the ModRM byte itself.
    if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
    base_code |= ((rex_ & 0x01) << 3);
    return code == base_code;
  }
}
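// Usage sketch (hypothetical caller): code that must not clobber a register
// still needed for an address computation can ask:
//   Operand op(rbx, rcx, times_4, 0x10);
//   op.AddressUsesRegister(rcx);  // true: rcx is the index register.
//   op.AddressUsesRegister(rax);  // false: neither base nor index.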
#ifdef GENERATED_CODE_COVERAGE
static void InitCoverageLog();
#endif

Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code),
      predictable_code_size_(false) {
  if (buffer == NULL) {
    // Do our own buffer management, reusing the isolate's spare buffer when
    // one is cached.
    buffer = isolate()->assembler_spare_buffer();
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
  } else {
    // Use the externally provided buffer.
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
  }

  // Fill the buffer with int3 so stray execution of uninitialized code traps.
  memset(buffer_, 0xCC, buffer_size);
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

#ifdef GENERATED_CODE_COVERAGE
  InitCoverageLog();
#endif
}

// From ~Assembler(): cache the buffer for reuse by the next Assembler.
isolate()->set_assembler_spare_buffer(buffer_);
// From GetCode(CodeDesc* desc): finalize the descriptor of the generated code.
ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
ASSERT(desc->instr_size > 0);
desc->reloc_size =
    static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

// From Align(int m): pad with nops to an m-byte boundary (m a power of two).
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
// From IsNop: recognize 0x90 (possibly 0x66-prefixed) and the multi-byte
// 0F 1F nop forms.
while (*a == 0x66) a++;
if (*a == 0x90) return true;
if (a[0] == 0xf && a[1] == 0x1f) return true;
void Assembler::bind_to(Label* L, int pos) {
  if (L->is_linked()) {
    // Walk the chain of 32-bit fixups threaded through the code buffer.
    int current = L->pos();
    int next = long_at(current);
    while (next != current) {
      // Displacements are relative to the end of the 32-bit field.
      int imm32 = pos - (current + sizeof(int32_t));
      long_at_put(current, imm32);
      current = next;
      next = long_at(next);
    }
    // Fix up the last fixup on the linked list.
    int last_imm32 = pos - (current + sizeof(int32_t));
    long_at_put(current, last_imm32);
  }
  while (L->is_near_linked()) {
    int fixup_pos = L->near_link_pos();
    int offset_to_next =
        static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
    ASSERT(offset_to_next <= 0);
    int disp = pos - (fixup_pos + sizeof(int8_t));
    set_byte_at(fixup_pos, disp);
    if (offset_to_next < 0) {
      L->link_to(fixup_pos + offset_to_next, Label::kNear);
    }
  }
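// Fixup-chain arithmetic: displacements are always relative to the byte
// after the displacement field, so a 32-bit fixup at fixup_pos resolves to
//   disp32 = pos - (fixup_pos + sizeof(int32_t))
// and a near (8-bit) fixup to disp8 = pos - (fixup_pos + sizeof(int8_t)).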
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute the new buffer size: start at 4 KB, then double.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else {
    desc.buffer_size = 2 * buffer_size_;
  }
  // Some internal data structures overflow for very large buffers.
  if ((desc.buffer_size > kMaximalBufferSize) ||
      (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
  }

  // Set up the new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
  memset(desc.buffer, 0xCC, desc.buffer_size);  // int3 fill, as above.

  // Copy the instructions and the relocation info.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(rc_delta + reloc_info_writer.pos(),
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers, caching the old one as the spare buffer if possible.
  isolate()->set_assembler_spare_buffer(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references: absolute addresses must be rebased.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
      *p += pc_delta;
    }
  }
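// Only absolute INTERNAL_REFERENCE entries need rebasing here: pc-relative
// displacements are unaffected when the whole buffer moves as one block
// (compare RelocInfo::kApplyMask near the end of this file).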
void Assembler::emit_operand(int code, const Operand& adr) {
  const unsigned length = adr.len_;
  // Emit the ModRM byte with the register/opcode field filled in.
  ASSERT((adr.buf_[0] & 0x38) == 0);
  pc_[0] = adr.buf_[0] | code << 3;
  // Emit the rest of the encoded operand (SIB byte and displacement).
  for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
  pc_ += length;
}
void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
  emit_rex_64(reg, op);
  emit(opcode);
  emit_operand(reg, op);
}

void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
  ASSERT((opcode & 0xC6) == 2);
  if (rm_reg.low_bits() == 4) {
    // Swap reg and rm_reg and flip the opcode's operand order.
    emit_rex_64(rm_reg, reg);
    emit(opcode ^ 0x02);
    emit_modrm(rm_reg, reg);
  } else {
    emit_rex_64(reg, rm_reg);
    emit(opcode);
    emit_modrm(reg, rm_reg);
  }
}
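// Encoding note: the two-operand ALU opcodes come in pairs that differ only
// in bit 1, the direction bit: 0x03 is add reg, r/m while 0x01 is
// add r/m, reg. ASSERT((opcode & 0xC6) == 2) checks that bit, so emitting
// opcode ^ 0x02 with the ModRM roles swapped encodes the same operation;
// the assembler uses that freedom to pick a canonical encoding when the
// r/m register's low bits are 4 (rsp or r12).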
void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
  ASSERT((opcode & 0xC6) == 2);
  if (rm_reg.low_bits() == 4) {
    // Swap the operands and flip the direction bit (see the note above).
    emit(0x66);
    emit_optional_rex_32(rm_reg, reg);
    emit(opcode ^ 0x02);
    emit_modrm(rm_reg, reg);
  } else {
    emit(0x66);
    emit_optional_rex_32(reg, rm_reg);
    emit(opcode);
    emit_modrm(reg, rm_reg);
  }
}

void Assembler::arithmetic_op_16(byte opcode,
                                 Register reg,
                                 const Operand& rm_reg) {
  emit(0x66);  // Operand-size override prefix selects the 16-bit operation.
  emit_optional_rex_32(reg, rm_reg);
  emit(opcode);
  emit_operand(reg, rm_reg);
}

void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
  ASSERT((opcode & 0xC6) == 2);
  if (rm_reg.low_bits() == 4) {
    emit_optional_rex_32(rm_reg, reg);
    emit(opcode ^ 0x02);
    emit_modrm(rm_reg, reg);
  } else {
    emit_optional_rex_32(reg, rm_reg);
    emit(opcode);
    emit_modrm(reg, rm_reg);
  }
}

void Assembler::arithmetic_op_32(byte opcode,
                                 Register reg,
                                 const Operand& rm_reg) {
  emit_optional_rex_32(reg, rm_reg);
  emit(opcode);
  emit_operand(reg, rm_reg);
}
void Assembler::immediate_arithmetic_op(byte subcode, Register dst, Immediate src) {
  emit_rex_64(dst);
  if (is_int8(src.value_)) {
    emit(0x83);  // ALU op r/m64, imm8.
    emit_modrm(subcode, dst);
    emit(src.value_);
  } else if (dst.is(rax)) {
    emit(0x05 | (subcode << 3));  // Short form: ALU op rax, imm32.
    emitl(src.value_);
  } else {
    emit(0x81);  // ALU op r/m64, imm32.
    emit_modrm(subcode, dst);
    emitl(src.value_);
  }
}

void Assembler::immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src) {
  emit_rex_64(dst);
  if (is_int8(src.value_)) {
    emit(0x83);
    emit_operand(subcode, dst);
    emit(src.value_);
  } else {
    emit(0x81);
    emit_operand(subcode, dst);
    emitl(src.value_);
  }
}

void Assembler::immediate_arithmetic_op_16(byte subcode, Register dst, Immediate src) {
  emit(0x66);  // Operand-size override prefix.
  emit_optional_rex_32(dst);
  if (is_int8(src.value_)) {
    emit(0x83);
    emit_modrm(subcode, dst);
    emit(src.value_);
  } else if (dst.is(rax)) {
    emit(0x05 | (subcode << 3));
    emitw(src.value_);
  } else {
    emit(0x81);
    emit_modrm(subcode, dst);
    emitw(src.value_);
  }
}

void Assembler::immediate_arithmetic_op_16(byte subcode, const Operand& dst, Immediate src) {
  emit(0x66);
  emit_optional_rex_32(dst);
  if (is_int8(src.value_)) {
    emit(0x83);
    emit_operand(subcode, dst);
    emit(src.value_);
  } else {
    emit(0x81);
    emit_operand(subcode, dst);
    emitw(src.value_);
  }
}

void Assembler::immediate_arithmetic_op_32(byte subcode, Register dst, Immediate src) {
  emit_optional_rex_32(dst);
  if (is_int8(src.value_)) {
    emit(0x83);
    emit_modrm(subcode, dst);
    emit(src.value_);
  } else if (dst.is(rax)) {
    emit(0x05 | (subcode << 3));
    emitl(src.value_);
  } else {
    emit(0x81);
    emit_modrm(subcode, dst);
    emitl(src.value_);
  }
}

void Assembler::immediate_arithmetic_op_32(byte subcode, const Operand& dst, Immediate src) {
  emit_optional_rex_32(dst);
  if (is_int8(src.value_)) {
    emit(0x83);
    emit_operand(subcode, dst);
    emit(src.value_);
  } else {
    emit(0x81);
    emit_operand(subcode, dst);
    emitl(src.value_);
  }
}

void Assembler::immediate_arithmetic_op_8(byte subcode, const Operand& dst, Immediate src) {
  emit_optional_rex_32(dst);
  emit(0x80);  // ALU op r/m8, imm8.
  emit_operand(subcode, dst);
  emit(src.value_);
}

void Assembler::immediate_arithmetic_op_8(byte subcode, Register dst, Immediate src) {
  if (!dst.is_byte_register()) {
    emit_rex_32(dst);  // dst is not one of al, bl, cl, dl.
  }
  emit(0x80);
  emit_modrm(subcode, dst);
  emit(src.value_);
}
void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
  if (shift_amount.value_ == 1) {
    emit_rex_64(dst);
    emit(0xD1);  // Dedicated shift-by-one form.
    emit_modrm(subcode, dst);
  } else {
    emit_rex_64(dst);
    emit(0xC1);
    emit_modrm(subcode, dst);
    emit(shift_amount.value_);
  }
}

// Shift dst by cl % 64 bits.
void Assembler::shift(Register dst, int subcode) {
  emit_rex_64(dst);
  emit(0xD3);
  emit_modrm(subcode, dst);
}

void Assembler::shift_32(Register dst, int subcode) {
  emit_optional_rex_32(dst);
  emit(0xD3);
  emit_modrm(subcode, dst);
}

void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
  if (shift_amount.value_ == 1) {
    emit_optional_rex_32(dst);
    emit(0xD1);
    emit_modrm(subcode, dst);
  } else {
    emit_optional_rex_32(dst);
    emit(0xC1);
    emit_modrm(subcode, dst);
    emit(shift_amount.value_);
  }
}
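// Encoding note: shifts by one have a dedicated opcode (0xD1 /subcode) one
// byte shorter than the general immediate form (0xC1 /subcode, imm8), which
// is why a count of 1 is special-cased; 0xD3 /subcode shifts by cl.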
// bt (0F A3) and bts (0F AB), bit-test instructions on memory operands:
emit_rex_64(src, dst);
emit_operand(src, dst);

emit_rex_64(src, dst);
emit_operand(src, dst);

// From call(Label* L), which threads unresolved sites onto the label's
// fixup chain exactly like jmp and j below:
} else if (L->is_linked()) {

void Assembler::call(Handle<Code> target,
                     RelocInfo::Mode rmode,
                     TypeFeedbackId ast_id) {
  // 1110 1000 #32-bit disp.
  emit(0xE8);
  emit_code_target(target, rmode, ast_id);
}

// call(Register adr) and call(const Operand& op): FF /2.
emit_optional_rex_32(adr);
emit_modrm(0x2, adr);

emit_optional_rex_32(op);
emit_operand(0x2, op);

// call(Address target): E8 with a rel32 displacement measured from the end
// of the instruction.
intptr_t displacement = target - source;
ASSERT(is_int32(displacement));
emitl(static_cast<int32_t>(displacement));
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  if (cc == always) {
    movq(dst, src);
  } else if (cc == never) {
    return;
  }
  emit_rex_64(dst, src);
  emit(0x0F);
  emit(0x40 + cc);  // cmovcc r64, r/m64.
  emit_modrm(dst, src);
}

void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
  if (cc == always) {
    movq(dst, src);
  } else if (cc == never) {
    return;
  }
  emit_rex_64(dst, src);
  emit(0x0F);
  emit(0x40 + cc);
  emit_operand(dst, src);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  if (cc == always) {
    movl(dst, src);
  } else if (cc == never) {
    return;
  }
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x40 + cc);
  emit_modrm(dst, src);
}

void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
  if (cc == always) {
    movl(dst, src);
  } else if (cc == never) {
    return;
  }
  emit_optional_rex_32(dst, src);
  emit(0x0F);
  emit(0x40 + cc);
  emit_operand(dst, src);
}
// decq (REX.W FF /1), register and memory forms:
emit_modrm(0x1, dst);
emit_operand(1, dst);

// decl (FF /1):
emit_optional_rex_32(dst);
emit_modrm(0x1, dst);

emit_optional_rex_32(dst);
emit_operand(1, dst);

// decb (FE /1):
if (!dst.is_byte_register()) {
  emit_rex_32(dst);  // dst is not one of al, bl, cl, dl.
}
emit_modrm(0x1, dst);

emit_optional_rex_32(dst);
emit_operand(1, dst);

// idivq (REX.W F7 /7) and idivl (F7 /7):
emit_modrm(0x7, src);

emit_optional_rex_32(src);
emit_modrm(0x7, src);

// imul(Register src): single-operand form, F7 /5.
emit_modrm(0x5, src);

// imul, two-operand forms (REX.W 0F AF /r), register and memory:
emit_rex_64(dst, src);
emit_modrm(dst, src);

emit_rex_64(dst, src);
emit_operand(dst, src);

// imul with immediate: 6B /r ib when the immediate fits in eight bits,
// otherwise 69 /r id.
emit_rex_64(dst, src);
if (is_int8(imm.value_)) {
  emit(0x6B);
  emit_modrm(dst, src);
  emit(imm.value_);
} else {
  emit(0x69);
  emit_modrm(dst, src);
  emitl(imm.value_);
}

// imull: the 32-bit variants of the same three forms.
emit_optional_rex_32(dst, src);
emit_modrm(dst, src);

emit_optional_rex_32(dst, src);
emit_operand(dst, src);

emit_optional_rex_32(dst, src);
if (is_int8(imm.value_)) {
  emit(0x6B);
  emit_modrm(dst, src);
  emit(imm.value_);
} else {
  emit(0x69);
  emit_modrm(dst, src);
  emitl(imm.value_);
}

// incq (REX.W FF /0), register and memory forms, then incl:
emit_modrm(0x0, dst);
emit_operand(0, dst);

emit_optional_rex_32(dst);
emit_operand(0, dst);

emit_optional_rex_32(dst);
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
  if (cc == always) {
    jmp(L);
  } else if (cc == never) {
    return;
  }
  if (L->is_bound()) {
    const int short_size = 2;  // 70+cc disp8.
    const int long_size = 6;   // 0F 80+cc disp32.
    int offs = L->pos() - pc_offset();
    if (is_int8(offs - short_size) && !predictable_code_size_) {
      emit(0x70 | cc);
      emit((offs - short_size) & 0xFF);
    } else {
      emit(0x0F);
      emit(0x80 | cc);
      emitl(offs - long_size);
    }
  } else if (distance == Label::kNear) {
    emit(0x70 | cc);
    byte disp = 0x00;
    if (L->is_near_linked()) {
      int offset = L->near_link_pos() - pc_offset();
      disp = static_cast<byte>(offset & 0xFF);
    }
    L->link_to(pc_offset(), Label::kNear);
    emit(disp);
  } else if (L->is_linked()) {
    emit(0x0F);
    emit(0x80 | cc);
    emitl(L->pos());
    L->link_to(pc_offset() - sizeof(int32_t));
  } else {
    emit(0x0F);
    emit(0x80 | cc);
    int32_t current = pc_offset();
    emitl(current);
    L->link_to(current);
  }
}
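// Jump-size strategy: a bound (backward) target can use the two-byte short
// form (70+cc disp8) when the displacement fits in a signed byte, otherwise
// the six-byte near form (0F 80+cc disp32). Unbound targets emit a
// placeholder and thread this site onto the label's fixup chain for
// bind_to() to resolve; predictable_code_size_ disables the short form so
// the emitted size never depends on label distance.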
void Assembler::j(Condition cc,
                  Handle<Code> target,
                  RelocInfo::Mode rmode) {
  // 0000 1111 1000 tttn #32-bit disp.
  emit(0x0F);
  emit(0x80 | cc);
  emit_code_target(target, rmode);
}

void Assembler::jmp(Label* L, Label::Distance distance) {
  const int short_size = sizeof(int8_t);   // EB disp8.
  const int long_size = sizeof(int32_t);   // E9 disp32.
  if (L->is_bound()) {
    int offs = L->pos() - pc_offset() - 1;
    if (is_int8(offs - short_size) && !predictable_code_size_) {
      emit(0xEB);
      emit((offs - short_size) & 0xFF);
    } else {
      emit(0xE9);
      emitl(offs - long_size);
    }
  } else if (distance == Label::kNear) {
    emit(0xEB);
    byte disp = 0x00;
    if (L->is_near_linked()) {
      int offset = L->near_link_pos() - pc_offset();
      disp = static_cast<byte>(offset & 0xFF);
    }
    L->link_to(pc_offset(), Label::kNear);
    emit(disp);
  } else if (L->is_linked()) {
    emit(0xE9);
    emitl(L->pos());
    L->link_to(pc_offset() - long_size);
  } else {
    emit(0xE9);
    int32_t current = pc_offset();
    emitl(current);
    L->link_to(current);
  }
}
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
  // 1110 1001 #32-bit disp.
  emit(0xE9);
  emit_code_target(target, rmode);
}

// jmp(Register target) and jmp(const Operand& src): FF /4.
emit_optional_rex_32(target);
emit_modrm(0x4, target);

emit_optional_rex_32(src);
emit_operand(0x4, src);

// lea (REX.W 8D /r) and leal (8D /r):
emit_rex_64(dst, src);
emit_operand(dst, src);

emit_optional_rex_32(dst, src);
emit_operand(dst, src);

// load_rax(void* value, RelocInfo::Mode mode): a 64-bit absolute load of rax.
emitq(reinterpret_cast<uintptr_t>(value), mode);

void Assembler::load_rax(ExternalReference ref) {
  load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
// movb(Register dst, const Operand& src): 8A /r; byte registers other than
// al, bl, cl, dl need a REX prefix.
if (!dst.is_byte_register()) {
  emit_rex_32(dst, src);
} else {
  emit_optional_rex_32(dst, src);
}
emit_operand(dst, src);

// movb(Register dst, Immediate imm): B0+r ib.
if (!dst.is_byte_register()) {
  emit_rex_32(dst);
}
emit(0xB0 + dst.low_bits());

// movb(const Operand& dst, Register src): 88 /r.
if (!src.is_byte_register()) {
  emit_rex_32(src, dst);
} else {
  emit_optional_rex_32(src, dst);
}
emit_operand(src, dst);

// movw(const Operand& dst, Register src): 66 prefix + 89 /r.
emit_optional_rex_32(src, dst);
emit_operand(src, dst);

// movl(Register dst, const Operand& src): 8B /r.
emit_optional_rex_32(dst, src);
emit_operand(dst, src);

// movl(Register dst, Register src): two encodings exist, the store form
// (89 /r) and the load form (8B /r); the assembler picks by the source
// register's low bits.
if (src.low_bits() == 4) {
  emit_optional_rex_32(src, dst);
  emit_modrm(src, dst);
} else {
  emit_optional_rex_32(dst, src);
  emit_modrm(dst, src);
}

// movl(const Operand& dst, Register src): 89 /r.
emit_optional_rex_32(src, dst);
emit_operand(src, dst);

// movl(const Operand& dst, Immediate value): C7 /0 imm32.
emit_optional_rex_32(dst);
emit_operand(0x0, dst);

// movl(Register dst, Immediate value): B8+r imm32.
emit_optional_rex_32(dst);
emit(0xB8 + dst.low_bits());

// movq(Register dst, const Operand& src): REX.W 8B /r.
emit_rex_64(dst, src);
emit_operand(dst, src);

// movq(Register dst, Register src): same 89/8B choice as movl above.
if (src.low_bits() == 4) {
  emit_rex_64(src, dst);
  emit_modrm(src, dst);
} else {
  emit_rex_64(dst, src);
  emit_modrm(dst, src);
}

// movq(Register dst, Immediate value): REX.W C7 /0 imm32, sign-extended.
emit_modrm(0x0, dst);

// movq(const Operand& dst, Register src): REX.W 89 /r.
emit_rex_64(src, dst);
emit_operand(src, dst);
void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
  // The stored address must not be a GC-visible heap reference.
  ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
  emit_rex_64(dst);
  emit(0xB8 | dst.low_bits());
  emitq(reinterpret_cast<uintptr_t>(value), rmode);
}

void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
  // Non-relocatable values might not need the full 64-bit representation.
  if (rmode == RelocInfo::NONE) {
    if (is_int32(value)) {
      movq(dst, Immediate(static_cast<int32_t>(value)));  // Sign-extended.
      return;
    } else if (is_uint32(value)) {
      movl(dst, Immediate(static_cast<int32_t>(value)));  // Zero-extended.
      return;
    }
  }
  emit_rex_64(dst);
  emit(0xB8 | dst.low_bits());
  emitq(value, rmode);
}

void Assembler::movq(Register dst, ExternalReference ref) {
  int64_t value = reinterpret_cast<int64_t>(ref.address());
  movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
}
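// Immediate-width note: the movabs form (REX.W B8+r, 10 bytes) is only
// needed for genuinely 64-bit values; values fitting 32 bits fall back to
// movl (B8+r imm32, zero-extending) or the sign-extending movq imm32 form,
// saving three to five bytes per load.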
// movq(const Operand& dst, Immediate value): REX.W C7 /0 imm32.
emit_operand(0, dst);

void Assembler::movl(const Operand& dst, Label* src) {
  emit_optional_rex_32(dst);
  emit(0xC7);
  emit_operand(0, dst);
  if (src->is_bound()) {
    int offset = src->pos() - pc_offset() - sizeof(int32_t);
    emitl(offset);
  } else if (src->is_linked()) {
    emitl(src->pos());
    src->link_to(pc_offset() - sizeof(int32_t));
  } else {
    ASSERT(src->is_unused());
    int32_t current = pc_offset();
    emitl(current);
    src->link_to(current);
  }
}

void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
  ASSERT(value->IsHeapObject());
  emit_rex_64(dst);
  emit(0xB8 | dst.low_bits());
  emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
}
// movsxbq (REX.W 0F BE) and movsxwq (REX.W 0F BF): sign-extending loads.
emit_rex_64(dst, src);
emit_operand(dst, src);

emit_rex_64(dst, src);
emit_operand(dst, src);

// movsxlq (REX.W 63), register and operand forms:
emit_rex_64(dst, src);
emit_modrm(dst, src);

emit_rex_64(dst, src);
emit_operand(dst, src);

// movzxbq, movzxbl, movzxwq, movzxwl (0F B6 / 0F B7): zero-extension to 64
// bits is implicit in 32-bit operations, so REX.W is never needed.
emit_optional_rex_32(dst, src);
emit_operand(dst, src);

emit_optional_rex_32(dst, src);
emit_operand(dst, src);

emit_optional_rex_32(dst, src);
emit_operand(dst, src);

emit_optional_rex_32(dst, src);
emit_operand(dst, src);
// mul(Register src): unsigned multiply by rax, F7 /4.
emit_modrm(0x4, src);

// neg (REX.W F7 /3), negl, and the memory form:
emit_modrm(0x3, dst);

emit_optional_rex_32(dst);
emit_modrm(0x3, dst);

emit_operand(3, dst);

// not (REX.W F7 /2), the memory form, and notl:
emit_modrm(0x2, dst);

emit_operand(2, dst);

emit_optional_rex_32(dst);
emit_modrm(0x2, dst);

// pop: 58+r for registers, 8F /0 for memory operands.
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());

emit_optional_rex_32(dst);
emit_operand(0, dst);

// push: 50+r for registers, FF /6 for memory operands.
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());

emit_optional_rex_32(src);
emit_operand(6, src);

// push(Immediate value): 68 imm32.
emitl(value.value_);
// ret(int imm16): C2 iw, immediate emitted low byte first.
emit(imm16 & 0xFF);
emit((imm16 >> 8) & 0xFF);

// setcc(Condition cc, Register reg): 0F 90+cc /0.
if (!reg.is_byte_register()) {
  // With a REX prefix the encodings 4-7 address spl, bpl, sil, dil.
  emit_rex_32(reg);
}
emit_modrm(0x0, reg);

// shld and shrd (double-precision shifts by cl):
emit_rex_64(src, dst);
emit_modrm(src, dst);

emit_rex_64(src, dst);
emit_modrm(src, dst);
void Assembler::xchg(Register dst, Register src) {
  if (src.is(rax) || dst.is(rax)) {
    // One-byte 90+r form when either operand is rax.
    Register other = src.is(rax) ? dst : src;
    emit_rex_64(other);
    emit(0x90 | other.low_bits());
  } else if (dst.low_bits() == 4) {
    emit_rex_64(dst, src);
    emit(0x87);
    emit_modrm(dst, src);
  } else {
    emit_rex_64(src, dst);
    emit(0x87);
    emit_modrm(src, dst);
  }
}
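// xchg with rax uses the compact 90+r encoding; 0x90 by itself
// (xchg rax, rax without REX) is the canonical single-byte nop, which is
// what IsNop() recognizes above.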
// store_rax(void* dst, RelocInfo::Mode mode): a 64-bit absolute store of rax.
emitq(reinterpret_cast<uintptr_t>(dst), mode);

void Assembler::store_rax(ExternalReference ref) {
  store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}

void Assembler::testb(Register dst, Register src) {
  if (src.low_bits() == 4) {
    emit_rex_32(src, dst);
    emit(0x84);
    emit_modrm(src, dst);
  } else {
    if (!dst.is_byte_register() || !src.is_byte_register()) {
      // Neither operand is one of al, bl, cl, dl: force a REX prefix.
      emit_rex_32(dst, src);
    }
    emit(0x84);
    emit_modrm(dst, src);
  }
}

// testb(Register reg, Immediate mask), non-rax case: F6 /0 ib.
if (!reg.is_byte_register()) {
  emit_rex_32(reg);
}
emit_modrm(0x0, reg);

// testb(const Operand& op, Immediate mask): F6 /0 ib.
emit_optional_rex_32(rax, op);
emit_operand(rax, op);

// testb(const Operand& op, Register reg): 84 /r.
if (!reg.is_byte_register()) {
  emit_rex_32(reg, op);
} else {
  emit_optional_rex_32(reg, op);
}
emit_operand(reg, op);
void Assembler::testl(Register dst, Register src) {
  if (src.low_bits() == 4) {
    emit_optional_rex_32(src, dst);
    emit(0x85);
    emit_modrm(src, dst);
  } else {
    emit_optional_rex_32(dst, src);
    emit(0x85);
    emit_modrm(dst, src);
  }
}

// testl(Register reg, Immediate mask), non-rax case: F7 /0 imm32.
emit_optional_rex_32(rax, reg);
emit_modrm(0x0, reg);

// testl(const Operand& op, Immediate mask): F7 /0 imm32.
emit_optional_rex_32(rax, op);
emit_operand(rax, op);

// testq(const Operand& op, Register reg): REX.W 85 /r.
emit_rex_64(reg, op);
emit_operand(reg, op);

void Assembler::testq(Register dst, Register src) {
  if (src.low_bits() == 4) {
    emit_rex_64(src, dst);
    emit(0x85);
    emit_modrm(src, dst);
  } else {
    emit_rex_64(dst, src);
    emit(0x85);
    emit_modrm(dst, src);
  }
}
// fld(i): push st(i).
emit_farith(0xD9, 0xC0, i);

// fld_s (D9 /0) and fld_d (DD /0): load float/double from memory.
emit_optional_rex_32(adr);
emit_operand(0, adr);

emit_optional_rex_32(adr);
emit_operand(0, adr);

// fstp_s (D9 /3) and fstp_d (DD /3): store and pop.
emit_optional_rex_32(adr);
emit_operand(3, adr);

emit_optional_rex_32(adr);
emit_operand(3, adr);

// fstp(index): store into st(index) and pop.
emit_farith(0xDD, 0xD8, index);

// fild_s (DB /0) and fild_d (DF /5): load integer.
emit_optional_rex_32(adr);
emit_operand(0, adr);

emit_optional_rex_32(adr);
emit_operand(5, adr);

// fistp_s (DB /3): store 32-bit integer and pop.
emit_optional_rex_32(adr);
emit_operand(3, adr);

// fisttp_s (DB /1) and fisttp_d (DD /1): truncating integer stores (SSE3).
emit_optional_rex_32(adr);
emit_operand(1, adr);

emit_optional_rex_32(adr);
emit_operand(1, adr);

// fist_s (DB /2): store integer without popping.
emit_optional_rex_32(adr);
emit_operand(2, adr);

// fistp_d (DF /7):
emit_optional_rex_32(adr);
emit_operand(7, adr);

// Register-stack arithmetic, all via emit_farith:
emit_farith(0xDC, 0xC0, i);  // fadd(i)
emit_farith(0xDC, 0xE8, i);  // fsub(i)

// fisub_s (DA /4): subtract a 32-bit memory integer from st(0).
emit_optional_rex_32(adr);
emit_operand(4, adr);

emit_farith(0xDC, 0xC8, i);  // fmul(i)
emit_farith(0xDC, 0xF8, i);  // fdiv(i)
emit_farith(0xDE, 0xC0, i);  // faddp(i)
emit_farith(0xDE, 0xE8, i);  // fsubp(i)
emit_farith(0xDE, 0xE0, i);  // fsubrp(i)
emit_farith(0xDE, 0xC8, i);  // fmulp(i)
emit_farith(0xDE, 0xF8, i);  // fdivp(i)
emit_farith(0xD9, 0xC8, i);  // fxch(i)
emit_farith(0xDD, 0xC0, i);  // ffree(i)
emit_farith(0xDD, 0xE8, i);  // fucomp(i)
void Assembler::emit_farith(int b1, int b2, int i) {
  ASSERT(is_uint8(b1) && is_uint8(b2));  // Wrong opcode.
  ASSERT(0 <= i && i < 8);  // Illegal stack offset.
  emit(b1);
  emit(b2 + i);
}
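// x87 stack operands are encoded by adding the stack index to a base second
// byte: fadd(3), for example, emits DC C3, i.e. fadd st(3), st(0).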
// movd: 32-bit moves between GP and XMM registers (66 0F 6E / 66 0F 7E);
// the REX.W forms below are the corresponding 64-bit movq moves.
emit_optional_rex_32(dst, src);
emit_optional_rex_32(src, dst);
emit_rex_64(dst, src);
emit_rex_64(src, dst);

// movq(XMMRegister dst, XMMRegister src): picks the F3 0F 7E form or the
// 66 0F D6 form depending on the destination's low bits.
if (dst.low_bits() == 4) {
  emit_optional_rex_32(dst, src);
} else {
  emit_optional_rex_32(src, dst);
}

// movdqa, store and load forms:
emit_rex_64(src, dst);
emit_rex_64(dst, src);

// extractps (66 0F 3A 17, SSE4.1):
emit_optional_rex_32(dst, src);

// movsd: store (F2 0F 11), register-register, and load (F2 0F 10) forms:
emit_optional_rex_32(src, dst);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);

// movaps and movapd: use the store encoding (0F 29 forms) when the source's
// low bits are 4, otherwise the load encoding (0F 28 forms):
if (src.low_bits() == 4) {
  emit_optional_rex_32(src, dst);
} else {
  emit_optional_rex_32(dst, src);
}
if (src.low_bits() == 4) {
  emit_optional_rex_32(src, dst);
} else {
  emit_optional_rex_32(dst, src);
}

// movss, load and store forms (F3 0F 10 / F3 0F 11):
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);

// Truncating conversions cvttss2si (F3 0F 2C) and cvttsd2si (F2 0F 2C),
// operand and register forms, plus cvttsd2siq with REX.W:
emit_optional_rex_32(dst, src);
emit_operand(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_operand(dst, src);
emit_optional_rex_32(dst, src);
emit_rex_64(dst, src);

// Integer-to-float conversions: cvtlsi2sd (F2 0F 2A, operand and register
// forms), cvtlsi2ss (F3 0F 2A), cvtqsi2sd (REX.W F2 0F 2A):
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_rex_64(dst, src);

// cvtss2sd (register and operand forms), cvtsd2ss, cvtsd2si, cvtsd2siq:
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_rex_64(dst, src);

// addsd, mulsd, subsd, divsd (F2 0F 58/59/5C/5E), andpd (66 0F 54),
// orpd (66 0F 56), xorpd (66 0F 57), xorps (0F 57), sqrtsd (F2 0F 51):
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);

// ucomisd (66 0F 2E), register and operand forms:
emit_optional_rex_32(dst, src);
emit_optional_rex_32(dst, src);

// roundsd (66 0F 3A 0B, SSE4.1):
emit_optional_rex_32(dst, src);
emit(static_cast<byte>(mode) | 0x8);  // Mask the precision exception.

// movmskpd (66 0F 50):
emit_optional_rex_32(dst, src);

// emit_sse_operand, memory form: reuse the GP-register operand encoder by
// wrapping the XMM register code in a Register struct.
Register ireg = { reg.code() };
emit_operand(ireg, adr);

// emit_sse_operand, register-direct forms:
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
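// 0xC0 | (dst.low_bits() << 3) | src.low_bits() is a ModRM byte with mode 3
// (register-direct): reg field = dst, r/m field = src. The high register
// bits travel in the REX prefix emitted beforehand.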
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // Don't record external references unless the heap will be serialized.
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    if (!Serializer::enabled() && !emit_debug_code()) return;
  }
  RelocInfo rinfo(pc_, rmode, data, NULL);
  reloc_info_writer.Write(&rinfo);
}

// RecordJSReturn and RecordDebugBreakSlot mark code positions for the
// debugger:
RecordRelocInfo(RelocInfo::JS_RETURN);

RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);

// RecordComment(const char* msg, bool force):
if (FLAG_code_comments || force) {
  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.
  return (1 << rmode_) & kApplyMask;
}
#endif  // V8_TARGET_ARCH_X64