#if V8_TARGET_ARCH_MIPS

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}
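// Each CPU feature occupies one bit of supported_, keyed by the CpuFeature
// enum. An illustrative (hypothetical, not from this file) query would be:
//   bool has_fpu = (CpuFeatures::supported_ & (1u << FPU)) != 0;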

static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= static_cast<uint64_t>(1) << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

#ifdef __mips__
  // If the compiler is allowed to use the FPU, the generated code may use it
  // too, even when producing snapshots.
#if (defined(__mips_hard_float) && __mips_hard_float != 0)
  answer |= static_cast<uint64_t>(1) << FPU;
#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
#endif  // def __mips__

  return answer;
}
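// A hard-float toolchain (__mips_hard_float) implies FPU availability at
// compile time, so no runtime probe is needed for code baked into snapshots.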

const char* DoubleRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
    // ... remaining even-numbered FPU register names elided ...
  };
  return names[index];
}


void CpuFeatures::Probe() {
  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                CpuFeaturesImpliedByCompiler());
  ASSERT(supported_ == 0 || supported_ == standard_features);
  // ...
  supported_ |= standard_features;
  // ...
#if !defined(__mips__)
  // For the simulator build, always enable the FPU.
  supported_ |= static_cast<uint64_t>(1) << FPU;
#else
  // Probe for FPU support at runtime.
  // ...
  supported_ |= static_cast<uint64_t>(1) << FPU;
  found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
#endif
}

int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    // ... one entry per architecture register, indexed by reg.code() ...
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg, at, v0, v1, a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    // ... remaining registers elided ...
  };
  return kRegisters[num];
}
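// ToNumber() and ToRegister() are inverse mappings between Register values
// and their hardware encodings; e.g. ToRegister(ToNumber(t0)).is(t0) holds
// for every valid register (illustrative property, not stated in the source).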

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. On
  // MIPS a target address lives in an lui/ori instruction pair rather than
  // inline, so it is always coded specially.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  return false;
}

void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }
  // ...
}
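// After patching, the instruction cache must be flushed for the patched range
// (CPU::FlushICache) so the CPU does not execute stale instructions; the
// flush call itself is elided here.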

void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // ...
}
// Operand's Handle<Object> constructor decides whether relocation is needed.
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed for smis.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }

MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}

static const int kNegOffset = 0x00008000;
// kNegOffset forms the tail of the canned negative-offset instruction
// patterns (e.g. kLwRegFpNegOffsetPattern), which all end with:
//     ... | (kNegOffset & kImm16Mask);

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize) for BlockTrampolinePoolScope
  // buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  // ...
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;
  // ...
  ClearRecordedAstId();
}

uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}

// The link chain is terminated by a value of -4 in the instruction, which is
// an otherwise illegal value (a branch to -4 is forbidden).
const int kEndOfChain = -4;
// Determines the end of the jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
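// Unbound labels are threaded through the code: each branch referring to an
// unbound label stores, in its own offset field, the position of the previous
// reference, and the label records the most recent one. A sketch of the walk
// (names illustrative):
//   int pos = label.pos();
//   while (target_at(pos) != kEndOfChain) pos = target_at(pos);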

bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      // ... the remaining branch opcodes elided ...
      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label constant.
}

bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}

bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}

bool Assembler::IsNop(Instr instr, int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0). When marking a
  // non-zero type, sll(zero_reg, at, type) is used instead, to avoid the
  // mips ssnop and ehb special emulated instructions.
  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);
  return ret;
}
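// Worked example: the canonical nop is sll zero_reg, zero_reg, 0, which
// encodes as the all-zero word 0x00000000 (SPECIAL opcode, SLL function, and
// every register/shift field zero).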

int32_t Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  ASSERT(IsLw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}

int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) return kEndOfChain;
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
    return (imm18 + pos);
  }
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
    // The kEndOfChain sentinel is returned directly, not relative to pos.
    return (imm18 == kEndOfChain) ? kEndOfChain
                                  : pos + kBranchPCOffset + imm18;
  } else if (IsLui(instr)) {
    // Reassemble the absolute 32-bit target from the lui/ori pair.
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm == kEndOfJumpChain) return kEndOfChain;
    uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
    int32_t delta = instr_address - imm;
    ASSERT(pos > delta);
    return pos - delta;
  } else {
    // J/JAL: the 26-bit target field, shifted left by 2.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) return kEndOfChain;
    uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
    instr_address &= kImm28Mask;
    int32_t delta = instr_address - imm28;
    ASSERT(pos > delta);
    return pos - delta;
  }
}
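// Worked example: for a lui/ori pair holding imm = 0x12345678, the lui
// immediate field carries 0x1234 and the ori field 0x5678; target_at()
// reconstructs imm and converts it back to a buffer-relative position.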

void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch: make it relative to the
    // Code* of the generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    ASSERT((imm18 & 3) == 0);
    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    ASSERT(is_int16(imm16));
    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    ASSERT((imm & 3) == 0);
    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);
    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}

void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}

void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have a valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting the link with the target.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        // Out of range: route the branch through a trampoline slot.
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
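// If a forward branch turns out to exceed kMaxBranchOffset once its label is
// bound, it is retargeted at a trampoline slot instead; the trampoline then
// performs the remaining, unlimited-range jump to the real target.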

void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}

bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

// Emitters for the three MIPS instruction formats: each overload validates
// its operands, assembles the bit fields, and emits the 32-bit word.

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 Register rd, uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}

void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
                                 uint16_t msb, uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  // ...
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 FPURegister ft, FPURegister fs,
                                 FPURegister fd, SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  // ...
}

void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr,
                                 FPURegister ft, FPURegister fs,
                                 FPURegister fd, SecondaryField func) {
  ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  // ...
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 Register rt, FPURegister fs, FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  // ...
}

void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
                                 Register rt, FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  // ...
}

// Instructions with an immediate value (I-format).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  SecondaryField SF, int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}

void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  // ...
}

// Jump instructions (J-format) carry a 26-bit word address.
void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For the associated delay slot.
}

// Returns the next free trampoline entry from the trampoline pool.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }
    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}

uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  ASSERT((imm & 3) == 0);
  return imm;
}

int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));
  return offset;
}
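// The offset is stored as a signed 16-bit count of instructions (hence the
// >> 2), so a conditional branch can reach roughly +/-128 KB: 32767
// instructions of 4 bytes in either direction from the delay slot.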

void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}

void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}

void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For the associated delay slot.
}

void Assembler::bgtz(Register rs, int16_t offset) {
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
}

void Assembler::blez(Register rs, int16_t offset) {
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
}

void Assembler::bne(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BNE, rs, rt, offset);
}

void Assembler::j(int32_t target) {
#if DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                  (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}

void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For the associated delay slot.
}

void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                  (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(JAL, target >> 2);
}

void Assembler::jalr(Register rs, Register rd) {
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
}

// j_or_jr() and jal_or_jalr() perform the same delay-slot-pc range check as
// j() and jal() above:
//   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
//   bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
//                   (kImm26Bits + kImmFieldShift)) == 0;
// and emit the absolute jump when in range, the register jump otherwise.
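// A J/JAL target shares its upper 4 bits with the address of the delay slot,
// i.e. both must lie in the same 256 MB region; the XOR/shift test above is
// intended to verify exactly that before a direct jump is emitted.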

void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}

void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}

void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}

void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}

// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}

void Assembler::andi(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ANDI, rs, rt, j);
}

void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}

void Assembler::ori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ORI, rs, rt, j);
}

void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}

void Assembler::xori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(XORI, rs, rt, j);
}

void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}

void Assembler::sll(Register rd, Register rt, uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nops in the form sll zero_reg, zero_reg to be generated using
  // the sll instruction; they must go through the nop(type) pseudo
  // instructions instead.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}

void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}

void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}

void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  // ...
}

void Assembler::rotrv(Register rd, Register rt, Register rs) {
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
  // ...
}

// Helper for base-register + offset loads/stores whose offset doesn't fit in
// 16 bits: materialize the full offset in at, then add the base register.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
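// Worked example: lw(t0, MemOperand(s0, 0x12348)) does not fit in 16 bits, so
// it expands to lui at, 0x1; ori at, at, 0x2348; addu at, at, s0;
// lw t0, 0(at) (register choices illustrative).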

void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);
  }
}

void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);
  }
}

void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);
  }
}

void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);
  }
}

void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);
  }
}

void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}

void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}

void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);
  }
}

void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);
  }
}

void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);
  }
}

void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}

void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}

void Assembler::lui(Register rd, int32_t j) {
  GenInstrImmediate(LUI, zero_reg, rd, j);
}

void Assembler::break_(uint32_t code, bool break_as_stop) {
  ASSERT((code & ~0xfffff) == 0);
  // ...
}

void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The simulator handles the stop instruction and reads the message address
  // that follows it; on MIPS, stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}

// The conditional trap instructions (tge, tgeu, tlt, tltu, teq, tne) all
// share one encoding scheme, e.g.:
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}
// The remaining traps differ only in their SecondaryField.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}

void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}

void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}

void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}

void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}

// movt/movf encode the FPU condition-code index and truth bit in rt:
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  // ...
}

void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  // ...
}

void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}

void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}

void Assembler::pref(int32_t hint, const MemOperand& rs) {
  ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
  // ...
}

void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}

void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber: issue a 64-bit load
  // as two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}

void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}

void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber: issue a 64-bit store
  // as two 32-bit stores.
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
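// Splitting ldc1/sdc1 into paired lwc1/swc1 accesses sidesteps the MIPS
// requirement that 64-bit FPU loads/stores be 8-byte aligned, which V8's heap
// layout cannot guarantee for doubles; fd and fd+1 are the two 32-bit halves
// of the double register pair.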

void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}

void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}

void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  OS::MemCopy(&i, &d, 8);
  *lo = i & 0xffffffff;
  *hi = i >> 32;
}

// Arithmetic (double precision).

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}

void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}

void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}

void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}

// FPU compare: sets the given condition-code bit.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
                  FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}

void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}

void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}

void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}

void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}

int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
  Instr instr = instr_at(pc);
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
    return 2;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);
    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}

void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute the new buffer size, starting at 4 KB.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else {
    // ... double, then grow linearly past 1 MB ...
  }

  // Set up the new buffer and copy the data.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // On MIPS, internal references are absolute addresses embedded in the code
  // and must be shifted by pc_delta.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }
}

void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
      // ...
    }
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}

void Assembler::CheckTrampolinePool() {
  // Some small instruction sequences must not be broken up by trampoline pool
  // emission; they are protected by trampoline_pool_blocked_nesting_ and
  // no_trampoline_pool_before_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; retry as soon as possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First emit a jump over the pool, then the pool itself: one slot per
    // unbound label.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until the associated instructions are emitted and
          // available to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      // The trampoline is emitted at most once; block further emission.
      trampoline_emitted_ = true;
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // No branches to unbound labels right now, so postpone the next check.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
}
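// Each trampoline slot is a four-instruction sequence (lui/ori/jr/nop) that
// can reach any address; an out-of-range short branch is redirected to its
// slot, which then completes the jump, as sketched above.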

Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret the two instructions generated by li: lui/ori.
  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
    // Assemble the 32-bit value.
    return reinterpret_cast<Address>(
        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
  }
  // We should never get here; force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  // Patch the address into the lui/ori pair.
  // ...
  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // Optimization for the common Call()/Jump() pattern
  //   li(t9, address); jalr(t9)  (or jr(t9)):
  // if the target is in the same 256 MB region as the jump site, the register
  // jump can be replaced by a direct jal/j, and reverted again when a later
  // patch moves the target out of range.
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
  uint32_t target_field =
      static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
  // Workaround for the MIPS 24k core E156 bug (also affects some 34k cores):
  // do not emit a direct jump whose delay slot lies in the first or last
  // 32 KB of a 256 MB segment.
  if (in_range) {
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
      in_range = false;
  }
#endif

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J; skip returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
      *(p+2) = JAL | target_field;
    } else {
      // Patching JAL, but out of range: revert to JALR.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
      *(p+2) = J | target_field;
    } else {
      // Patching J, but out of range: revert to JR.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      *(p+2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}
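// Worked example: with 256 MB regions, ipc = 0x10000000 and
// itarget = 0x12345678 share their upper 4 bits (both >> 28 == 0x1), so the
// register jump may be rewritten as a direct jal; a later patch to
// 0x20000000 would fail the check and revert it to jalr.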

void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to a lui/ori pair; a jump converted to jal/j may follow
  // at pc + 2 * kInstrSize and must be reverted to jalr/jr.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p+2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    CPU::FlushICache(pc + 2, sizeof(Address));
  }
}

#endif  // V8_TARGET_ARCH_MIPS