#if defined(V8_TARGET_ARCH_MIPS)

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;

static uint64_t CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_FPU_INSTRUCTIONS
#endif  // def CAN_USE_FPU_INSTRUCTIONS
#if (defined(__mips_hard_float) && __mips_hard_float != 0)
#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
#endif  // def __mips__
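// Note (added commentary): features implied by compiler predefines like the
// ones above are safe to assume even when generating a snapshot, since the
// build toolchain guarantees the target supports them; anything beyond that
// has to be found by probing the CPU at run time, as below.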
      CpuFeaturesImpliedByCompiler());
  ASSERT(supported_ == 0 || supported_ == standard_features);

  supported_ |= standard_features;

#if !defined(__mips__)
  if (FLAG_enable_fpu) {
    supported_ |= 1u << FPU;

  supported_ |= 1u << FPU;
  found_by_runtime_probing_ |= 1u << FPU;
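// Note (added commentary): on real MIPS hardware the FPU bit is set only
// after MipsCpuHasFeature() reports one at run time;
// found_by_runtime_probing_ records that the feature was probed rather than
// guaranteed at compile time.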
const int kNumbers[] = {

  return kNumbers[reg.code()];

const Register kRegisters[] = {
  t0, t1, t2, t3, t4, t5, t6, t7,

  return kRegisters[num];

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;

bool RelocInfo::IsCodedSpecially() {

  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);

void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {

Operand::Operand(Handle<Object> handle) {

  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;

    imm32_ = reinterpret_cast<intptr_t>(obj);

static const int kNegOffset = 0x00008000;

    | (kNegOffset & kImm16Mask);

static const int kMinimalBufferSize = 4 * KB;
    : AssemblerBase(arg_isolate),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code) {
  if (buffer == NULL) {
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);

    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);

    buffer_size_ = buffer_size;

    buffer_size_ = buffer_size;

  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;

  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;

  trampoline_emitted_ = false;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();

  isolate()->set_assembler_spare_buffer(buffer_);

  ASSERT(pc_ <= reloc_info_writer.pos());

  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;

  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  return instr & ~kImm16Mask;

const int kEndOfChain = -4;

const int kEndOfJumpChain = 0;
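// Note (added commentary): while a label is unbound, every branch that
// targets it is threaded into a linked list through the branches' own 16-bit
// immediate fields, and kEndOfChain (-4) terminates that list. Long jumps
// built from a lui/ori pair instead store an absolute address and use
// kEndOfJumpChain (0) as their terminator, since a real target inside the
// code buffer is never address 0.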
  return opcode == BEQ ||

      (opcode == COP1 && rs_field == BC1) ||

  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
       ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));

  return opcode == LUI;

  return opcode == ORI;

  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  Register nop_rt_reg = (type == 0) ? zero_reg : at;

      rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
      rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&

  return static_cast<int16_t>(instr & kImm16Mask) << 2;
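// Note (added commentary): a branch immediate is a signed 16-bit count of
// instructions relative to the instruction after the branch (the delay
// slot), so decoding sign-extends it and shifts left by 2 to get a byte
// offset; the reachable range is therefore roughly +/-128KB.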
  return instr & kImm16Mask;

  return (instr & ~kImm16Mask) | (offset & kImm16Mask);

  return (instr & ~kImm16Mask) | (offset & kImm16Mask);

  if ((instr & ~kImm16Mask) == 0) {

  return imm18 + pos;

    if (imm18 == kEndOfChain) {

  } else if (IsLui(instr)) {

    if (imm == kEndOfJumpChain) {

    uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
    int32_t delta = instr_address - imm;

    if (imm28 == kEndOfJumpChain) {

    uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);

    int32_t delta = instr_address - imm28;
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);

  instr &= ~kImm16Mask;

  } else if (IsLui(instr)) {

    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

        instr_ori | (imm & kImm16Mask));

    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;

    uint32_t imm26 = imm28 >> 2;
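// Note (added commentary): a full 32-bit target is materialized with a
// lui/ori pair -- lui sets the upper 16 bits, ori fills in the lower 16 --
// whereas a J/JAL instruction keeps only a 26-bit instruction index: the
// low 28 bits of the byte address shifted right by 2.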
void Assembler::print(Label* L) {
  if (L->is_unused()) {

  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {

    while (l.is_linked()) {

      if ((instr & ~kImm16Mask) == 0) {

    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
void Assembler::bind_to(Label* L, int pos) {

  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;

  while (L->is_linked()) {

    int32_t dist = pos - fixup_pos;

    if (dist > kMaxBranchOffset) {
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry(fixup_pos);
        CHECK(trampoline_pos != kInvalidSlotPos);

      ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);

      fixup_pos = trampoline_pos;
      dist = pos - fixup_pos;

  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;

void Assembler::next(Label* L) {

  if (link == kEndOfChain) {
void Assembler::GenInstrRegister(Opcode opcode,

  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));

void Assembler::GenInstrRegister(Opcode opcode,

void Assembler::GenInstrRegister(Opcode opcode,

  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());

void Assembler::GenInstrRegister(Opcode opcode,

  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());

void Assembler::GenInstrRegister(Opcode opcode,
                                 FPUControlRegister fs,

  ASSERT(fs.is_valid() && rt.is_valid());

void Assembler::GenInstrImmediate(Opcode opcode,

void Assembler::GenInstrImmediate(Opcode opcode,

  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);

void Assembler::GenInstrImmediate(Opcode opcode,

void Assembler::GenInstrJump(Opcode opcode,

  Instr instr = opcode | address;

  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;

  return trampoline_entry;
    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();

      return kEndOfJumpChain;

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();

      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;

  ASSERT((offset & 3) == 0);

  if (L->is_bound()) {
    target_pos = L->pos();

    if (L->is_linked()) {
      target_pos = L->pos();
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);

      target_pos = kEndOfChain;

      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;

    L->link_to(at_offset);
  beq(zero_reg, zero_reg, offset);

  bgezal(zero_reg, offset);

  GenInstrImmediate(BEQ, rs, rt, offset);

  GenInstrImmediate(BGTZ, rs, zero_reg, offset);

  GenInstrImmediate(BLEZ, rs, zero_reg, offset);

  GenInstrImmediate(BNE, rs, rt, offset);

  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);

  ASSERT(in_range && ((target & 3) == 0));

  GenInstrJump(J, target >> 2);

  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
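// Note (added commentary): every MIPS branch and jump is followed by a delay
// slot that executes whether or not the transfer is taken. These emitters do
// not fill it; callers are expected to follow each branch or jump with a nop
// (or a usefully hoisted instruction).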
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);

  ASSERT(in_range && ((target & 3) == 0));

  GenInstrJump(JAL, target >> 2);

  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);

  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);

  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);

  GenInstrImmediate(ADDIU, rs, rd, j);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);

  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);

  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);

  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);

  GenInstrImmediate(ANDI, rs, rt, j);

  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);

  GenInstrImmediate(ORI, rs, rt, j);

  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);

  GenInstrImmediate(XORI, rs, rt, j);

  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
                    bool coming_from_nop) {

  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);

  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);

  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);

  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());

void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));

  ori(at, at, src.offset_ & kImm16Mask);
  addu(at, at, src.rm());
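// Note (added commentary): when a load/store offset does not fit the signed
// 16-bit immediate field, LoadRegPlusOffsetToAt() builds the full effective
// address in the scratch register at (lui/ori with the offset halves, then
// addu with the base), and the access below is re-issued as at-relative with
// offset 0; this is why at may never itself be the base register here.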
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);

    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);

    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);

    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);

    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);

  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);

    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);

    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);

    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);

    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);

  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);

  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);

  GenInstrImmediate(LUI, zero_reg, rd, j);
  ASSERT((code & ~0xfffff) == 0);

#if defined(V8_HOST_ARCH_MIPS)
#else  // V8_HOST_ARCH_MIPS

  emit(reinterpret_cast<Instr>(msg));

      | rt.code() << kRtShift | code << 6;

      | rt.code() << kRtShift | code << 6;

      | rt.code() << kRtShift | code << 6;

  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);

  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);

  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);

  GenInstrImmediate(SLTI, rs, rt, j);

  GenInstrImmediate(SLTIU, rs, rt, j);
  rt.code_ = (cc & 0x0007) << 2 | 1;

  rt.code_ = (cc & 0x0007) << 2 | 0;

  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);

  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);

  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);

  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);

  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);

  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
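// Note (added commentary): ldc1 and sdc1 are emitted here as two 32-bit
// lwc1/swc1 accesses on an adjacent FPU register pair rather than one 64-bit
// access -- a workaround for the fact that 64-bit FPU loads/stores require
// 8-byte alignment, which heap-allocated doubles do not guarantee.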
  GenInstrRegister(COP1, CTC1, rt, fs);

  GenInstrRegister(COP1, CFC1, rt, fs);

  *lo = i & 0xffffffff;

  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);

  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);

  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);

  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);

                  FPURegister fs, FPURegister ft, uint16_t cc) {

      | cc << 8 | 3 << 4 | cond;
  c(cond, D, src1, f14, 0);

  RecordRelocInfo(RelocInfo::JS_RETURN);

  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);

  if (FLAG_code_comments) {

    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));

    if (imm == kEndOfJumpChain) {

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

        instr_lui | ((imm >> kLuiShift) & kImm16Mask));

        instr_ori | (imm & kImm16Mask));

    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {

    ASSERT((imm28 & 3) == 0);

    uint32_t imm26 = imm28 >> 2;
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;

  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;

  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);
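  // Note (added commentary): instructions grow upward from the start of the
  // buffer while relocation records grow downward from its end, so the
  // resize above needs two moves -- code to the start of the new buffer,
  // reloc data to its end. After rebasing the writer, only
  // INTERNAL_REFERENCE entries (absolute pointers into the buffer itself)
  // still need patching, which the loop below does via
  // RelocateInternalReference().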
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());

  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);

  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {

  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {

    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));

    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {

    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {

      reloc_info_writer.Write(&reloc_info_with_ast_id);

      reloc_info_writer.Write(&rinfo);
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {

    if (trampoline_pool_blocked_nesting_ > 0) {

      next_buffer_check_ = no_trampoline_pool_before_;

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {

    for (int i = 0; i < unbound_labels_count_; i++) {

      { BlockGrowBufferScope block_buf_growth(this);

        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);

        ori(at, at, (imm32 & kImm16Mask));

    trampoline_ = Trampoline(pool_start, unbound_labels_count_);

    trampoline_emitted_ = true;

        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  return reinterpret_cast<Address>(

  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);

  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
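  // Note (added commentary): J and JAL replace only the low 28 bits of the
  // PC (a 26-bit instruction index shifted left by 2), so a jump can never
  // leave the 256MB segment containing its delay slot. The guard below also
  // refuses to patch when the delay slot sits in the first or last 32KB of a
  // 256MB segment, presumably as a workaround for core errata around segment
  // boundaries.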
  uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
  uint32_t ipc_segment_addr = ipc & segment_mask;
  if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)

  if (in_range && GetRt(instr2) == GetRs(instr3)) {
    *(p + 2) = JAL | target_field;
    patched_jump = true;

  } else if (IsJr(instr3)) {

    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p + 2) = J | target_field;
      patched_jump = true;
  } else if (IsJal(instr3)) {

    *(p + 2) = JAL | target_field;

    uint32_t rd_field = ra.code() << kRdShift;

    patched_jump = true;
  } else if (IsJ(instr3)) {

    *(p + 2) = J | target_field;

    patched_jump = true;
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);

  bool patched = false;

  if (IsJal(instr3)) {

    uint32_t rd_field = ra.code() << kRdShift;

  } else if (IsJ(instr3)) {

#endif  // V8_TARGET_ARCH_MIPS
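// Note (added commentary): a minimal sketch of how this assembler is driven,
// modeled on V8's MIPS cctests; it assumes an already-initialized isolate
// and is illustrative rather than a verbatim excerpt of this file.
//
//   Assembler assm(isolate, NULL, 0);  // assembler allocates its own buffer
//   assm.addu(v0, a0, a1);             // v0 = a0 + a1
//   assm.jr(ra);                       // return to caller...
//   assm.nop();                        // ...with a nop filling the delay slot
//
//   CodeDesc desc;
//   assm.GetCode(&desc);               // finalize code and relocation info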