#if V8_TARGET_ARCH_MIPS

    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),

  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {

void MacroAssembler::Store(Register src,
  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Register src1, const Operand& src2) {

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Register src1, const Operand& src2) {

void MacroAssembler::PushSafepointRegisters() {
  if (num_unsaved > 0) {

void MacroAssembler::PopSafepointRegisters() {
  if (num_unsaved > 0) {

void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);

void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
  Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
  PopSafepointRegisters();

void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
  sw(src, SafepointRegistersAndDoublesSlot(dst));

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));

void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;

void MacroAssembler::InNewSpace(Register object,
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));

void MacroAssembler::RecordWriteField(
  JumpIfSmi(value, &done);
  if (emit_debug_code()) {
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
              remembered_set_action,
  if (emit_debug_code()) {
    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));

void MacroAssembler::RecordWrite(Register object,
  if (emit_debug_code()) {
           eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  isolate()->counters()->write_barriers_static()->Increment();
  JumpIfSmi(value, &done);
                MemoryChunk::kPointersToHereAreInterestingMask,
  CheckPageFlag(object,
                MemoryChunk::kPointersFromHereAreInterestingMask,
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  if (emit_debug_code()) {
    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));

void MacroAssembler::RememberedSetHelper(Register object,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
        scratch, Operand(zero_reg));
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
  if (emit_debug_code()) {
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));
  if (emit_debug_code()) {
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
          holder_reg, Operand(at));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  int token_offset = Context::kHeaderSize +
  Branch(miss, ne, scratch, Operand(at));
  bind(&same_contexts);
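
// Compute an integer hash for reg0, seeded with the heap's hash seed.
// The shift/add/xor sequence below is intended to match the integer hash
// computed by the C++ runtime so that generated code and the runtime agree
// on dictionary hashes.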
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  xor_(reg0, reg0, scratch);
  nor(scratch, reg0, zero_reg);
  addu(reg0, scratch, at);
  xor_(reg0, reg0, at);
  addu(reg0, reg0, at);
  xor_(reg0, reg0, at);
  sll(scratch, reg0, 11);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);
  xor_(reg0, reg0, at);
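
// Probe the seeded number dictionary: hash the key, mask the hash with
// capacity - 1, and inline a fixed number of probes; the final probe
// branches to 'miss' on mismatch instead of retrying.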
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
  GetNumberHash(reg0, reg1);
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  Subu(reg1, reg1, Operand(1));
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    and_(reg2, reg2, reg1);
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    addu(reg2, reg2, at);
    addu(reg2, elements, at);
    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kNumberDictionaryProbes - 1) {
      Branch(&done, eq, key, Operand(at));
      Branch(miss, ne, key, Operand(at));
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
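
// The arithmetic/logical macro instructions below pick the immediate form
// (addiu, andi, slti, ...) when the operand is a constant that fits in
// 16 bits and needs no relocation; otherwise the constant is materialized
// in a scratch register and the register form is used.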
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
    addu(rd, rs, rt.rm());
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);

void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
    subu(rd, rs, rt.rm());
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);

void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
    mul(rd, rs, rt.rm());

void MacroAssembler::Mult(Register rs, const Operand& rt) {

void MacroAssembler::Multu(Register rs, const Operand& rt) {

void MacroAssembler::Div(Register rs, const Operand& rt) {

void MacroAssembler::Divu(Register rs, const Operand& rt) {

void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
    and_(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);

void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
    or_(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);

void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
    xor_(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);

void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
    nor(rd, rs, rt.rm());

void MacroAssembler::Neg(Register rs, const Operand& rt) {
  xor_(rs, rt.rm(), at);

void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
    slt(rd, rs, rt.rm());
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);

void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
    sltu(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);

void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
      rotrv(rd, rs, rt.rm());
      rotr(rd, rs, rt.imm32_);
      subu(at, zero_reg, rt.rm());
      srlv(rd, rs, rt.rm());
      if (rt.imm32_ == 0) {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
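
// Ulw/Usw implement unaligned word accesses with lwl/swl (and their
// lwr/swr counterparts), addressing the high part at offset + 3 on
// little-endian MIPS.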
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));

void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));

void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
    ASSERT(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      li(dst, Operand(cell));
      li(dst, Operand(value));

void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
  if (MustUseReg(j.rmode_)) {
    RecordRelocInfo(j.rmode_, j.imm32_);
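
// MultiPush/MultiPop and their FPU variants store or load every register
// named in 'regs' with a single sp adjustment; the "Reversed" versions
// only differ in the order the stack slots are written.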
void MacroAssembler::MultiPush(RegList regs) {
  Subu(sp, sp, Operand(stack_offset));
    if ((regs & (1 << i)) != 0) {

void MacroAssembler::MultiPushReversed(RegList regs) {
  Subu(sp, sp, Operand(stack_offset));
    if ((regs & (1 << i)) != 0) {

void MacroAssembler::MultiPop(RegList regs) {
    if ((regs & (1 << i)) != 0) {
  addiu(sp, sp, stack_offset);

void MacroAssembler::MultiPopReversed(RegList regs) {
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
  addiu(sp, sp, stack_offset);

void MacroAssembler::MultiPushFPU(RegList regs) {
  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));

void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  Subu(sp, sp, Operand(stack_offset));
    if ((regs & (1 << i)) != 0) {
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));

void MacroAssembler::MultiPopFPU(RegList regs) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
  addiu(sp, sp, stack_offset);

void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
  addiu(sp, sp, stack_offset);

void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);
  PrepareCallCFunction(2, t0);
  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
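
// Ext/Ins extract or insert a bit field of 'size' bits at 'pos'. On
// MIPS32R2 the ext/ins instructions are used directly; on older variants
// the field is emulated with shifts and masks.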
void MacroAssembler::Ext(Register rt,
    ext_(rt, rs, pos, size);
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);
    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);

void MacroAssembler::Ins(Register rt,
  ASSERT(pos + size <= 32);
    ins_(rt, rs, pos, size);
    ASSERT(!rt.is(t8) && !rs.is(t8));
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    nor(at, at, zero_reg);

void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister scratch) {
  Cvt_d_uw(fd, t8, scratch);

void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister scratch) {
  Label conversion_done;
  Branch(&conversion_done, eq, t9, Operand(zero_reg));
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  add_d(fd, fd, scratch);
  bind(&conversion_done);

void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);

void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));

void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));

void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));

void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));

void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister scratch) {
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  Or(rs, rs, 1 << 31);
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
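
// BranchF compares two doubles with c.cond.d and branches on the FCSR
// condition flag; the initial unordered (UN) compare appears to route NaN
// operands to the nan target before the requested condition is tested.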
void MacroAssembler::BranchF(Label* target,
  BlockTrampolinePoolScope block_trampoline_pool(this);
    c(UN, D, cmp1, cmp2);
        c(OLT, D, cmp1, cmp2);
        c(ULE, D, cmp1, cmp2);
        c(ULT, D, cmp1, cmp2);
        c(OLE, D, cmp1, cmp2);
        c(EQ, D, cmp1, cmp2);
        c(UEQ, D, cmp1, cmp2);
        c(EQ, D, cmp1, cmp2);
        c(UEQ, D, cmp1, cmp2);

void MacroAssembler::Move(FPURegister dst, double imm) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  if (value_rep == zero && !force_load) {
  } else if (value_rep == minus_zero && !force_load) {
    DoubleAsTwoUInt32(imm, &lo, &hi);
      li(at, Operand(lo));
      mtc1(zero_reg, dst);
      li(at, Operand(hi));
      mtc1(at, dst.high());
      mtc1(zero_reg, dst.high());
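
// The conditional-move helpers below (Movz/Movn/Movt/Movf) fall back to a
// compare-and-branch around a plain mov on variants without the
// corresponding conditional-move instructions.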
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
    Branch(&done, ne, rt, Operand(zero_reg));

void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
    Branch(&done, eq, rt, Operand(zero_reg));

void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
    ASSERT(!(rs.is(t8) || rd.is(t8)));
    Register scratch = t8;
    cfc1(scratch, FCSR);
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, eq, scratch, Operand(zero_reg));

void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
    ASSERT(!(rs.is(t8) || rd.is(t8)));
    Register scratch = t8;
    cfc1(scratch, FCSR);
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, ne, scratch, Operand(zero_reg));

void MacroAssembler::Clz(Register rd, Register rs) {
    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
    Register scratch = t9;
    and_(scratch, at, mask);
    Branch(&end, ne, scratch, Operand(zero_reg));

                                   Register except_flag,
  ASSERT(!result.is(scratch));
  ASSERT(!double_input.is(double_scratch));
  ASSERT(!except_flag.is(scratch));
  mov(except_flag, zero_reg);
  cvt_w_d(double_scratch, double_input);
  mfc1(result, double_scratch);
  cvt_d_w(double_scratch, double_scratch);
  BranchF(&done, NULL, eq, double_input, double_scratch);
  cfc1(scratch, FCSR);
  ctc1(zero_reg, FCSR);
  switch (rounding_mode) {
      Round_w_d(double_scratch, double_input);
      Trunc_w_d(double_scratch, double_input);
      Ceil_w_d(double_scratch, double_input);
      Floor_w_d(double_scratch, double_input);
  cfc1(except_flag, FCSR);
  ctc1(scratch, FCSR);
  mfc1(result, double_scratch);
  And(except_flag, except_flag, Operand(except_mask));
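
// Try to convert a double to a 32-bit integer inline: clear the FCSR cause
// bits, truncate with trunc_w_d, and jump to 'done' only when the
// conversion raised no exception bit.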
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
  Register scratch = at;
  Register scratch2 = t9;
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  Branch(done, eq, scratch, Operand(zero_reg));

void MacroAssembler::TruncateDoubleToI(Register result,
  TryInlineTruncateDoubleToI(result, double_input, &done);
  Subu(sp, sp, Operand(kDoubleSize));
  DoubleToIStub stub(sp, result, 0, true, true);
  Addu(sp, sp, Operand(kDoubleSize));

void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  ASSERT(!result.is(object));
  ldc1(double_scratch,
  TryInlineTruncateDoubleToI(result, double_scratch, &done);
  DoubleToIStub stub(object,

void MacroAssembler::TruncateNumberToI(Register object,
                                       Register heap_number_map,
                                       Label* not_number) {
  ASSERT(!result.is(object));
  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
  TruncateHeapNumberToI(result, object);

void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         int num_least_bits) {

void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));

#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))

  BranchShort(offset, bdslot);
  BranchShort(offset, cond, rs, rt, bdslot);
  if (L->is_bound()) {
    BranchShort(L, bdslot);
    if (is_trampoline_emitted()) {
      BranchShort(L, bdslot);

void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
  if (L->is_bound()) {
    BranchShort(L, cond, rs, rt, bdslot);
      BranchShort(&skip, neg_cond, rs, rt);
    if (is_trampoline_emitted()) {
      BranchShort(&skip, neg_cond, rs, rt);
      BranchShort(L, cond, rs, rt, bdslot);

void MacroAssembler::Branch(Label* L,
                            Heap::RootListIndex index,
  LoadRoot(at, index);
  Branch(L, cond, rs, Operand(at), bdslot);

void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register scratch = at;
    BlockTrampolinePoolScope block_trampoline_pool(this);
        beq(rs, r2, offset);
        bne(rs, r2, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
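
    // The cases below handle an immediate right-hand operand, preferring
    // slti/sltiu when the constant fits in 16 bits; otherwise the constant
    // is loaded into a scratch register and the register forms above are
    // mirrored.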
    BlockTrampolinePoolScope block_trampoline_pool(this);
        beq(rs, r2, offset);
        bne(rs, r2, offset);
        if (rt.imm32_ == 0) {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);

  b(shifted_branch_offset(L, false));

void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register scratch = at;
    BlockTrampolinePoolScope block_trampoline_pool(this);
        offset = shifted_branch_offset(L, false);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
    BlockTrampolinePoolScope block_trampoline_pool(this);
        offset = shifted_branch_offset(L, false);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
  ASSERT(is_int16(offset));

  BranchAndLinkShort(offset, bdslot);

void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);

void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    BranchAndLinkShort(L, bdslot);
    if (is_trampoline_emitted()) {
      BranchAndLinkShort(L, bdslot);

void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
  if (L->is_bound()) {
    BranchAndLinkShort(L, cond, rs, rt, bdslot);
      BranchShort(&skip, neg_cond, rs, rt);
    if (is_trampoline_emitted()) {
      BranchShort(&skip, neg_cond, rs, rt);
      BranchAndLinkShort(L, cond, rs, rt, bdslot);

void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        Register rs, const Operand& rt,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register scratch = at;
  BlockTrampolinePoolScope block_trampoline_pool(this);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);

void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register scratch = at;
  BlockTrampolinePoolScope block_trampoline_pool(this);
      offset = shifted_branch_offset(L, false);
      offset = shifted_branch_offset(L, false);
      offset = shifted_branch_offset(L, false);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
  ASSERT(is_int16(offset));

void MacroAssembler::Jump(Register target,
  BlockTrampolinePoolScope block_trampoline_pool(this);
    BRANCH_ARGS_CHECK(cond, rs, rt);

void MacroAssembler::Jump(intptr_t target,
                          RelocInfo::Mode rmode,
  li(t9, Operand(target, rmode));
  Jump(t9, al, zero_reg, Operand(zero_reg), bd);

void MacroAssembler::Jump(Address target,
                          RelocInfo::Mode rmode,
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);

void MacroAssembler::Jump(Handle<Code> code,
                          RelocInfo::Mode rmode,
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);

int MacroAssembler::CallSize(Register target,
  return size * kInstrSize;

void MacroAssembler::Call(Register target,
  BlockTrampolinePoolScope block_trampoline_pool(this);
    BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));

int MacroAssembler::CallSize(Address target,
                             RelocInfo::Mode rmode,
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 2 * kInstrSize;

void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  Call(t9, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));

int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
  return CallSize(reinterpret_cast<Address>(code.location()),
                  rmode, cond, rs, rt, bd);

void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));

void MacroAssembler::Ret(Condition cond,
  Jump(ra, cond, rs, rt, bd);

  BlockTrampolinePoolScope block_trampoline_pool(this);
  imm28 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);

  BlockTrampolinePoolScope block_trampoline_pool(this);
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);

  BlockTrampolinePoolScope block_trampoline_pool(this);
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);

void MacroAssembler::DropAndRet(int drop) {
  addiu(sp, sp, drop * kPointerSize);

void MacroAssembler::DropAndRet(int drop,
                                const Operand& r2) {

void MacroAssembler::Drop(int count,
                          const Operand& op) {
  addiu(sp, sp, count * kPointerSize);

void MacroAssembler::Swap(Register reg1,
  if (scratch.is(no_reg)) {
    Xor(reg1, reg1, Operand(reg2));
    Xor(reg2, reg2, Operand(reg1));
    Xor(reg1, reg1, Operand(reg2));

void MacroAssembler::Call(Label* target) {
  BranchAndLink(target);

void MacroAssembler::Push(Handle<Object> handle) {
  li(at, Operand(handle));

#ifdef ENABLE_DEBUGGER_SUPPORT

void MacroAssembler::DebugBreak() {
  PrepareCEntryArgs(0);
  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
  ASSERT(AllowThisStubCall(&ces));

#endif  // ENABLE_DEBUGGER_SUPPORT
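
// Exception handling: a stack handler is five words (next, code, state,
// context, fp), as the STATIC_ASSERTs below check. PushTryHandler links a
// new handler into the Isolate's handler chain; Throw and PopTryHandler
// unlink it again.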
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  li(t2, Operand(state));
  if (kind == StackHandler::JS_ENTRY) {
    Push(zero_reg, zero_reg, t2, t1);
    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));

void MacroAssembler::PopTryHandler() {
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));

void MacroAssembler::JumpToHandlerEntry() {
  srl(a2, a2, StackHandler::kKindWidth);

void MacroAssembler::Throw(Register value) {
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
  Branch(&done, eq, cp, Operand(zero_reg));
  JumpToHandlerEntry();

void MacroAssembler::ThrowUncatchable(Register value) {
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  if (!value.is(v0)) {
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  Label fetch_next, check_kind;
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  And(a2, a2, Operand(StackHandler::KindField::kMask));
  Branch(&fetch_next, ne, a2, Operand(zero_reg));
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
  JumpToHandlerEntry();
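
// Allocate object_size bytes by bumping the allocation top of the space
// selected by 'flags' (normally new space), branching to gc_required when
// the limit would be exceeded. Flags select double alignment, size in
// words, and whether the result is tagged.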
void MacroAssembler::Allocate(int object_size,
  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9));
  ASSERT(!scratch2.is(t9));
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      reinterpret_cast<intptr_t>(allocation_top.address());
      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  Register topaddr = scratch1;
  li(topaddr, Operand(allocation_top));
    if (emit_debug_code()) {
      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Addu(result, result, Operand(kDoubleSize / 2));
  Addu(scratch2, result, Operand(object_size));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));

void MacroAssembler::Allocate(Register object_size,
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!object_size.is(t9));
  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      reinterpret_cast<intptr_t>(allocation_top.address());
      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  Register topaddr = scratch1;
  li(topaddr, Operand(allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    if (emit_debug_code()) {
      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Addu(result, result, Operand(kDoubleSize / 2));
  if ((flags & SIZE_IN_WORDS) != 0) {
    Addu(scratch2, result, scratch2);
    Addu(scratch2, result, Operand(object_size));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
  if (emit_debug_code()) {
    Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
  if ((flags & TAG_OBJECT) != 0) {
void MacroAssembler::UndoAllocationInNewSpace(Register object,
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  li(scratch, Operand(new_space_allocation_top));
  Check(less, kUndoAllocationOfNonAllocatedMemory,
        object, Operand(scratch));
  li(scratch, Operand(new_space_allocation_top));

void MacroAssembler::AllocateTwoByteString(Register result,
                                           Label* gc_required) {
  sll(scratch1, length, 1);
  addiu(scratch1, scratch1,
  InitializeNewString(result,
                      Heap::kStringMapRootIndex,

void MacroAssembler::AllocateAsciiString(Register result,
                                         Label* gc_required) {
  InitializeNewString(result,
                      Heap::kAsciiStringMapRootIndex,

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
  InitializeNewString(result,
                      Heap::kConsStringMapRootIndex,

void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Label* gc_required) {
  Label allocate_new_space, install_map;
  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(isolate());
  li(scratch1, Operand(high_promotion_mode));
  Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
  Allocate(ConsString::kSize,
  bind(&allocate_new_space);
  Allocate(ConsString::kSize,
  InitializeNewString(result,
                      Heap::kConsAsciiStringMapRootIndex,

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
  InitializeNewString(result,
                      Heap::kSlicedStringMapRootIndex,

void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
  InitializeNewString(result,
                      Heap::kSlicedAsciiStringMapRootIndex,

void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                         Label* not_unique_name) {
  Branch(&succeed, eq, at, Operand(zero_reg));

void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register heap_number_map,
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));

void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 Label* gc_required) {
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);

void MacroAssembler::CopyFields(Register dst,
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
    if ((temps & (1 << i)) != 0) {
  for (int i = 0; i < field_count; i++) {
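
// CopyBytes copies 'length' bytes from 'src' to 'dst': it byte-copies
// until 'src' is word aligned, then copies a word at a time, and finishes
// the remaining bytes one by one.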
  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
  Branch(&byte_loop, le, length, Operand(kPointerSize));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  Subu(length, length, Operand(1));
  Branch(&align_loop_1, ne, length, Operand(zero_reg));
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, kExpectingAlignmentForCopyBytes,
           scratch, Operand(zero_reg));
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  Addu(src, src, kPointerSize);
  srl(scratch, scratch, 8);
  srl(scratch, scratch, 8);
  srl(scratch, scratch, 8);
  Subu(length, length, Operand(kPointerSize));
  Branch(&done, eq, length, Operand(zero_reg));
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));

void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
  Addu(start_offset, start_offset, kPointerSize);
  Branch(&loop, lt, start_offset, Operand(end_offset));

void MacroAssembler::CheckFastElements(Register map,
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));

void MacroAssembler::CheckFastObjectElements(Register map,
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));

void MacroAssembler::CheckFastSmiElements(Register map,
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));

void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register elements_reg,
                                                 int elements_offset) {
  Label smi_value, maybe_nan, have_double_value, is_nan, done;
  Register mantissa_reg = scratch2;
  Register exponent_reg = scratch3;
  JumpIfSmi(value_reg, &smi_value);
              Heap::kHeapNumberMapRootIndex,
  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  bind(&have_double_value);
  Addu(scratch1, scratch1, elements_reg);
      scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
  LoadRoot(at, Heap::kNanValueRootIndex);
  jmp(&have_double_value);
  Addu(scratch1, elements_reg,
  Addu(scratch1, scratch1, scratch2);
  Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  mtc1(untagged_value, f2);

void MacroAssembler::CompareMapAndBranch(Register obj,
                                         Label* early_success,
  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);

void MacroAssembler::CompareMapAndBranch(Register obj_map,
                                         Label* early_success,
  Branch(branch_to, cond, obj_map, Operand(map));

void MacroAssembler::CheckMap(Register obj,
  JumpIfSmi(obj, fail);
  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);

void MacroAssembler::DispatchMap(Register obj,
                                 Handle<Code> success,
  JumpIfSmi(obj, &fail);
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));

void MacroAssembler::CheckMap(Register obj,
                              Heap::RootListIndex index,
  JumpIfSmi(obj, fail);
  LoadRoot(at, index);
  Branch(fail, ne, scratch, Operand(at));
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;
  ASSERT(actual.is_immediate() || actual.reg().is(a0));
  ASSERT(expected.is_immediate() || expected.reg().is(a2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
      li(a0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        definitely_matches = true;
        *definitely_mismatches = true;
        li(a2, Operand(expected.immediate()));
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      li(a3, Operand(code_constant));
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
      call_wrapper.BeforeCall(CallSize(adaptor));
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    bind(&regular_invoke);

void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                const CallWrapper& call_wrapper) {
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
  if (!definitely_mismatches) {
    call_wrapper.BeforeCall(CallSize(code));
    call_wrapper.AfterCall();

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Register expected_reg = a2;
  Register code_reg = a3;
  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
         SharedFunctionInfo::kFormalParameterCountOffset));
  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  InvokeCode(a3, expected, actual, flag, call_wrapper);

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  InvokeFunction(a1, expected, actual, flag, call_wrapper);

void MacroAssembler::IsObjectJSObjectType(Register heap_object,
  IsInstanceJSObjectType(map, scratch, fail);

void MacroAssembler::IsInstanceJSObjectType(Register map,

void MacroAssembler::IsObjectJSStringType(Register object,
  Branch(fail, ne, scratch, Operand(zero_reg));

void MacroAssembler::IsObjectNameType(Register object,

void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             bool miss_on_bound_function) {
  JumpIfSmi(function, miss);
  GetObjectType(function, result, scratch);
  if (miss_on_bound_function) {
    And(scratch, scratch,
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
    Branch(miss, ne, scratch, Operand(zero_reg));
  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  Branch(&non_instance, ne, scratch, Operand(zero_reg));
  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
  Branch(miss, eq, result, Operand(t8));
  GetObjectType(result, scratch, scratch);
  Branch(&done, ne, scratch, Operand(MAP_TYPE));
  bind(&non_instance);

void MacroAssembler::GetObjectType(Register object,
                                   Register type_reg) {

void MacroAssembler::CallStub(CodeStub* stub,
                              TypeFeedbackId ast_id,
  ASSERT(AllowThisStubCall(stub));
  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,

void MacroAssembler::TailCallStub(CodeStub* stub,
  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond, r1, r2, bd);

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
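
// CallApiFunctionAndReturn calls a C++ API callback (through the profiling
// thunk when the CPU profiler is active), then restores the HandleScope
// data (next, limit, level), checks for a scheduled exception, and leaves
// the exit frame.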
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
  ASSERT(function_address.is(a1) || function_address.is(a2));

  Label profiler_disabled;
  Label end_profiler_check;
  bool* is_profiling_flag =
      isolate()->cpu_profiler()->is_profiling_address();
  li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
  Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
  li(t9, Operand(thunk_ref));
  jmp(&end_profiler_check);
  bind(&profiler_disabled);
  mov(t9, function_address);
  bind(&end_profiler_check);

  li(s3, Operand(next_address));
  Addu(s2, s2, Operand(1));
  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, a0);
    li(a0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();

  DirectCEntryStub stub;
  stub.GenerateCall(this, t9);
  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, a0);
    li(a0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;
  lw(v0, return_value_operand);
  bind(&return_value_loaded);
  if (emit_debug_code()) {
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  Subu(s2, s2, Operand(1));
  Branch(&delete_allocated_handles, ne, s1, Operand(at));

  bind(&leave_exit_frame);
  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
  bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    lw(cp, *context_restore_operand);
  li(s0, Operand(stack_space));

  bind(&promote_scheduled_exception);
  CallExternalReference(
      ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
  jmp(&exception_handled);

  bind(&delete_allocated_handles);
  PrepareCallCFunction(1, s1);
  li(a0, Operand(ExternalReference::isolate_address(isolate())));
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
  jmp(&leave_exit_frame);
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();

void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addiu(sp, sp, num_arguments * kPointerSize);
  LoadRoot(v0, Heap::kUndefinedValueRootIndex);

void MacroAssembler::IndexFromHash(Register hash,
         (1 << String::kArrayIndexValueBits));
  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);

void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               Register heap_number_map,
  JumpIfNotSmi(object, &not_smi);
  mtc1(scratch1, result);
  cvt_d_w(result, result);
  Branch(not_number, ne, scratch1, Operand(heap_number_map));
  Register exponent = scratch1;
  Register mask_reg = scratch2;
  li(mask_reg, HeapNumber::kExponentMask);
  And(exponent, exponent, mask_reg);
  Branch(not_number, eq, exponent, Operand(mask_reg));

void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            Register scratch1) {
  mtc1(scratch1, value);
  cvt_d_w(value, value);
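
// AdduAndCheckForOverflow/SubuAndCheckForOverflow leave a negative value in
// overflow_dst exactly when the signed operation overflowed: the result's
// sign is xor-ed with the operands' signs and the two terms are and-ed, so
// bit 31 of overflow_dst is the overflow predicate.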
void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                             Register overflow_dst,
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  if (left.is(right) && dst.is(left)) {
    ASSERT(!overflow_dst.is(t9));
    addu(dst, left, right);
    xor_(scratch, dst, scratch);
    xor_(overflow_dst, dst, right);
    and_(overflow_dst, overflow_dst, scratch);
  } else if (dst.is(right)) {
    mov(scratch, right);
    addu(dst, left, right);
    xor_(scratch, dst, scratch);
    xor_(overflow_dst, dst, left);
    and_(overflow_dst, overflow_dst, scratch);
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);

void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                             Register overflow_dst,
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));
  if (left.is(right)) {
    mov(overflow_dst, zero_reg);
    subu(dst, left, right);
    xor_(overflow_dst, dst, scratch);
    xor_(scratch, scratch, right);
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);
    and_(overflow_dst, scratch, overflow_dst);
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);

void MacroAssembler::CallRuntime(const Runtime::Function* f,
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
  PrepareCEntryArgs(num_arguments);
  PrepareCEntryFunction(ExternalReference(f, isolate()));
  CEntryStub stub(1, save_doubles);

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
  PrepareCEntryArgs(num_arguments);
  PrepareCEntryFunction(ext);

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
  PrepareCEntryArgs(num_arguments);
  JumpToExternalReference(ext);

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
  TailCallExternalReference(ExternalReference(fid, isolate()),

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
  PrepareCEntryFunction(builtin);
  Jump(stub.GetCode(isolate()),
       RelocInfo::CODE_TARGET,

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(t9, id);
  call_wrapper.BeforeCall(CallSize(t9));
  call_wrapper.AfterCall();

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
         JSBuiltinsObject::OffsetOfFunctionWithId(id)));

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  GetBuiltinFunction(a1, id);
4273 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4274 Register scratch1, Register scratch2) {
4275 if (FLAG_native_code_counters && counter->Enabled()) {
4276 li(scratch1, Operand(value));
4277 li(scratch2, Operand(ExternalReference(counter)));
4283 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4284 Register scratch1, Register scratch2) {
4286 if (FLAG_native_code_counters && counter->Enabled()) {
4287 li(scratch2, Operand(ExternalReference(counter)));
4289 Addu(scratch1, scratch1, Operand(value));
4295 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4296 Register scratch1, Register scratch2) {
4298 if (FLAG_native_code_counters && counter->Enabled()) {
4299 li(scratch2, Operand(ExternalReference(counter)));
4301 Subu(scratch1, scratch1, Operand(value));
void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4311 Register rs, Operand rt) {
4312 if (emit_debug_code())
4313 Check(cc, reason, rs, rt);
4317 void MacroAssembler::AssertFastElements(Register elements) {
4318 if (emit_debug_code()) {
4319 ASSERT(!elements.is(at));
4323 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4324 Branch(&ok, eq, elements, Operand(at));
4325 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4326 Branch(&ok, eq, elements, Operand(at));
4327 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4328 Branch(&ok, eq, elements, Operand(at));
4329 Abort(kJSObjectWithFastElementsMapHasSlowElements);
void MacroAssembler::Check(Condition cc, BailoutReason reason,
4337 Register rs, Operand rt) {
4339 Branch(&L, cc, rs, rt);
void MacroAssembler::Abort(BailoutReason reason) {
4352 RecordComment("Abort message: ");
4356 if (FLAG_trap_on_abort) {
4362 li(a0, Operand(Smi::FromInt(reason)));
4369 CallRuntime(Runtime::kAbort, 1);
4371 CallRuntime(Runtime::kAbort, 1);
4374 if (is_trampoline_pool_blocked()) {
4380 static const int kExpectedAbortInstructions = 10;
4381 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4382 ASSERT(abort_instructions <= kExpectedAbortInstructions);
4383 while (abort_instructions++ < kExpectedAbortInstructions) {
4390 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4391 if (context_chain_length > 0) {
4393 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4394 for (int i = 1; i < context_chain_length; i++) {
4395 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4406 void MacroAssembler::LoadTransitionedArrayMapConditional(
4409 Register map_in_out,
4411 Label* no_map_match) {
lw(scratch,
4414 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4415 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4420 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4421 size_t offset = expected_kind * kPointerSize +
4422 FixedArrayBase::kHeaderSize;
4424 Branch(no_map_match, ne, map_in_out, Operand(at));
4427 offset = transitioned_kind * kPointerSize +
4428 FixedArrayBase::kHeaderSize;
4433 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
lw(function,
4436 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
lw(function, FieldMemOperand(function,
4439 GlobalObject::kNativeContextOffset));
4441 lw(function, MemOperand(function, Context::SlotOffset(index)));
4445 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map, Register scratch) {
4449 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4450 if (emit_debug_code()) {
4452 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4455 Abort(kGlobalFunctionsMustHaveInitialMap);
4466 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4468 PredictableCodeSizeScope predictible_code_size_scope(
4469 this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4472 if (isolate()->IsCodePreAgingActive()) {
4474 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4475 nop(Assembler::CODE_AGE_MARKER_NOP);
4479 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4486 Push(ra, fp, cp, a1);
4487 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4489 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4496 addiu(sp, sp, -5 * kPointerSize);
4497 li(t8, Operand(Smi::FromInt(type)));
4506 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4514 addiu(sp, sp, 2 * kPointerSize);
4518 void MacroAssembler::EnterExitFrame(bool save_doubles,
4521 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4522 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4523 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4535 addiu(sp, sp, -4 * kPointerSize);
4538 addiu(fp, sp, 2 * kPointerSize);
4540 if (emit_debug_code()) {
4541 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4546 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4549 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4551 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4554 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4557 ASSERT(kDoubleSize == frame_alignment);
4558 if (frame_alignment > 0) {
4560 And(sp, sp, Operand(-frame_alignment));
4562 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4563 Subu(sp, sp, Operand(space));
4565 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4566 FPURegister reg = FPURegister::from_code(i);
4574 ASSERT(stack_space >= 0);
4575 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4576 if (frame_alignment > 0) {
4578 And(sp, sp, Operand(-frame_alignment));
4583 addiu(at, sp, kPointerSize);
4588 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4589 Register argument_count,
4590 bool restore_context,
4596 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4597 FPURegister reg = FPURegister::from_code(i);
4598 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4603 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4607 if (restore_context) {
4608 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4612 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4619 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4621 if (argument_count.is_valid()) {
4634 void MacroAssembler::InitializeNewString(Register string,
4636 Heap::RootListIndex map_index,
4638 Register scratch2) {
4640 LoadRoot(scratch2, map_index);
4642 li(scratch1, Operand(String::kEmptyHashField));
4648 int MacroAssembler::ActivationFrameAlignment() {
4649 #if V8_HOST_ARCH_MIPS
4654 return OS::ActivationFrameAlignment();
4655 #else // V8_HOST_ARCH_MIPS
4660 return FLAG_sim_stack_alignment;
4661 #endif // V8_HOST_ARCH_MIPS
4665 void MacroAssembler::AssertStackIsAligned() {
4666 if (emit_debug_code()) {
4667 const int frame_alignment = ActivationFrameAlignment();
4668 const int frame_alignment_mask = frame_alignment - 1;
4670 if (frame_alignment > kPointerSize) {
4671 Label alignment_as_expected;
4673 andi(at, sp, frame_alignment_mask);
4674 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4676 stop("Unexpected stack alignment");
4677 bind(&alignment_as_expected);
4683 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4686 Label* not_power_of_two_or_zero) {
4687 Subu(scratch, reg, Operand(1));
Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4689 scratch, Operand(zero_reg));
4690 and_(at, scratch, reg);
4691 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
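// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): AssertStackIsAligned and
// JumpIfNotPowerOfTwoOrZero above both reduce to simple mask tests. A value is
// aligned to a power-of-two `alignment` iff its low bits are zero, and a
// non-zero value is a power of two iff clearing its lowest set bit with
// `x & (x - 1)` leaves zero:
#include <cstdint>

static inline bool IsAligned32(uint32_t value, uint32_t alignment) {
  // Mirrors andi(at, sp, frame_alignment_mask) followed by branch-if-zero.
  return (value & (alignment - 1)) == 0;
}

static inline bool IsPowerOfTwoNotZero(uint32_t value) {
  // Mirrors Subu(scratch, reg, 1); and_(at, scratch, reg); the extra branch
  // on scratch < 0 in the emitted code catches the value == 0 case.
  return value != 0 && (value & (value - 1)) == 0;
}
// ---------------------------------------------------------------------------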
4695 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4696 ASSERT(!reg.is(overflow));
4697 mov(overflow, reg);  // Save the original value.
4698 SmiTag(reg);
4699 xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
4703 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4704 Register src,
4705 Register overflow) {
if (dst.is(src)) {
4708 SmiTagCheckOverflow(dst, overflow);
} else {
4711 ASSERT(!dst.is(overflow));
4712 ASSERT(!src.is(overflow));
4713 SmiTag(dst, src);
4714 xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
}
}
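// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): on 32-bit targets a Smi is
// the integer shifted left by one (tag bit 0), so tagging overflows exactly
// when the value needs more than 31 signed bits. The xor_ above detects this
// by checking whether the tagged and untagged values differ in sign:
#include <cstdint>

static inline bool SmiTagOverflows(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // Sign bit set iff bits 31 and 30 differ.
}
// ---------------------------------------------------------------------------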
4719 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4727 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4729 Label* non_smi_case) {
4734 void MacroAssembler::JumpIfSmi(Register value,
4740 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4743 void MacroAssembler::JumpIfNotSmi(Register value,
4744 Label* not_smi_label,
4749 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4753 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4755 Label* on_not_both_smi) {
4758 or_(at, reg1, reg2);
4759 JumpIfNotSmi(at, on_not_both_smi);
4763 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4765 Label* on_either_smi) {
4769 and_(at, reg1, reg2);
4770 JumpIfSmi(at, on_either_smi);
4774 void MacroAssembler::AssertNotSmi(Register object) {
4775 if (emit_debug_code()) {
4778 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4783 void MacroAssembler::AssertSmi(Register object) {
4784 if (emit_debug_code()) {
4787 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4792 void MacroAssembler::AssertString(Register object) {
4793 if (emit_debug_code()) {
4796 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4806 void MacroAssembler::AssertName(Register object) {
4807 if (emit_debug_code()) {
4810 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4820 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
4822 if (emit_debug_code()) {
4823 Label done_checking;
4824 AssertNotSmi(object);
4825 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4826 Branch(&done_checking, eq, object, Operand(scratch));
4829 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
4830 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
4832 bind(&done_checking);
4837 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4838 if (emit_debug_code()) {
4840 LoadRoot(at, index);
4841 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4846 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4847 Register heap_number_map,
4849 Label* on_not_heap_number) {
4851 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4852 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4856 void MacroAssembler::LookupNumberStringCache(Register object,
4863 Register number_string_cache = result;
4864 Register mask = scratch3;
4867 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4871 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4874 Addu(mask, mask, -1);
4881 Label load_result_from_cache;
4882 JumpIfSmi(object, &is_smi);
4885 Heap::kHeapNumberMapRootIndex,
4893 lw(scratch2, MemOperand(scratch1, kPointerSize));
4895 Xor(scratch1, scratch1, Operand(scratch2));
4896 And(scratch1, scratch1, Operand(mask));
4901 Addu(scratch1, number_string_cache, scratch1);
4903 Register probe = mask;
4905 JumpIfSmi(probe, not_found);
4912 Register scratch = scratch1;
4913 sra(scratch, object, 1);
4914 And(scratch, mask, Operand(scratch));
4919 Addu(scratch, number_string_cache, scratch);
4923 Branch(not_found, ne, object, Operand(probe));
4926 bind(&load_result_from_cache);
4927 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
4929 IncrementCounter(isolate()->counters()->number_to_string_native(), 1, scratch1, scratch2);
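// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): the number-to-string cache
// probed above is a FixedArray of (number, string) pairs indexed by a masked
// hash. Smis hash with their untagged value; heap numbers hash by xor-ing the
// two 32-bit halves of the IEEE double, matching the Xor(scratch1, scratch2)
// step. Names below are illustrative, not the V8 API:
#include <cstdint>
#include <cstring>

static inline uint32_t NumberStringCacheIndex(bool is_smi, int32_t smi_value,
                                              double heap_number,
                                              uint32_t cache_entries) {
  uint32_t mask = cache_entries - 1;  // cache_entries is a power of two.
  uint32_t hash;
  if (is_smi) {
    hash = static_cast<uint32_t>(smi_value);
  } else {
    uint64_t bits;
    std::memcpy(&bits, &heap_number, sizeof(bits));
    hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  }
  return hash & mask;  // Index of the key slot; the cached string follows it.
}
// ---------------------------------------------------------------------------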
4936 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4949 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4957 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4964 And(scratch1, first, Operand(second));
4965 JumpIfSmi(scratch1, failure);
4966 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4974 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4980 const int kFlatAsciiStringMask =
4982 const int kFlatAsciiStringTag =
4984 ASSERT(kFlatAsciiStringTag <= 0xffff);
4985 andi(scratch1, first, kFlatAsciiStringMask);
4986 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4987 andi(scratch2, second, kFlatAsciiStringMask);
4988 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4992 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4995 const int kFlatAsciiStringMask =
4997 const int kFlatAsciiStringTag =
4999 And(scratch, type, Operand(kFlatAsciiStringMask));
5000 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
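// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): the three helpers above all
// perform the same test, masking the instance type down to the bits that
// encode "string", "sequential representation" and "one-byte encoding" and
// comparing against the expected tag. Mask and tag values are parameters here
// because the concrete constants are defined elsewhere in V8:
#include <cstdint>

static inline bool BothFlatOneByteStrings(uint32_t type1, uint32_t type2,
                                          uint32_t flat_ascii_mask,
                                          uint32_t flat_ascii_tag) {
  // Mirrors andi(scratch, type, kFlatAsciiStringMask) + branch on the tag.
  return (type1 & flat_ascii_mask) == flat_ascii_tag &&
         (type2 & flat_ascii_mask) == flat_ascii_tag;
}
// ---------------------------------------------------------------------------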
5004 static const int kRegisterPassedArguments = 4;
5006 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5007 int num_double_arguments) {
5008 int stack_passed_words = 0;
5009 num_reg_arguments += 2 * num_double_arguments;
5012 if (num_reg_arguments > kRegisterPassedArguments) {
5013 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5016 return stack_passed_words;
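// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): under the MIPS O32 calling
// convention the first four argument words travel in a0-a3 and each double
// occupies two words, so the helper above folds doubles into the word count
// and then counts only the words that spill past the four register slots:
static inline int StackPassedWordsSketch(int num_reg_arguments,
                                         int num_double_arguments) {
  const int kRegisterPassedArgumentWords = 4;  // a0-a3.
  int words = num_reg_arguments + 2 * num_double_arguments;
  return words > kRegisterPassedArgumentWords
             ? words - kRegisterPassedArgumentWords
             : 0;
}
// ---------------------------------------------------------------------------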
5020 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5024 uint32_t encoding_mask) {
5027 Check(ne, kNonObject, at, Operand(zero_reg));
5033 li(scratch, Operand(encoding_mask));
5034 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5039 Label index_tag_ok, index_tag_bad;
5040 TrySmiTag(index, scratch, &index_tag_bad);
5041 Branch(&index_tag_ok);
5042 bind(&index_tag_bad);
5043 Abort(kIndexIsTooLarge);
5044 bind(&index_tag_ok);
5047 Check(lt, kIndexIsTooLarge, index, Operand(at));
5049 ASSERT(Smi::FromInt(0) == 0);
5050 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5052 SmiUntag(index, index);
5056 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5057 int num_double_arguments,
5059 int frame_alignment = ActivationFrameAlignment();
5066 int stack_passed_arguments = CalculateStackPassedWords(
5067 num_reg_arguments, num_double_arguments);
5068 if (frame_alignment > kPointerSize) {
5072 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5074 And(sp, sp, Operand(-frame_alignment));
5075 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5077 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5082 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5084 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5088 void MacroAssembler::CallCFunction(ExternalReference function,
5089 int num_reg_arguments,
5090 int num_double_arguments) {
5091 li(t8, Operand(function));
5092 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5096 void MacroAssembler::CallCFunction(Register function,
5097 int num_reg_arguments,
5098 int num_double_arguments) {
5099 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5103 void MacroAssembler::CallCFunction(ExternalReference function,
5104 int num_arguments) {
5105 CallCFunction(function, num_arguments, 0);
5109 void MacroAssembler::CallCFunction(Register function,
5110 int num_arguments) {
5111 CallCFunction(function, num_arguments, 0);
5115 void MacroAssembler::CallCFunctionHelper(Register function,
5116 int num_reg_arguments,
5117 int num_double_arguments) {
5125 #if V8_HOST_ARCH_MIPS
5126 if (emit_debug_code()) {
5127 int frame_alignment = OS::ActivationFrameAlignment();
5128 int frame_alignment_mask = frame_alignment - 1;
5129 if (frame_alignment > kPointerSize) {
5131 Label alignment_as_expected;
5132 And(at, sp, Operand(frame_alignment_mask));
5133 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5136 stop("Unexpected alignment in CallCFunction");
5137 bind(&alignment_as_expected);
5140 #endif // V8_HOST_ARCH_MIPS
5146 if (!function.is(t9)) {
5153 int stack_passed_arguments = CalculateStackPassedWords(
5154 num_reg_arguments, num_double_arguments);
5156 if (OS::ActivationFrameAlignment() > kPointerSize) {
5159 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5164 #undef BRANCH_ARGS_CHECK
5167 void MacroAssembler::PatchRelocatedValue(Register li_location,
5169 Register new_value) {
5172 if (emit_debug_code()) {
5174 Check(eq, kTheInstructionToPatchShouldBeALui,
5175 scratch, Operand(LUI));
5182 lw(scratch, MemOperand(li_location, kInstrSize));
5184 if (emit_debug_code()) {
5186 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5187 scratch, Operand(ORI));
5188 lw(scratch, MemOperand(li_location, kInstrSize));
5191 sw(scratch, MemOperand(li_location, kInstrSize));
5194 FlushICache(li_location, 2);
5197 void MacroAssembler::GetRelocatedValue(Register li_location,
5201 if (emit_debug_code()) {
5203 Check(eq, kTheInstructionShouldBeALui,
5204 value, Operand(LUI));
5211 lw(scratch, MemOperand(li_location, kInstrSize));
5212 if (emit_debug_code()) {
5214 Check(eq, kTheInstructionShouldBeAnOri,
5215 scratch, Operand(ORI));
5216 lw(scratch, MemOperand(li_location, kInstrSize));
5222 or_(value, value, scratch);
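// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): a 32-bit constant loaded
// with li is materialized as a lui/ori pair, so PatchRelocatedValue and
// GetRelocatedValue only need to split a value into, or rebuild it from, two
// 16-bit immediates:
#include <cstdint>

static inline void SplitLuiOriImmediates(uint32_t value,
                                         uint32_t* lui_imm16,
                                         uint32_t* ori_imm16) {
  *lui_imm16 = value >> 16;     // Upper half, placed by lui.
  *ori_imm16 = value & 0xFFFF;  // Lower half, or-ed in by ori.
}

static inline uint32_t JoinLuiOriImmediates(uint32_t lui_imm16,
                                            uint32_t ori_imm16) {
  return (lui_imm16 << 16) | (ori_imm16 & 0xFFFF);
}
// ---------------------------------------------------------------------------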
5226 void MacroAssembler::CheckPageFlag(
5231 Label* condition_met) {
5232 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5233 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5234 And(scratch, scratch, Operand(mask));
5235 Branch(condition_met, cc, scratch, Operand(zero_reg));
5239 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5241 Label* if_deprecated) {
5242 if (map->CanBeDeprecated()) {
5243 li(scratch, Operand(map));
5245 And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5246 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5251 void MacroAssembler::JumpIfBlack(Register object,
5255 HasColor(object, scratch0, scratch1, on_black, 1, 0);
5256 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5260 void MacroAssembler::HasColor(Register object,
5261 Register bitmap_scratch,
5262 Register mask_scratch,
5269 GetMarkBits(object, bitmap_scratch, mask_scratch);
5271 Label other_color, word_boundary;
5272 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5273 And(t8, t9, Operand(mask_scratch));
5274 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5276 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5277 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5278 And(t8, t9, Operand(mask_scratch));
5279 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5282 bind(&word_boundary);
5283 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5284 And(t9, t9, Operand(1));
5285 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5293 void MacroAssembler::JumpIfDataObject(Register value,
5295 Label* not_data_object) {
5297 Label is_data_object;
5299 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5300 Branch(&is_data_object, eq, t8, Operand(scratch));
5307 Branch(not_data_object, ne, t8, Operand(zero_reg));
5308 bind(&is_data_object);
5312 void MacroAssembler::GetMarkBits(Register addr_reg,
5313 Register bitmap_reg,
5314 Register mask_reg) {
5316 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5321 Addu(bitmap_reg, bitmap_reg, t8);
5322 li(t8, Operand(1));
5323 sllv(mask_reg, t8, mask_reg);
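// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): GetMarkBits maps an object
// address to the 32-bit marking-bitmap cell inside its page plus a one-bit
// mask within that cell. One mark bit covers one pointer-sized word, so the
// word index inside the page splits into a cell index (upper bits) and a bit
// index (low five bits). The page mask and bitmap offset are parameters here,
// and the two constants are assumptions for a 32-bit heap layout:
#include <cstddef>
#include <cstdint>

struct MarkBitLocation {
  uintptr_t cell_address;  // Address of the bitmap cell to load.
  uint32_t mask;           // Single bit selecting this word's mark bit.
};

static inline MarkBitLocation GetMarkBitsSketch(uintptr_t addr,
                                                uintptr_t page_alignment_mask,
                                                uintptr_t bitmap_offset) {
  const int kPointerSizeLog2 = 2;  // 32-bit words (assumed).
  const int kBitsPerCellLog2 = 5;  // 32 mark bits per bitmap cell (assumed).
  uintptr_t page_start = addr & ~page_alignment_mask;
  uintptr_t word_index = (addr & page_alignment_mask) >> kPointerSizeLog2;
  uint32_t bit_index = static_cast<uint32_t>(word_index) &
                       ((1u << kBitsPerCellLog2) - 1);
  uintptr_t cell_index = word_index >> kBitsPerCellLog2;
  MarkBitLocation loc;
  loc.cell_address =
      page_start + bitmap_offset + cell_index * sizeof(uint32_t);
  loc.mask = 1u << bit_index;  // Matches li(t8, 1); sllv(mask_reg, t8, ...).
  return loc;
}
// ---------------------------------------------------------------------------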
5327 void MacroAssembler::EnsureNotWhite(
5329 Register bitmap_scratch,
5330 Register mask_scratch,
5331 Register load_scratch,
5332 Label* value_is_white_and_not_data) {
5334 GetMarkBits(value, bitmap_scratch, mask_scratch);
5337 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5338 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5339 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5340 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5346 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5347 And(t8, mask_scratch, load_scratch);
5348 Branch(&done, ne, t8, Operand(zero_reg));
5350 if (emit_debug_code()) {
5354 sll(t8, mask_scratch, 1);
5355 And(t8, load_scratch, t8);
5356 Branch(&ok, eq, t8, Operand(zero_reg));
5357 stop("Impossible marking bit pattern");
5363 Register map = load_scratch;
5364 Register length = load_scratch;
5365 Label is_data_object;
5369 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5372 Branch(&skip, ne, t8, Operand(map));
5373 li(length, HeapNumber::kSize);
5374 Branch(&is_data_object);
5383 Register instance_type = load_scratch;
5386 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5397 Branch(&skip, eq, t8, Operand(zero_reg));
5398 li(length, ExternalString::kSize);
5399 Branch(&is_data_object);
5413 Branch(&skip, eq, t8, Operand(zero_reg));
5420 bind(&is_data_object);
5423 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5424 Or(t8, t8, Operand(mask_scratch));
5425 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5427 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5428 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5429 Addu(t8, t8, Operand(length));
5430 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5442 RecordComment("Throw message: ");
5447 li(a0, Operand(Smi::FromInt(reason)));
5454 CallRuntime(Runtime::kHiddenThrowMessage, 1);
5456 CallRuntime(Runtime::kHiddenThrowMessage, 1);
5459 if (is_trampoline_pool_blocked()) {
5465 static const int kExpectedThrowMessageInstructions = 14;
5466 int throw_instructions = InstructionsGeneratedSince(&throw_start);
5467 ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
5468 while (throw_instructions++ < kExpectedThrowMessageInstructions) {
5475 void MacroAssembler::ThrowIf(Condition cc,
5487 void MacroAssembler::LoadInstanceDescriptors(Register map,
5488 Register descriptors) {
5493 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5495 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5499 void MacroAssembler::EnumLength(Register dst, Register map) {
5502 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5506 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5507 Register empty_fixed_array_value = t2;
5508 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5518 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5527 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5535 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5538 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5539 Branch(call_runtime, ne, a2, Operand(at));
5543 Branch(&next, ne, a2, Operand(null_value));
5547 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5548 ASSERT(!output_reg.is(input_reg));
5550 li(output_reg, Operand(255));
5552 Branch(&done, gt, input_reg, Operand(output_reg));
5555 mov(output_reg, zero_reg);
5556 mov(output_reg, input_reg);
5561 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5568 Move(temp_double_reg, 0.0);
5569 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5572 mov(result_reg, zero_reg);
5577 Move(temp_double_reg, 255.0);
5578 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5579 li(result_reg, Operand(255));
5584 cvt_w_d(temp_double_reg, input_reg);
5585 mfc1(result_reg, temp_double_reg);
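// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): ClampDoubleToUint8 pins the
// input into [0, 255] and converts the in-range case with cvt_w_d, which
// rounds according to the FCSR mode (round-to-nearest by default). A plain C++
// rendering under that rounding assumption:
#include <cmath>
#include <cstdint>

static inline uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;    // Negative, zero and NaN clamp to 0.
  if (value >= 255.0) return 255;  // Everything above the top clamps to 255.
  return static_cast<uint8_t>(std::nearbyint(value));  // Round in range.
}
// ---------------------------------------------------------------------------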
5590 void MacroAssembler::TestJSArrayForAllocationMemento(
5591 Register receiver_reg,
5592 Register scratch_reg,
5593 Label* no_memento_found,
5595 Label* allocation_memento_present) {
5596 ExternalReference new_space_start =
5597 ExternalReference::new_space_start(isolate());
5598 ExternalReference new_space_allocation_top =
5599 ExternalReference::new_space_allocation_top_address(isolate());
5600 Addu(scratch_reg, receiver_reg,
5601 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5602 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5603 li(at, Operand(new_space_allocation_top));
5605 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5606 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5607 if (allocation_memento_present) {
5608 Branch(allocation_memento_present, cond, scratch_reg,
5609 Operand(isolate()->factory()->allocation_memento_map()));
5621 if (reg1.is_valid()) regs |= reg1.bit();
5622 if (reg2.is_valid()) regs |= reg2.bit();
5623 if (reg3.is_valid()) regs |= reg3.bit();
5624 if (reg4.is_valid()) regs |= reg4.bit();
5625 if (reg5.is_valid()) regs |= reg5.bit();
5626 if (reg6.is_valid()) regs |= reg6.bit();
5628 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5629 Register candidate = Register::FromAllocationIndex(i);
5630 if (regs & candidate.bit()) continue;
5638 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5643 ASSERT(!scratch1.is(scratch0));
5644 Factory* factory = isolate()->factory();
5645 Register current = scratch0;
5649 Move(current, object);
5655 Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
5658 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5662 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5663 if (r1.is(r2)) return true;
5664 if (r1.is(r3)) return true;
5665 if (r1.is(r4)) return true;
5666 if (r2.is(r3)) return true;
5667 if (r2.is(r4)) return true;
5668 if (r3.is(r4)) return true;
return false;
}
5673 CodePatcher::CodePatcher(byte* address, int instructions)
5674 : address_(address),
5675 size_(instructions * Assembler::kInstrSize),
5676 masm_(NULL, address, size_ + Assembler::kGap) {
5680 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5684 CodePatcher::~CodePatcher() {
5686 CPU::FlushICache(address_, size_);
5689 ASSERT(masm_.pc_ == address_ + size_);
5690 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5694 void CodePatcher::Emit(Instr instr) {
5695 masm()->emit(instr);
5699 void CodePatcher::Emit(Address addr) {
5700 masm()->emit(reinterpret_cast<Instr>(addr));
5704 void CodePatcher::ChangeBranchCondition(Condition cond) {
5705 Instr instr = Assembler::instr_at(masm_.pc_);
5706 ASSERT(Assembler::IsBranch(instr));
5707 uint32_t opcode = Assembler::GetOpcodeField(instr);
5726 void MacroAssembler::TruncatingDiv(Register result,
5727 Register dividend,
5728 int32_t divisor) {
5729 ASSERT(!dividend.is(result));
5730 ASSERT(!dividend.is(at));
5732 MultiplierAndShift ms(divisor);
5733 li(at, Operand(ms.multiplier()));
5734 Mult(dividend, Operand(at));
5735 mfhi(result);
5736 if (divisor > 0 && ms.multiplier() < 0) {
5737 Addu(result, result, Operand(dividend));
5739 if (divisor < 0 && ms.multiplier() > 0) {
5740 Subu(result, result, Operand(dividend));
5742 if (ms.shift() > 0) sra(result, result, ms.shift());
5743 srl(at, dividend, 31);
5744 Addu(result, result, Operand(at));
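// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the V8 source): TruncatingDiv divides by a
// known constant without a divide instruction. A precomputed "magic"
// multiplier/shift pair puts an estimate of the quotient into the high 32 bits
// of a 64-bit product; the correction step and the final add of the dividend's
// sign bit turn that estimate into a quotient truncated toward zero. The
// sketch assumes a positive divisor with a valid magic pair and an arithmetic
// right shift for signed values, mirroring the emitted sequence:
#include <cstdint>

static inline int32_t TruncatingDivByMagic(int32_t dividend,
                                           int32_t magic_multiplier,
                                           int magic_shift) {
  int64_t product = static_cast<int64_t>(dividend) * magic_multiplier;
  int32_t result = static_cast<int32_t>(product >> 32);  // mfhi(result).
  if (magic_multiplier < 0) result += dividend;  // Correction for divisor > 0.
  if (magic_shift > 0) result >>= magic_shift;   // sra(result, shift).
  // srl(at, dividend, 31); Addu(result, result, at): +1 for negative input.
  result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
  return result;
}
// ---------------------------------------------------------------------------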
5750 #endif // V8_TARGET_ARCH_MIPS