#if defined(V8_TARGET_ARCH_MIPS)

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}
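// Root-list accessors. LoadRoot/StoreRoot read and write entries of the
// isolate's root array, addressed relative to the dedicated root register
// (s6 on MIPS); the conditional overloads only perform the access when the
// given condition holds for src1 compared against src2.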
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    li(result, Operand(cell));
    lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
  } else {
    li(result, Operand(object));
  }
}
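// Safepoint support. The Push/PopSafepointRegisters helpers keep a fixed-size
// block of general-purpose registers (plus FPU doubles in the *AndDoubles
// variants) on the stack so that the GC can find spilled values via the
// safepoint tables; the *Slot helpers below map a register to its slot in
// that block.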
void MacroAssembler::PushSafepointRegisters() {
  if (num_unsaved > 0) {


void MacroAssembler::PopSafepointRegisters() {
  if (num_unsaved > 0) {


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
void MacroAssembler::InNewSpace(Register object,
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
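// Write-barrier helpers. RecordWriteField and RecordWrite notify the
// incremental marker and the store buffer when a pointer is written into a
// heap object; in debug code the registers involved are clobbered afterwards
// with kZapValue-based constants so stale uses are caught early.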
void MacroAssembler::RecordWriteField(
  JumpIfSmi(value, &done);
  if (emit_debug_code()) {
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
              remembered_set_action,
  if (emit_debug_code()) {
    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
void MacroAssembler::RecordWrite(Register object,
  if (emit_debug_code()) {
    Assert(
        eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
  JumpIfSmi(value, &done);
                MemoryChunk::kPointersToHereAreInterestingMask,
  CheckPageFlag(object,
                MemoryChunk::kPointersFromHereAreInterestingMask,
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  if (emit_debug_code()) {
    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
void MacroAssembler::RememberedSetHelper(Register object,
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
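// Security check used by the IC machinery: CheckAccessGlobalProxy verifies
// that the current native context and the holder's native context match (or
// share a security token) and jumps to |miss| otherwise.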
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  Check(ne, "we should not have an empty lexical context",
        scratch, Operand(zero_reg));
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
  if (emit_debug_code()) {
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, "JSGlobalObject::native_context should be a native context.",
          holder_reg, Operand(at));
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));
  if (emit_debug_code()) {
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, "JSGlobalObject::native_context should be a native context.",
          holder_reg, Operand(at));
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  int token_offset = Context::kHeaderSize +
  Branch(miss, ne, scratch, Operand(at));
  bind(&same_contexts);
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  xor_(reg0, reg0, scratch);
  nor(scratch, reg0, zero_reg);
  addu(reg0, scratch, at);
  xor_(reg0, reg0, at);
  addu(reg0, reg0, at);
  xor_(reg0, reg0, at);
  sll(scratch, reg0, 11);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);
  xor_(reg0, reg0, at);
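// LoadFromNumberDictionary computes the seeded hash of the key (GetNumberHash
// above), masks it with the dictionary capacity, and probes up to kProbes
// entries inline; the last probe branches to |miss| instead of falling
// through, and a final details check rejects non-field entries.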
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
  GetNumberHash(reg0, reg1);
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  Subu(reg1, reg1, Operand(1));
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    and_(reg2, reg2, reg1);
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    addu(reg2, reg2, at);
    addu(reg2, elements, at);
    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kProbes - 1) {
      Branch(&done, eq, key, Operand(at));
      Branch(miss, ne, key, Operand(at));
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
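// Macro versions of the MIPS arithmetic and logical instructions. Each one
// emits the register form when the operand is a register, the immediate form
// when the immediate fits (is_int16/is_uint16) and needs no relocation, and
// otherwise materializes the constant in 'at' first. For example,
// Addu(a0, a0, Operand(4)) becomes a single addiu, while a full 32-bit
// constant is loaded with li(at, ...) followed by addu.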
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
    addu(rd, rs, rt.rm());
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
    subu(rd, rs, rt.rm());
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
      mul(rd, rs, rt.rm());


void MacroAssembler::Mult(Register rs, const Operand& rt) {


void MacroAssembler::Multu(Register rs, const Operand& rt) {


void MacroAssembler::Div(Register rs, const Operand& rt) {


void MacroAssembler::Divu(Register rs, const Operand& rt) {


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
    and_(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
    or_(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
    xor_(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
    nor(rd, rs, rt.rm());


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  xor_(rs, rt.rm(), at);


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
    slt(rd, rs, rt.rm());
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
    sltu(rd, rs, rt.rm());
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
      rotrv(rd, rs, rt.rm());
      rotr(rd, rs, rt.imm32_);
      subu(at, zero_reg, rt.rm());
      srlv(rd, rs, rt.rm());
      if (rt.imm32_ == 0) {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
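// MultiPush/MultiPop transfer every register named in a RegList with a single
// stack-pointer adjustment; the Reversed variants emit the slots in the
// opposite order, and the FPU variants do the same for double registers
// using sdc1/ldc1.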
void MacroAssembler::MultiPush(RegList regs) {
  Subu(sp, sp, Operand(stack_offset));
    if ((regs & (1 << i)) != 0) {


void MacroAssembler::MultiPushReversed(RegList regs) {
  Subu(sp, sp, Operand(stack_offset));
    if ((regs & (1 << i)) != 0) {


void MacroAssembler::MultiPop(RegList regs) {
    if ((regs & (1 << i)) != 0) {
  addiu(sp, sp, stack_offset);


void MacroAssembler::MultiPopReversed(RegList regs) {
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
  addiu(sp, sp, stack_offset);
void MacroAssembler::MultiPushFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  Subu(sp, sp, Operand(stack_offset));
    if ((regs & (1 << i)) != 0) {
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));


void MacroAssembler::MultiPopFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
  addiu(sp, sp, stack_offset);


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
  addiu(sp, sp, stack_offset);
void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);
  PrepareCallCFunction(2, t0);
  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
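// Ext and Ins wrap the MIPS32R2 bit-field extract/insert instructions; on
// architecture variants without ext_/ins_ support they fall back to an
// equivalent shift-and-mask sequence.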
void MacroAssembler::Ext(Register rt,
    ext_(rt, rs, pos, size);
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);
    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);


void MacroAssembler::Ins(Register rt,
    ins_(rt, rs, pos, size);
    ASSERT(!rt.is(t8) && !rs.is(t8));
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    nor(at, at, zero_reg);
void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister scratch) {
  Cvt_d_uw(fd, t8, scratch);


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister scratch) {
  Label conversion_done;
  Branch(&conversion_done, eq, t9, Operand(zero_reg));
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  add_d(fd, fd, scratch);
  bind(&conversion_done);


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));


void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));


void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));


void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kLoongson && fd.is(fs)) {
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister scratch) {
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  Or(rs, rs, 1 << 31);
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
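// BranchF compares two FPU doubles using the coprocessor condition codes: an
// unordered compare (c(UN, D, ...)) routes NaN operands to the |nan| label,
// and the requested condition is then tested with the matching ordered or
// unordered compare before branching to |target|.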
void MacroAssembler::BranchF(Label* target,
    c(UN, D, cmp1, cmp2);
        c(OLT, D, cmp1, cmp2);
        c(ULE, D, cmp1, cmp2);
        c(ULT, D, cmp1, cmp2);
        c(OLE, D, cmp1, cmp2);
        c(EQ, D, cmp1, cmp2);
        c(EQ, D, cmp1, cmp2);
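// Move(FPURegister, double) materializes a double constant from its two
// 32-bit halves via mtc1; +0.0 and -0.0 are special-cased, and halves that
// are zero are moved in directly from zero_reg instead of being loaded
// through 'at'.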
void MacroAssembler::Move(FPURegister dst, double imm) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
  if (value.bits == zero.bits && !force_load) {
  } else if (value.bits == minus_zero.bits && !force_load) {
    DoubleAsTwoUInt32(imm, &lo, &hi);
      li(at, Operand(lo));
      mtc1(zero_reg, dst);
      li(at, Operand(hi));
      mtc1(at, dst.high());
      mtc1(zero_reg, dst.high());
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
    Branch(&done, ne, rt, Operand(zero_reg));


void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
    Branch(&done, eq, rt, Operand(zero_reg));


void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
    ASSERT(!(rs.is(t8) || rd.is(t8)));
    Register scratch = t8;
    cfc1(scratch, FCSR);
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, eq, scratch, Operand(zero_reg));


void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
    ASSERT(!(rs.is(t8) || rd.is(t8)));
    Register scratch = t8;
    cfc1(scratch, FCSR);
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, ne, scratch, Operand(zero_reg));
void MacroAssembler::Clz(Register rd, Register rs) {
    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
    Register scratch = t9;
    and_(scratch, at, mask);
    Branch(&end, ne, scratch, Operand(zero_reg));
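// ConvertToInt32 turns a heap number into a 32-bit integer, jumping to
// |not_int32| when the exponent says the value cannot be represented. With
// an FPU it simply truncates via trunc_w_d; without one it shifts the
// mantissa by hand and re-applies the sign bit.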
void MacroAssembler::ConvertToInt32(Register source,
                                    FPURegister double_scratch,
  Label right_exponent, done;
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  mov(dest, zero_reg);
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    srl(dest, scratch2, HeapNumber::kExponentShift);
    li(at, Operand(30));
    subu(dest, at, dest);
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32 - shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    srlv(scratch, scratch, dest);
    subu(scratch2, zero_reg, scratch);
    Movz(scratch, scratch2, dest);
                                   Register except_flag,
  CpuFeatures::Scope scope(FPU);
  cfc1(scratch1, FCSR);
  ctc1(zero_reg, FCSR);
  switch (rounding_mode) {
      Round_w_d(result, double_input);
      Trunc_w_d(result, double_input);
      Ceil_w_d(result, double_input);
      Floor_w_d(result, double_input);
  cfc1(except_flag, FCSR);
  ctc1(scratch1, FCSR);
  And(except_flag, except_flag, Operand(except_mask));
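// The truncation helper above saves the caller's FCSR, clears it, rounds with
// the requested mode, and then hands back the accumulated FPU exception bits
// (masked by except_mask) in |except_flag| so callers can detect an inexact
// or out-of-range conversion and take a slower path.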
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
  Label done, normal_exponent, restore_sign;
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);
  Subu(scratch, result, HeapNumber::kExponentMask);
  Movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
  Register sign = result;
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);
      Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  sllv(input_high, input_high, scratch);
  bind(&high_shift_done);
  Label pos_shift, shift_done;
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);
  srlv(input_low, input_low, scratch);
  Or(input_high, input_high, Operand(input_low));
  Subu(result, zero_reg, input_high);
  Movz(result, input_high, scratch);
void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch3) {
  CpuFeatures::Scope scope(FPU);
  ASSERT(!scratch2.is(result));
  ASSERT(!scratch3.is(result));
  ASSERT(!scratch3.is(scratch2));
  ASSERT(!scratch.is(result) &&
         !scratch.is(scratch2) &&
         !scratch.is(scratch3));
  ASSERT(!single_scratch.is(double_input));
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  Branch(&done, eq, scratch, Operand(zero_reg));
  Register input_high = scratch2;
  Register input_low = scratch3;
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         int num_least_bits) {


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));


#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
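// Branch emission strategy: when the target label is bound and near, a short
// (16-bit offset) branch is emitted directly; otherwise the condition is
// negated so that a short branch can skip over a long jump, which keeps
// branches correct when the target is out of native branch range or a
// trampoline pool has been emitted in between.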
  BranchShort(offset, bdslot);
  BranchShort(offset, cond, rs, rt, bdslot);
  if (L->is_bound()) {
    BranchShort(L, bdslot);
    if (is_trampoline_emitted()) {
      BranchShort(L, bdslot);


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
  if (L->is_bound()) {
    BranchShort(L, cond, rs, rt, bdslot);
      BranchShort(&skip, neg_cond, rs, rt);
    if (is_trampoline_emitted()) {
      BranchShort(&skip, neg_cond, rs, rt);
      BranchShort(L, cond, rs, rt, bdslot);


void MacroAssembler::Branch(Label* L,
                            Heap::RootListIndex index,
  LoadRoot(at, index);
  Branch(L, cond, rs, Operand(at), bdslot);
void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register scratch = at;
        beq(rs, r2, offset);
        bne(rs, r2, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        beq(rs, r2, offset);
        bne(rs, r2, offset);
        if (rt.imm32_ == 0) {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
  b(shifted_branch_offset(L, false));
void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register scratch = at;
        offset = shifted_branch_offset(L, false);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        offset = shifted_branch_offset(L, false);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
  BranchAndLinkShort(offset, bdslot);


void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);


void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    BranchAndLinkShort(L, bdslot);
    if (is_trampoline_emitted()) {
      BranchAndLinkShort(L, bdslot);


void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
  if (L->is_bound()) {
    BranchAndLinkShort(L, cond, rs, rt, bdslot);
      BranchShort(&skip, neg_cond, rs, rt);
    if (is_trampoline_emitted()) {
      BranchShort(&skip, neg_cond, rs, rt);
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        Register rs, const Operand& rt,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register scratch = at;
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));


void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register scratch = at;
      offset = shifted_branch_offset(L, false);
      offset = shifted_branch_offset(L, false);
      offset = shifted_branch_offset(L, false);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
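// Jump/Call helpers. Calls to external addresses and code objects are routed
// through t9 (the MIPS ABI call register); each variant has a matching
// CallSize so the ASSERT_EQ checks below can verify that the emitted
// sequence has exactly the predicted length.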
void MacroAssembler::Jump(Register target,
  BlockTrampolinePoolScope block_trampoline_pool(this);
    BRANCH_ARGS_CHECK(cond, rs, rt);


void MacroAssembler::Jump(intptr_t target,
                          RelocInfo::Mode rmode,
  li(t9, Operand(target, rmode));
  Jump(t9, al, zero_reg, Operand(zero_reg), bd);


void MacroAssembler::Jump(Address target,
                          RelocInfo::Mode rmode,
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);


void MacroAssembler::Jump(Handle<Code> code,
                          RelocInfo::Mode rmode,
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);


int MacroAssembler::CallSize(Register target,
  return size * kInstrSize;


void MacroAssembler::Call(Register target,
  BlockTrampolinePoolScope block_trampoline_pool(this);
    BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));


int MacroAssembler::CallSize(Address target,
                             RelocInfo::Mode rmode,
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 2 * kInstrSize;


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  Call(t9, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
  return CallSize(reinterpret_cast<Address>(code.location()),
                  rmode, cond, rs, rt, bd);


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
void MacroAssembler::Ret(Condition cond,
  Jump(ra, cond, rs, rt, bd);


  BlockTrampolinePoolScope block_trampoline_pool(this);
  imm28 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);


  BlockTrampolinePoolScope block_trampoline_pool(this);
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);


  BlockTrampolinePoolScope block_trampoline_pool(this);
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
void MacroAssembler::DropAndRet(int drop) {
  addiu(sp, sp, drop * kPointerSize);


void MacroAssembler::DropAndRet(int drop,
                                const Operand& r2) {


void MacroAssembler::Drop(int count,
                          const Operand& op) {
  addiu(sp, sp, count * kPointerSize);


void MacroAssembler::Swap(Register reg1,
  if (scratch.is(no_reg)) {
    Xor(reg1, reg1, Operand(reg2));
    Xor(reg2, reg2, Operand(reg1));
    Xor(reg1, reg1, Operand(reg2));


void MacroAssembler::Call(Label* target) {
  BranchAndLink(target);


void MacroAssembler::Push(Handle<Object> handle) {
  li(at, Operand(handle));
#ifdef ENABLE_DEBUGGER_SUPPORT

void MacroAssembler::DebugBreak() {
  PrepareCEntryArgs(0);
  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
  ASSERT(AllowThisStubCall(&ces));

#endif  // ENABLE_DEBUGGER_SUPPORT
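// Exception handling. PushTryHandler links a new StackHandler (next, code,
// state, context, fp -- five words, as the STATIC_ASSERTs below check) onto
// the chain rooted at Isolate::kHandlerAddress; PopTryHandler unlinks it,
// and Throw/ThrowUncatchable unwind to the matching handler and jump to its
// entry.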
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
                   StackHandler::IndexField::encode(handler_index) |
                   StackHandler::KindField::encode(kind);
  li(t2, Operand(state));
  if (kind == StackHandler::JS_ENTRY) {
    Push(zero_reg, zero_reg, t2, t1);
    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));


void MacroAssembler::PopTryHandler() {
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
void MacroAssembler::JumpToHandlerEntry() {
  srl(a2, a2, StackHandler::kKindWidth);


void MacroAssembler::Throw(Register value) {
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
  Branch(&done, eq, cp, Operand(zero_reg));
  JumpToHandlerEntry();
void MacroAssembler::ThrowUncatchable(Register value) {
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
  if (!value.is(v0)) {
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  Label fetch_next, check_kind;
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  And(a2, a2, Operand(StackHandler::KindField::kMask));
  Branch(&fetch_next, ne, a2, Operand(zero_reg));
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
  JumpToHandlerEntry();
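// New-space allocation is a bump-pointer operation: load the allocation top,
// add the requested size, compare against the allocation limit, and take the
// slow path via |gc_required| on overflow. When inline allocation is disabled
// (FLAG_inline_new is false) the result registers are instead filled with
// recognizable dummy values.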
void MacroAssembler::AllocateInNewSpace(int object_size,
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9));
  ASSERT(!scratch2.is(t9));
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  li(topaddr, Operand(new_space_allocation_top));
  li(obj_size_reg, Operand(object_size));
  if (emit_debug_code()) {
    Check(eq, "Unexpected allocation top", result, Operand(t9));
  Addu(scratch2, result, Operand(obj_size_reg));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
void MacroAssembler::AllocateInNewSpace(Register object_size,
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!object_size.is(t9));
  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  Register topaddr = scratch1;
  li(topaddr, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    if (emit_debug_code()) {
      Check(eq, "Unexpected allocation top", result, Operand(t9));
  if ((flags & SIZE_IN_WORDS) != 0) {
    Addu(scratch2, result, scratch2);
    Addu(scratch2, result, Operand(object_size));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
  if (emit_debug_code()) {
    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
  if ((flags & TAG_OBJECT) != 0) {
void MacroAssembler::UndoAllocationInNewSpace(Register object,
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  li(scratch, Operand(new_space_allocation_top));
  Check(less, "Undo allocation of non allocated memory",
        object, Operand(scratch));
  li(scratch, Operand(new_space_allocation_top));
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Label* gc_required) {
  sll(scratch1, length, 1);
  addiu(scratch1, scratch1,
  AllocateInNewSpace(scratch1,
  InitializeNewString(result,
                      Heap::kStringMapRootIndex,


void MacroAssembler::AllocateAsciiString(Register result,
                                         Label* gc_required) {
  AllocateInNewSpace(scratch1,
  InitializeNewString(result,
                      Heap::kAsciiStringMapRootIndex,


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
  InitializeNewString(result,
                      Heap::kConsStringMapRootIndex,


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
  InitializeNewString(result,
                      Heap::kConsAsciiStringMapRootIndex,


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
  InitializeNewString(result,
                      Heap::kSlicedStringMapRootIndex,


void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
  InitializeNewString(result,
                      Heap::kSlicedAsciiStringMapRootIndex,
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register heap_number_map,
  AllocateInNewSpace(HeapNumber::kSize,
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 Label* gc_required) {
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
void MacroAssembler::CopyFields(Register dst,
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
    if ((temps & (1 << i)) != 0) {
  for (int i = 0; i < field_count; i++) {
void MacroAssembler::CopyBytes(Register src,
  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, "Expecting alignment for CopyBytes",
           scratch, Operand(zero_reg));
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  Addu(src, src, kPointerSize);
  srl(scratch, scratch, 8);
  srl(scratch, scratch, 8);
  srl(scratch, scratch, 8);
  Subu(length, length, Operand(kPointerSize));
  Branch(&done, eq, length, Operand(zero_reg));
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
  Addu(start_offset, start_offset, kPointerSize);
  Branch(&loop, lt, start_offset, Operand(end_offset));
void MacroAssembler::CheckFastElements(Register map,
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));


void MacroAssembler::CheckFastObjectElements(Register map,
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));


void MacroAssembler::CheckFastSmiElements(Register map,
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register receiver_reg,
                                                 Register elements_reg,
  Label smi_value, maybe_nan, have_double_value, is_nan, done;
  Register mantissa_reg = scratch2;
  Register exponent_reg = scratch3;
  JumpIfSmi(value_reg, &smi_value);
           Heap::kHeapNumberMapRootIndex,
  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  bind(&have_double_value);
  Addu(scratch1, scratch1, elements_reg);
  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
  uint64_t nan_int64 = BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
  jmp(&have_double_value);
  Addu(scratch1, elements_reg,
  Addu(scratch1, scratch1, scratch2);
  FloatingPointHelper::Destination destination;
  if (CpuFeatures::IsSupported(FPU)) {
    destination = FloatingPointHelper::kFPURegisters;
    destination = FloatingPointHelper::kCoreRegisters;
  Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  FloatingPointHelper::ConvertIntToDouble(this,
  if (destination == FloatingPointHelper::kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
void MacroAssembler::CompareMapAndBranch(Register obj,
                                         Label* early_success,
  CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);


void MacroAssembler::CompareMapAndBranch(Register obj_map,
                                         Label* early_success,
  Operand right = Operand(map);
    Map* current_map = *map;
      current_map = current_map->LookupElementsTransitionMap(kind);
      if (!current_map) break;
      Branch(early_success, eq, obj_map, right);
      right = Operand(Handle<Map>(current_map));
  Branch(branch_to, cond, obj_map, right);


void MacroAssembler::CheckMap(Register obj,
    JumpIfSmi(obj, fail);
  CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);


void MacroAssembler::DispatchMap(Register obj,
                                 Handle<Code> success,
    JumpIfSmi(obj, &fail);
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));


void MacroAssembler::CheckMap(Register obj,
                              Heap::RootListIndex index,
    JumpIfSmi(obj, fail);
  LoadRoot(at, index);
  Branch(fail, ne, scratch, Operand(at));
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  CpuFeatures::Scope scope(FPU);


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  CpuFeatures::Scope scope(FPU);


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
  CpuFeatures::Scope scope(FPU);
    if (dreg2.is(f12)) {
    Move(a0, a1, dreg1);
    Move(a2, a3, dreg2);


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
  CpuFeatures::Scope scope(FPU);
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
    li(dst, Operand(Smi::FromInt(1)));
    li(dst, Operand(Smi::FromInt(0)));
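// InvokePrologue reconciles the expected and actual argument counts before a
// JSFunction call: if they are known to match, the call proceeds directly;
// otherwise a0/a2 are set up and control is transferred to the
// ArgumentsAdaptorTrampoline, which adds or drops arguments as needed.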
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper,
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;
  ASSERT(actual.is_immediate() || actual.reg().is(a0));
  ASSERT(expected.is_immediate() || expected.reg().is(a2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
      li(a0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        definitely_matches = true;
        *definitely_mismatches = true;
        li(a2, Operand(expected.immediate()));
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      li(a3, Operand(code_constant));
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(t1, call_kind);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
      SetCallKind(t1, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    bind(&regular_invoke);
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                const CallWrapper& call_wrapper,
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper, call_kind);
  if (!definitely_mismatches) {
      call_wrapper.BeforeCall(CallSize(code));
      SetCallKind(t1, call_kind);
      call_wrapper.AfterCall();
      SetCallKind(t1, call_kind);


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, code, no_reg,
                 &done, &definitely_mismatches, flag,
                 NullCallWrapper(), call_kind);
  if (!definitely_mismatches) {
      SetCallKind(t1, call_kind);
      SetCallKind(t1, call_kind);
void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper,
  Register expected_reg = a2;
  Register code_reg = a3;
  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
                       SharedFunctionInfo::kFormalParameterCountOffset));
  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper,
  LoadHeapObject(a1, function);
  ParameterCount expected(function->shared()->formal_parameter_count());
  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
  IsInstanceJSObjectType(map, scratch, fail);


void MacroAssembler::IsInstanceJSObjectType(Register map,


void MacroAssembler::IsObjectJSStringType(Register object,
  Branch(fail, ne, scratch, Operand(zero_reg));


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             bool miss_on_bound_function) {
  JumpIfSmi(function, miss);
  GetObjectType(function, result, scratch);
  if (miss_on_bound_function) {
    And(scratch, scratch,
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
    Branch(miss, ne, scratch, Operand(zero_reg));
  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  Branch(&non_instance, ne, scratch, Operand(zero_reg));
  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
  Branch(miss, eq, result, Operand(t8));
  GetObjectType(result, scratch, scratch);
  Branch(&done, ne, scratch, Operand(MAP_TYPE));
  bind(&non_instance);
void MacroAssembler::GetObjectType(Register object,
                                   Register type_reg) {


void MacroAssembler::CallStub(CodeStub* stub,
  ASSERT(AllowThisStubCall(stub));


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(),
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(),
  li(s3, Operand(next_address));
  Addu(s2, s2, Operand(1));
  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
  DirectCEntryStub stub;
  stub.GenerateCall(this, function);
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
  Branch(&skip, eq, v0, Operand(zero_reg));
  if (emit_debug_code()) {
    Check(eq, "Unexpected level after return from api call",
          a1, Operand(s2));
  Subu(s2, s2, Operand(1));
  Branch(&delete_allocated_handles, ne, s1, Operand(at));
  bind(&leave_exit_frame);
  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
  li(s0, Operand(stack_space));
  LeaveExitFrame(false, s0, true);
  bind(&promote_scheduled_exception);
  TailCallExternalReference(
      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
  bind(&delete_allocated_handles);
  PrepareCallCFunction(1, s1);
  li(a0, Operand(ExternalReference::isolate_address()));
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
  jmp(&leave_exit_frame);
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addiu(sp, sp, num_arguments * kPointerSize);
  LoadRoot(v0, Heap::kUndefinedValueRootIndex);


void MacroAssembler::IndexFromHash(Register hash,
         (1 << String::kArrayIndexValueBits));
  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               Register heap_number_map,
    JumpIfNotSmi(object, &not_smi);
    mtc1(scratch1, result);
    cvt_d_w(result, result);
  Branch(not_number, ne, scratch1, Operand(heap_number_map));
    Register exponent = scratch1;
    Register mask_reg = scratch2;
    li(mask_reg, HeapNumber::kExponentMask);
    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));


void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            Register scratch1) {
  mtc1(scratch1, value);
  cvt_d_w(value, value);
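// Overflow-checked arithmetic: AdduAndCheckForOverflow and
// SubuAndCheckForOverflow leave a value in |overflow_dst| whose sign bit is
// set exactly when the operation overflowed (computed by xor-ing the signs of
// the operands and the result), so callers typically follow with
// Branch(&overflow_label, lt, overflow_dst, Operand(zero_reg)).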
void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                             Register overflow_dst,
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  if (left.is(right) && dst.is(left)) {
    ASSERT(!overflow_dst.is(t9));
    addu(dst, left, right);
    xor_(scratch, dst, scratch);
    xor_(overflow_dst, dst, right);
    and_(overflow_dst, overflow_dst, scratch);
  } else if (dst.is(right)) {
    mov(scratch, right);
    addu(dst, left, right);
    xor_(scratch, dst, scratch);
    xor_(overflow_dst, dst, left);
    and_(overflow_dst, overflow_dst, scratch);
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);


void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                             Register overflow_dst,
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));
  if (left.is(right)) {
    mov(overflow_dst, zero_reg);
    subu(dst, left, right);
    xor_(overflow_dst, dst, scratch);
    xor_(scratch, scratch, right);
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);
    and_(overflow_dst, scratch, overflow_dst);
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
4181 void MacroAssembler::CallRuntime(
const Runtime::Function* f,
4182 int num_arguments) {
4188 if (f->nargs >= 0 && f->nargs != num_arguments) {
4189 IllegalOperation(num_arguments);
4197 PrepareCEntryArgs(num_arguments);
4198 PrepareCEntryFunction(ExternalReference(f, isolate()));
4204 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
4205 const Runtime::Function* function = Runtime::FunctionForId(id);
4206 PrepareCEntryArgs(function->nargs);
4207 PrepareCEntryFunction(ExternalReference(function, isolate()));
4213 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
4214 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
4218 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4221 PrepareCEntryArgs(num_arguments);
4222 PrepareCEntryFunction(ext);
4225 CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
4229 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4236 PrepareCEntryArgs(num_arguments);
4237 JumpToExternalReference(ext);
4241 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4244 TailCallExternalReference(ExternalReference(fid, isolate()),
4250 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4252 PrepareCEntryFunction(builtin);
4254 Jump(stub.GetCode(),
4255 RelocInfo::CODE_TARGET,
4263 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4265 const CallWrapper& call_wrapper) {
4269 GetBuiltinEntry(t9, id);
4271 call_wrapper.BeforeCall(CallSize(t9));
4274 call_wrapper.AfterCall();
4283 void MacroAssembler::GetBuiltinFunction(Register target,
4284 Builtins::JavaScript id) {
4286 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4290 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4294 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4296 GetBuiltinFunction(a1, id);
4302 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4303 Register scratch1, Register scratch2) {
4304 if (FLAG_native_code_counters && counter->Enabled()) {
4305 li(scratch1, Operand(value));
4306 li(scratch2, Operand(ExternalReference(counter)));
4312 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4313 Register scratch1, Register scratch2) {
4315 if (FLAG_native_code_counters && counter->Enabled()) {
4316 li(scratch2, Operand(ExternalReference(counter)));
4318 Addu(scratch1, scratch1, Operand(value));
4324 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4325 Register scratch1, Register scratch2) {
4327 if (FLAG_native_code_counters && counter->Enabled()) {
4328 li(scratch2, Operand(ExternalReference(counter)));
4330 Subu(scratch1, scratch1, Operand(value));
4339 void MacroAssembler::Assert(Condition cc, const char* msg,
4340 Register rs, Operand rt) {
4341 if (emit_debug_code())
4342 Check(cc, msg, rs, rt);
4346 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4347 Heap::RootListIndex index) {
4348 if (emit_debug_code()) {
4349 LoadRoot(at, index);
4350 Check(eq, "Register did not match expected root", reg, Operand(at));
4355 void MacroAssembler::AssertFastElements(Register elements) {
4356 if (emit_debug_code()) {
4357 ASSERT(!elements.is(at));
4361 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4362 Branch(&ok, eq, elements, Operand(at));
4363 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4364 Branch(&ok, eq, elements, Operand(at));
4365 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4366 Branch(&ok, eq, elements, Operand(at));
4367 Abort("JSObject with fast elements map has slow elements");
4374 void MacroAssembler::Check(Condition cc, const char* msg,
4375 Register rs, Operand rt) {
4377 Branch(&L, cc, rs, rt);
4384 void MacroAssembler::Abort(const char* msg) {
4392 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4394 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4397 RecordComment("Abort message: ");
4402 li(a0, Operand(p0));
4404 li(a0, Operand(Smi::FromInt(p1 - p0)));
4411 CallRuntime(Runtime::kAbort, 2);
4413 CallRuntime(Runtime::kAbort, 2);
4416 if (is_trampoline_pool_blocked()) {
4422 static const int kExpectedAbortInstructions = 14;
4423 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4424 ASSERT(abort_instructions <= kExpectedAbortInstructions);
4425 while (abort_instructions++ < kExpectedAbortInstructions) {
4432 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4433 if (context_chain_length > 0) {
4435 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4436 for (int i = 1; i < context_chain_length; i++) {
4437 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4448 void MacroAssembler::LoadTransitionedArrayMapConditional(
4451 Register map_in_out,
4453 Label* no_map_match) {
4456 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4457 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4462 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4463 size_t offset = expected_kind * kPointerSize +
4464 FixedArrayBase::kHeaderSize;
4466 Branch(no_map_match, ne, map_in_out, Operand(at));
4469 offset = transitioned_kind * kPointerSize +
4470 FixedArrayBase::kHeaderSize;
4475 void MacroAssembler::LoadInitialArrayMap(
4476 Register function_in, Register scratch,
4477 Register map_out, bool can_have_holes) {
4478 ASSERT(!function_in.is(map_out));
4481 JSFunction::kPrototypeOrInitialMapOffset));
4482 if (!FLAG_smi_only_arrays) {
4489 } else if (can_have_holes) {
4500 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4503 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4506 GlobalObject::kNativeContextOffset));
4508 lw(function, MemOperand(function, Context::SlotOffset(index)));
4512 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4516 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4517 if (emit_debug_code()) {
4519 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4522 Abort("Global functions must have initial map");
4528 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4529 addiu(sp, sp, -5 * kPointerSize);
4530 li(t8, Operand(Smi::FromInt(type)));
4537 addiu(fp, sp, 3 * kPointerSize);
4541 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4545 addiu(sp, sp, 2 * kPointerSize);
4549 void MacroAssembler::EnterExitFrame(bool save_doubles,
4552 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4553 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4554 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4566 addiu(sp, sp, -4 * kPointerSize);
4569 addiu(fp, sp, 2 * kPointerSize);
4571 if (emit_debug_code()) {
4572 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4577 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4580 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4582 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4585 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4588 ASSERT(kDoubleSize == frame_alignment);
4589 if (frame_alignment > 0) {
4591 And(sp, sp, Operand(-frame_alignment));
4594 Subu(sp, sp, Operand(space));
4597 FPURegister reg = FPURegister::from_code(i);
4605 ASSERT(stack_space >= 0);
4606 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4607 if (frame_alignment > 0) {
4609 And(sp, sp, Operand(-frame_alignment));
4614 addiu(at, sp, kPointerSize);
4619 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4620 Register argument_count,
4627 FPURegister reg = FPURegister::from_code(i);
4628 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4633 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4637 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4646 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4648 if (argument_count.is_valid()) {
4661 void MacroAssembler::InitializeNewString(Register string,
4663 Heap::RootListIndex map_index,
4665 Register scratch2) {
4667 LoadRoot(scratch2, map_index);
4669 li(scratch1, Operand(String::kEmptyHashField));
4675 int MacroAssembler::ActivationFrameAlignment() {
4676 #if defined(V8_HOST_ARCH_MIPS)
4681 return OS::ActivationFrameAlignment();
4682 #else // defined(V8_HOST_ARCH_MIPS)
4687 return FLAG_sim_stack_alignment;
4688 #endif // defined(V8_HOST_ARCH_MIPS)
4692 void MacroAssembler::AssertStackIsAligned() {
4693 if (emit_debug_code()) {
4694 const int frame_alignment = ActivationFrameAlignment();
4695 const int frame_alignment_mask = frame_alignment - 1;
4697 if (frame_alignment > kPointerSize) {
4698 Label alignment_as_expected;
4700 andi(at, sp, frame_alignment_mask);
4701 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4703 stop("Unexpected stack alignment");
4704 bind(&alignment_as_expected);
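// Illustrative sketch (not part of the original source): the debug check above
// masks sp with (frame_alignment - 1) and expects zero. Assuming the
// alignment is a power of two, the test in plain C++ is:
static inline bool StackIsAlignedSketch(uintptr_t sp, int frame_alignment) {
  return (sp & static_cast<uintptr_t>(frame_alignment - 1)) == 0;
}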
4710 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4713 Label* not_power_of_two_or_zero) {
4714 Subu(scratch, reg, Operand(1));
4716 scratch, Operand(zero_reg));
4717 and_(at, scratch, reg);
4718 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
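// Illustrative sketch (not part of the original source): the Subu/and_ pair
// above is the usual "x & (x - 1)" trick; the jump is taken for zero and for
// any value with more than one bit set. The predicate it implements is:
static inline bool IsPowerOfTwoNonZeroSketch(uint32_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}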
4722 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4723 ASSERT(!reg.is(overflow));
4726 xor_(overflow, overflow, reg);
4730 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4732 Register overflow) {
4735 SmiTagCheckOverflow(dst, overflow);
4738 ASSERT(!dst.is(overflow));
4739 ASSERT(!src.is(overflow));
4741 xor_(overflow, dst, src);
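// Illustrative sketch (not part of the original source): on 32-bit MIPS a smi
// is the untagged value shifted left by one bit, so tagging overflows exactly
// when bits 31 and 30 of the value differ; the xor_ above encodes that as
// (value ^ 2 * value) < 0. Assuming 32-bit two's-complement values:
static inline bool SmiTagOverflowSketch(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // Sign bit set iff bit 31 != bit 30.
}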
4746 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4754 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4756 Label* non_smi_case) {
4761 void MacroAssembler::JumpIfSmi(Register value,
4767 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4770 void MacroAssembler::JumpIfNotSmi(Register value,
4771 Label* not_smi_label,
4776 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4780 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4782 Label* on_not_both_smi) {
4785 or_(at, reg1, reg2);
4786 JumpIfNotSmi(at, on_not_both_smi);
4790 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4792 Label* on_either_smi) {
4796 and_(at, reg1, reg2);
4797 JumpIfSmi(at, on_either_smi);
4801 void MacroAssembler::AssertNotSmi(Register object) {
4802 if (emit_debug_code()) {
4805 Check(ne, "Operand is a smi", at, Operand(zero_reg));
4810 void MacroAssembler::AssertSmi(Register object) {
4811 if (emit_debug_code()) {
4814 Check(eq, "Operand is a smi", at, Operand(zero_reg));
4819 void MacroAssembler::AssertString(Register object) {
4820 if (emit_debug_code()) {
4823 Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
4833 void MacroAssembler::AssertRootValue(Register src,
4834 Heap::RootListIndex root_value_index,
4836 if (emit_debug_code()) {
4838 LoadRoot(at, root_value_index);
4839 Check(eq, message, src, Operand(at));
4844 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4845 Register heap_number_map,
4847 Label* on_not_heap_number) {
4849 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4850 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4854 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4867 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4875 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4882 And(scratch1, first, Operand(second));
4883 JumpIfSmi(scratch1, failure);
4884 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4892 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4898 int kFlatAsciiStringMask =
4901 ASSERT(kFlatAsciiStringTag <= 0xffff);
4902 andi(scratch1, first, kFlatAsciiStringMask);
4903 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4904 andi(scratch2, second, kFlatAsciiStringMask);
4905 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4909 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4912 int kFlatAsciiStringMask =
4915 And(scratch, type, Operand(kFlatAsciiStringMask));
4916 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4920 static const int kRegisterPassedArguments = 4;
4922 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4923 int num_double_arguments) {
4924 int stack_passed_words = 0;
4925 num_reg_arguments += 2 * num_double_arguments;
4928 if (num_reg_arguments > kRegisterPassedArguments) {
4929 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4932 return stack_passed_words;
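// Illustrative sketch (not part of the original source): in the calling
// convention modelled above, each double argument occupies two word slots and
// the first kRegisterPassedArguments (4) word slots travel in registers, so
// only the remainder needs stack space. A standalone version of the visible
// arithmetic (the elided lines may add platform-reserved argument slots):
static inline int StackPassedWordsSketch(int num_reg_arguments,
                                         int num_double_arguments) {
  const int kRegisterPassedArgumentsSketch = 4;  // a0..a3
  int words = num_reg_arguments + 2 * num_double_arguments;
  return words > kRegisterPassedArgumentsSketch
             ? words - kRegisterPassedArgumentsSketch
             : 0;
}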
4936 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4937 int num_double_arguments,
4939 int frame_alignment = ActivationFrameAlignment();
4946 int stack_passed_arguments = CalculateStackPassedWords(
4947 num_reg_arguments, num_double_arguments);
4948 if (frame_alignment > kPointerSize) {
4952 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4954 And(sp, sp, Operand(-frame_alignment));
4955 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4957 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4962 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4964 PrepareCallCFunction(num_reg_arguments, 0, scratch);
4968 void MacroAssembler::CallCFunction(ExternalReference function,
4969 int num_reg_arguments,
4970 int num_double_arguments) {
4971 li(t8, Operand(function));
4972 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
4976 void MacroAssembler::CallCFunction(Register function,
4977 int num_reg_arguments,
4978 int num_double_arguments) {
4979 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
4983 void MacroAssembler::CallCFunction(ExternalReference function,
4984 int num_arguments) {
4985 CallCFunction(function, num_arguments, 0);
4989 void MacroAssembler::CallCFunction(Register function,
4990 int num_arguments) {
4991 CallCFunction(function, num_arguments, 0);
4995 void MacroAssembler::CallCFunctionHelper(Register function,
4996 int num_reg_arguments,
4997 int num_double_arguments) {
5005 #if defined(V8_HOST_ARCH_MIPS)
5006 if (emit_debug_code()) {
5007 int frame_alignment = OS::ActivationFrameAlignment();
5008 int frame_alignment_mask = frame_alignment - 1;
5009 if (frame_alignment > kPointerSize) {
5011 Label alignment_as_expected;
5012 And(at, sp, Operand(frame_alignment_mask));
5013 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5016 stop("Unexpected alignment in CallCFunction");
5017 bind(&alignment_as_expected);
5020 #endif // V8_HOST_ARCH_MIPS
5026 if (!function.is(t9)) {
5033 int stack_passed_arguments = CalculateStackPassedWords(
5034 num_reg_arguments, num_double_arguments);
5036 if (OS::ActivationFrameAlignment() > kPointerSize) {
5039 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5044 #undef BRANCH_ARGS_CHECK
5047 void MacroAssembler::PatchRelocatedValue(Register li_location,
5049 Register new_value) {
5052 if (emit_debug_code()) {
5054 Check(eq, "The instruction to patch should be a lui.",
5055 scratch, Operand(LUI));
5062 lw(scratch, MemOperand(li_location, kInstrSize));
5064 if (emit_debug_code()) {
5066 Check(eq, "The instruction to patch should be an ori.",
5067 scratch, Operand(ORI));
5068 lw(scratch, MemOperand(li_location, kInstrSize));
5071 sw(scratch, MemOperand(li_location, kInstrSize));
5074 FlushICache(li_location, 2);
5077 void MacroAssembler::GetRelocatedValue(Register li_location,
5081 if (emit_debug_code()) {
5083 Check(eq, "The instruction should be a lui.",
5084 value, Operand(LUI));
5091 lw(scratch, MemOperand(li_location, kInstrSize));
5092 if (emit_debug_code()) {
5094 Check(eq, "The instruction should be an ori.",
5095 scratch, Operand(ORI));
5096 lw(scratch, MemOperand(li_location, kInstrSize));
5102 or_(value, value, scratch);
5106 void MacroAssembler::CheckPageFlag(
5111 Label* condition_met) {
5112 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5113 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5114 And(scratch, scratch, Operand(mask));
5115 Branch(condition_met, cc, scratch, Operand(zero_reg));
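// Illustrative sketch (not part of the original source): CheckPageFlag() finds
// the owning MemoryChunk by masking off the page-alignment bits of the object
// address, loads the chunk's flag word, and compares (flags & mask) against
// zero under the caller's condition. For the common "any of these flags set"
// case, and assuming a flag word at a fixed offset in the chunk header:
static inline bool PageFlagSetSketch(uintptr_t object_address,
                                     uintptr_t page_alignment_mask,  // Page::kPageAlignmentMask
                                     size_t flags_offset,            // MemoryChunk::kFlagsOffset
                                     uintptr_t mask) {
  uintptr_t chunk = object_address & ~page_alignment_mask;
  uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + flags_offset);
  return (flags & mask) != 0;
}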
5119 void MacroAssembler::JumpIfBlack(Register object,
5123 HasColor(object, scratch0, scratch1, on_black, 1, 0);
5124 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5128 void MacroAssembler::HasColor(Register object,
5129 Register bitmap_scratch,
5130 Register mask_scratch,
5137 GetMarkBits(object, bitmap_scratch, mask_scratch);
5139 Label other_color, word_boundary;
5140 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5141 And(t8, t9, Operand(mask_scratch));
5142 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5144 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5145 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5146 And(t8, t9, Operand(mask_scratch));
5147 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5150 bind(&word_boundary);
5151 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5152 And(t9, t9, Operand(1));
5153 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5161 void MacroAssembler::JumpIfDataObject(Register value,
5163 Label* not_data_object) {
5165 Label is_data_object;
5167 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5168 Branch(&is_data_object, eq, t8, Operand(scratch));
5175 Branch(not_data_object, ne, t8, Operand(zero_reg));
5176 bind(&is_data_object);
5180 void MacroAssembler::GetMarkBits(Register addr_reg,
5181 Register bitmap_reg,
5182 Register mask_reg) {
5184 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5189 Addu(bitmap_reg, bitmap_reg, t8);
5191 sllv(mask_reg, t8, mask_reg);
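// Illustrative sketch (not part of the original source): GetMarkBits() maps an
// address to a bitmap cell offset plus a one-bit mask. Assuming one mark bit
// per pointer-sized word and 32-bit bitmap cells (the bitmap itself sits at a
// fixed header offset that the callers add when loading the cell), the index
// arithmetic behind the Ext/sll/sllv sequence is roughly:
static inline void GetMarkBitsSketch(uintptr_t addr,
                                     uintptr_t page_alignment_mask,  // Page::kPageAlignmentMask
                                     uintptr_t* cell_byte_offset,
                                     uint32_t* mask) {
  const int kPointerSizeLog2Sketch = 2;  // 4-byte pointers.
  const int kBitsPerCellLog2Sketch = 5;  // 32 mark bits per cell.
  uintptr_t bit_index = (addr & page_alignment_mask) >> kPointerSizeLog2Sketch;
  // Byte offset of the 32-bit cell within the chunk's mark bitmap.
  *cell_byte_offset = (bit_index >> kBitsPerCellLog2Sketch) << kPointerSizeLog2Sketch;
  // Single-bit mask selecting this object's mark bit inside that cell.
  *mask = 1u << (bit_index & ((1u << kBitsPerCellLog2Sketch) - 1));
}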
5195 void MacroAssembler::EnsureNotWhite(
5197 Register bitmap_scratch,
5198 Register mask_scratch,
5199 Register load_scratch,
5200 Label* value_is_white_and_not_data) {
5202 GetMarkBits(value, bitmap_scratch, mask_scratch);
5205 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5206 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5207 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5208 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5214 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5215 And(t8, mask_scratch, load_scratch);
5216 Branch(&done, ne, t8, Operand(zero_reg));
5218 if (emit_debug_code()) {
5222 sll(t8, mask_scratch, 1);
5223 And(t8, load_scratch, t8);
5224 Branch(&ok, eq, t8, Operand(zero_reg));
5225 stop("Impossible marking bit pattern");
5231 Register map = load_scratch;
5232 Register length = load_scratch;
5233 Label is_data_object;
5237 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5240 Branch(&skip, ne, t8, Operand(map));
5241 li(length, HeapNumber::kSize);
5242 Branch(&is_data_object);
5251 Register instance_type = load_scratch;
5254 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5265 Branch(&skip, eq, t8, Operand(zero_reg));
5266 li(length, ExternalString::kSize);
5267 Branch(&is_data_object);
5281 Branch(&skip, eq, t8, Operand(zero_reg));
5288 bind(&is_data_object);
5291 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5292 Or(t8, t8, Operand(mask_scratch));
5293 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5295 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5296 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5297 Addu(t8, t8, Operand(length));
5298 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5304 void MacroAssembler::LoadInstanceDescriptors(Register map,
5305 Register descriptors) {
5310 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5312 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5316 void MacroAssembler::EnumLength(Register dst, Register map) {
5319 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5323 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5324 Register empty_fixed_array_value = t2;
5325 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5334 Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
5343 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5350 Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5353 Branch(&next, ne, a2, Operand(null_value));
5357 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5358 ASSERT(!output_reg.is(input_reg));
5360 li(output_reg, Operand(255));
5362 Branch(&done, gt, input_reg, Operand(output_reg));
5365 mov(output_reg, zero_reg);
5366 mov(output_reg, input_reg);
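// Illustrative sketch (not part of the original source): the branches and
// moves above implement a saturating clamp of a signed integer into the
// unsigned byte range. In plain C++:
static inline int ClampUint8Sketch(int value) {
  if (value < 0) return 0;      // Negative values clamp to 0.
  if (value > 255) return 255;  // Values above 255 clamp to 255.
  return value;                 // Everything else is already in range.
}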
5371 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5378 Move(temp_double_reg, 0.0);
5379 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5382 mov(result_reg, zero_reg);
5387 Move(temp_double_reg, 255.0);
5388 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5389 li(result_reg, Operand(255));
5394 cvt_w_d(temp_double_reg, input_reg);
5395 mfc1(result_reg, temp_double_reg);
5400 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5401 if (r1.is(r2)) return true;
5402 if (r1.is(r3)) return true;
5403 if (r1.is(r4)) return true;
5404 if (r2.is(r3)) return true;
5405 if (r2.is(r4)) return true;
5406 if (r3.is(r4)) return true;
5411 CodePatcher::CodePatcher(byte* address, int instructions)
5412 : address_(address),
5413 instructions_(instructions),
5414 size_(instructions * Assembler::kInstrSize),
5415 masm_(NULL, address, size_ + Assembler::kGap) {
5419 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5423 CodePatcher::~CodePatcher() {
5425 CPU::FlushICache(address_, size_);
5428 ASSERT(masm_.pc_ == address_ + size_);
5429 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5433 void CodePatcher::Emit(Instr instr) {
5434 masm()->emit(instr);
5438 void CodePatcher::Emit(Address addr) {
5439 masm()->emit(reinterpret_cast<Instr>(addr));
5443 void CodePatcher::ChangeBranchCondition(Condition cond) {
5444 Instr instr = Assembler::instr_at(masm_.pc_);
5445 ASSERT(Assembler::IsBranch(instr));
5446 uint32_t opcode = Assembler::GetOpcodeField(instr);
5467 #endif // V8_TARGET_ARCH_MIPS