32 #if defined(V8_TARGET_ARCH_MIPS)
43 : Assembler(arg_isolate, buffer, size),
44 generating_stub_(false),
45 allow_stub_calls_(true),
47 if (isolate() != NULL) {
48 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
54 void MacroAssembler::LoadRoot(Register destination,
55 Heap::RootListIndex index) {
60 void MacroAssembler::LoadRoot(Register destination,
61 Heap::RootListIndex index,
63 Register src1, const Operand& src2) {
69 void MacroAssembler::StoreRoot(Register source,
70 Heap::RootListIndex index) {
75 void MacroAssembler::StoreRoot(Register source,
76 Heap::RootListIndex index,
78 Register src1, const Operand& src2) {
84 void MacroAssembler::LoadHeapObject(Register result,
85 Handle<HeapObject> object) {
86 if (isolate()->heap()->InNewSpace(*object)) {
87 Handle<JSGlobalPropertyCell> cell =
88 isolate()->factory()->NewJSGlobalPropertyCell(object);
89 li(result, Operand(cell));
90 lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
92 li(result, Operand(object));
98 void MacroAssembler::PushSafepointRegisters() {
103 if (num_unsaved > 0) {
110 void MacroAssembler::PopSafepointRegisters() {
113 if (num_unsaved > 0) {
119 void MacroAssembler::PushSafepointRegistersAndDoubles() {
120 PushSafepointRegisters();
121 Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
122 for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
123 FPURegister reg = FPURegister::FromAllocationIndex(i);
129 void MacroAssembler::PopSafepointRegistersAndDoubles() {
130 for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
131 FPURegister reg = FPURegister::FromAllocationIndex(i);
134 Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
135 PopSafepointRegisters();
135 PopSafepointRegisters();
139 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
141 sw(src, SafepointRegistersAndDoublesSlot(dst));
145 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
146 sw(src, SafepointRegisterSlot(dst));
150 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
151 lw(dst, SafepointRegisterSlot(src));
155 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
162 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
167 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
170 int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
171 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
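// Note on the slot math above: the doubles block of
// FPURegister::kNumAllocatableRegisters * kDoubleSize bytes is pushed below
// the general-purpose register slots, so a combined registers-and-doubles
// slot is presumably addressed as doubles_size + register_offset from sp
// (the final MemOperand return is outside this excerpt).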
176 void MacroAssembler::InNewSpace(Register object,
181 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
182 Branch(branch, cc, scratch,
183 Operand(ExternalReference::new_space_start(isolate())));
187 void MacroAssembler::RecordWriteField(
203 JumpIfSmi(value, &done);
211 if (emit_debug_code()) {
214 Branch(&ok, eq, t8, Operand(zero_reg));
215 stop("Unaligned cell in write barrier");
224 remembered_set_action,
231 if (emit_debug_code()) {
232 li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
233 li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
241 void MacroAssembler::RecordWrite(Register object,
255 if (emit_debug_code()) {
258 eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
265 JumpIfSmi(value, &done);
270 MemoryChunk::kPointersToHereAreInterestingMask,
273 CheckPageFlag(object,
275 MemoryChunk::kPointersFromHereAreInterestingMask,
283 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
293 if (emit_debug_code()) {
294 li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
295 li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
300 void MacroAssembler::RememberedSetHelper(Register object,
304 RememberedSetFinalAction and_then) {
306 if (emit_debug_code()) {
308 JumpIfNotInNewSpace(object, scratch, &ok);
309 stop("Remembered set pointer is in new space");
313 ExternalReference store_buffer =
314 ExternalReference::store_buffer_top(isolate());
315 li(t8, Operand(store_buffer));
324 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
325 if (and_then == kFallThroughAtEnd) {
326 Branch(&done, eq, t8, Operand(zero_reg));
328 ASSERT(and_then == kReturnAtEnd);
329 Ret(eq, t8, Operand(zero_reg));
332 StoreBufferOverflowStub store_buffer_overflow =
333 StoreBufferOverflowStub(fp_mode);
334 CallStub(&store_buffer_overflow);
337 if (and_then == kReturnAtEnd) {
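// RememberedSetHelper, as the lines above show, loads the store-buffer top
// pointer, records the slot address, and tests the overflow bit of the
// bumped pointer (the intervening sw/Addu are outside this excerpt); only
// when the buffer overflowed does control reach StoreBufferOverflowStub,
// otherwise it falls through or returns per and_then.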
347 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
352 ASSERT(!holder_reg.is(scratch));
353 ASSERT(!holder_reg.is(at));
357 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
360 Check(ne, "we should not have an empty lexical context",
361 scratch, Operand(zero_reg));
365 int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
367 lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
370 if (emit_debug_code()) {
375 LoadRoot(at, Heap::kGlobalContextMapRootIndex);
376 Check(eq, "JSGlobalObject::global_context should be a global context.",
377 holder_reg, Operand(at));
383 Branch(&same_contexts, eq, scratch, Operand(at));
386 if (emit_debug_code()) {
390 LoadRoot(at, Heap::kNullValueRootIndex);
391 Check(ne, "JSGlobalProxy::context() should not be null.",
392 holder_reg, Operand(at));
395 LoadRoot(at, Heap::kGlobalContextMapRootIndex);
396 Check(eq, "JSGlobalObject::global_context should be a global context.",
397 holder_reg, Operand(at));
407 int token_offset = Context::kHeaderSize +
412 Branch(miss, ne, scratch, Operand(at));
414 bind(&same_contexts);
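// GetNumberHash below applies the seeded integer hash (a Thomas Wang style
// 32-bit mix: xor with the hash seed, then the alternating shift/add/xor
// steps emitted on lines 424-454) so that probing reproduces the sequence
// used when the SeededNumberDictionary was built.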
418 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
420 LoadRoot(scratch, Heap::kHashSeedRootIndex);
424 xor_(reg0, reg0, scratch);
430 nor(scratch, reg0, zero_reg);
432 addu(reg0, scratch, at);
436 xor_(reg0, reg0, at);
440 addu(reg0, reg0, at);
444 xor_(reg0, reg0, at);
447 sll(scratch, reg0, 11);
449 addu(reg0, reg0, at);
450 addu(reg0, reg0, scratch);
454 xor_(reg0, reg0, at);
458 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
489 GetNumberHash(reg0, reg1);
492 lw(reg1,
FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
494 Subu(reg1, reg1, Operand(1));
497 static const int kProbes = 4;
498 for (int i = 0; i < kProbes; i++) {
503 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
505 and_(reg2, reg2, reg1);
508 ASSERT(SeededNumberDictionary::kEntrySize == 3);
510 addu(reg2, reg2, at);
514 addu(reg2, elements, at);
516 lw(at,
FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
517 if (i != kProbes - 1) {
518 Branch(&done, eq, key, Operand(at));
520 Branch(miss, ne, key, Operand(at));
527 const int kDetailsOffset =
528 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
530 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
531 Branch(miss, ne, at, Operand(zero_reg));
534 const int kValueOffset =
535 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
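// The two-operand arithmetic macros that follow all share one pattern: if
// rt wraps a register they emit the R-type instruction directly; if it
// wraps an immediate that fits the 16-bit I-type field (is_int16/is_uint16)
// and needs no relocation they emit the immediate form; otherwise (on paths
// not shown here) the constant is first materialized into at with li().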
543 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
545 addu(rd, rs, rt.rm());
547 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
548 addiu(rd, rs, rt.imm32_);
559 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
561 subu(rd, rs, rt.rm());
563 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
564 addiu(rd, rs, -rt.imm32_);
575 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
581 mul(rd, rs, rt.rm());
597 void MacroAssembler::Mult(Register rs, const Operand& rt) {
609 void MacroAssembler::Multu(Register rs, const Operand& rt) {
621 void MacroAssembler::Div(Register rs, const Operand& rt) {
633 void MacroAssembler::Divu(Register rs, const Operand& rt) {
645 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
647 and_(rd, rs, rt.rm());
649 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
650 andi(rd, rs, rt.imm32_);
661 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
663 or_(rd, rs, rt.rm());
665 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
666 ori(rd, rs, rt.imm32_);
677 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
679 xor_(rd, rs, rt.rm());
681 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
682 xori(rd, rs, rt.imm32_);
693 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
695 nor(rd, rs, rt.rm());
705 void MacroAssembler::Neg(Register rs, const Operand& rt) {
710 xor_(rs, rt.rm(), at);
714 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
716 slt(rd, rs, rt.rm());
718 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
719 slti(rd, rs, rt.imm32_);
730 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
732 sltu(rd, rs, rt.rm());
734 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
735 sltiu(rd, rs, rt.imm32_);
746 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
749 rotrv(rd, rs, rt.rm());
751 rotr(rd, rs, rt.imm32_);
749 rotrv(rd, rs, rt.rm());
751 rotr(rd, rs, rt.imm32_);
755 subu(at, zero_reg, rt.rm());
757 srlv(rd, rs, rt.rm());
760 if (rt.imm32_ == 0) {
763 srl(at, rs, rt.imm32_);
764 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
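// On cores without rotr/rotrv, Ror is synthesized from a shift pair: the
// value shifted right by n is combined with the value shifted left by
// (32 - n) & 31, the classic rotate-right identity; the or_ that merges the
// two halves falls outside this excerpt.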
773 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
775 BlockTrampolinePoolScope block_trampoline_pool(this);
779 addiu(rd, zero_reg, j.imm32_);
780 } else if (!(j.imm32_ & kHiMask)) {
781 ori(rd, zero_reg, j.imm32_);
789 if (MustUseReg(j.rmode_)) {
790 RecordRelocInfo(j.rmode_, j.imm32_);
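// li() picks the shortest sequence for a 32-bit constant: addiu for values
// fitting 16 signed bits, ori alone when only the low half-word is set, and
// (on branches not shown here) a lui or lui/ori pair otherwise; constants
// that carry relocation info (MustUseReg) take a fixed-length form so the
// patcher can rewrite them in place.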
800 void MacroAssembler::MultiPush(RegList regs) {
804 Subu(sp, sp, Operand(stack_offset));
806 if ((regs & (1 << i)) != 0) {
814 void MacroAssembler::MultiPushReversed(RegList regs) {
818 Subu(sp, sp, Operand(stack_offset));
820 if ((regs & (1 << i)) != 0) {
828 void MacroAssembler::MultiPop(RegList regs) {
832 if ((regs & (1 << i)) != 0) {
837 addiu(sp, sp, stack_offset);
841 void MacroAssembler::MultiPopReversed(RegList regs) {
844 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
845 if ((regs & (1 << i)) != 0) {
850 addiu(sp, sp, stack_offset);
854 void MacroAssembler::MultiPushFPU(RegList regs) {
855 CpuFeatures::Scope scope(FPU);
859 Subu(sp, sp, Operand(stack_offset));
860 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
861 if ((regs & (1 << i)) != 0) {
863 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
869 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
870 CpuFeatures::Scope scope(FPU);
874 Subu(sp, sp, Operand(stack_offset));
876 if ((regs & (1 << i)) != 0) {
878 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
884 void MacroAssembler::MultiPopFPU(RegList regs) {
885 CpuFeatures::Scope scope(FPU);
889 if ((regs & (1 << i)) != 0) {
890 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
894 addiu(sp, sp, stack_offset);
898 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
899 CpuFeatures::Scope scope(FPU);
902 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
903 if ((regs & (1 << i)) != 0) {
904 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
908 addiu(sp, sp, stack_offset);
912 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
914 MultiPush(saved_regs);
915 AllowExternalCallThatCantCauseGC scope(this);
919 PrepareCallCFunction(2, t0);
921 li(a1, instructions * kInstrSize);
922 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
923 MultiPop(saved_regs);
927 void MacroAssembler::Ext(Register rt,
935 ext_(rt, rs, pos, size);
939 int shift_left = 32 - (pos + size);
940 sll(rt, rs, shift_left);
942 int shift_right = 32 - size;
943 if (shift_right > 0) {
944 srl(rt, rt, shift_right);
950 void MacroAssembler::Ins(Register rt,
959 ins_(rt, rs, pos, size);
961 ASSERT(!rt.is(t8) && !rs.is(t8));
962 Subu(at, zero_reg, Operand(1));
963 srl(at, at, 32 - size);
967 nor(at, at, zero_reg);
974 void MacroAssembler::Cvt_d_uw(FPURegister fd,
976 FPURegister scratch) {
979 Cvt_d_uw(fd, t8, scratch);
983 void MacroAssembler::Cvt_d_uw(FPURegister fd,
985 FPURegister scratch) {
1004 Label conversion_done;
1008 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1012 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1013 mtc1(zero_reg, scratch);
1015 add_d(fd, fd, scratch);
1017 bind(&conversion_done);
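// Cvt_d_uw converts an unsigned 32-bit value by treating the low 31 bits as
// a non-negative integer and, when bit 31 (held in t9 above) was set,
// adding 2^31 back in; the correction is built by writing the IEEE-754 high
// word of 2^31 straight into the scratch double before the add_d.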
1021 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1023 FPURegister scratch) {
1024 Trunc_uw_d(fs, t8, scratch);
1028 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1029 if (kArchVariant == kLoongson && fd.is(fs)) {
1030 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1032 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1038 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1039 if (kArchVariant == kLoongson && fd.is(fs)) {
1040 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1042 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1049 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1050 if (kArchVariant == kLoongson && fd.is(fs)) {
1051 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1053 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1060 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1061 if (kArchVariant == kLoongson && fd.is(fs)) {
1062 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1064 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1071 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1073 FPURegister scratch) {
1079 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1080 mtc1(zero_reg, scratch);
1083 Label simple_convert;
1084 BranchF(&simple_convert, NULL, lt, fd, scratch);
1088 sub_d(scratch, fd, scratch);
1089 trunc_w_d(scratch, scratch);
1091 Or(rs, rs, 1 << 31);
1096 bind(&simple_convert);
1097 trunc_w_d(scratch, fd);
1104 void MacroAssembler::BranchF(Label* target,
1118 c(UN, D, cmp1, cmp2);
1129 c(OLT, D, cmp1, cmp2);
1134 c(ULE, D, cmp1, cmp2);
1139 c(ULT, D, cmp1, cmp2);
1144 c(OLE, D, cmp1, cmp2);
1148 c(EQ, D, cmp1, cmp2);
1152 c(EQ, D, cmp1, cmp2);
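// BranchF maps each Condition onto a c.cond.d compare that sets the FPU
// condition flag; the inverted predicates (e.g. ULE for a greater-than
// branch) are presumably paired with bc1f and the direct ones with bc1t --
// those branch instructions themselves are outside this excerpt.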
1166 void MacroAssembler::Move(FPURegister dst, double imm) {
1168 static const DoubleRepresentation minus_zero(-0.0);
1169 static const DoubleRepresentation zero(0.0);
1170 DoubleRepresentation value(imm);
1173 if (value.bits == zero.bits && !force_load) {
1175 } else if (value.bits == minus_zero.bits && !force_load) {
1179 DoubleAsTwoUInt32(imm, &lo, &hi);
1183 li(at, Operand(lo));
1186 mtc1(zero_reg, dst);
1191 li(at, Operand(hi));
1192 mtc1(at, dst.high());
1194 mtc1(zero_reg, dst.high());
1200 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1203 Branch(&done, ne, rt, Operand(zero_reg));
1212 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1215 Branch(&done, eq, rt, Operand(zero_reg));
1224 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1229 ASSERT(!(rs.is(t8) || rd.is(t8)));
1231 Register scratch = t8;
1235 cfc1(scratch, FCSR);
1239 srl(scratch, scratch, 16);
1240 andi(scratch, scratch, 0x0080);
1241 Branch(&done, eq, scratch, Operand(zero_reg));
1250 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1255 ASSERT(!(rs.is(t8) || rd.is(t8)));
1257 Register scratch = t8;
1261 cfc1(scratch, FCSR);
1265 srl(scratch, scratch, 16);
1266 andi(scratch, scratch, 0x0080);
1267 Branch(&done, ne, scratch, Operand(zero_reg));
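// On cores that lack movt/movf (and clz, below), these macros read FCSR,
// isolate condition bit 23 with the srl/andi pair, and branch around a
// plain mov; Clz likewise falls back to a loop that walks a high-bit mask
// down until it meets the first set bit, counting as it goes.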
1276 void MacroAssembler::Clz(Register rd, Register rs) {
1278 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1280 Register scratch = t9;
1286 and_(scratch, at, mask);
1287 Branch(&end, ne, scratch, Operand(zero_reg));
1303 void MacroAssembler::ConvertToInt32(Register source,
1307 FPURegister double_scratch,
1309 Label right_exponent, done;
1313 And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
1316 mov(dest, zero_reg);
1321 const uint32_t non_smi_exponent =
1322 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1324 Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
1327 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
1332 const uint32_t zero_exponent =
1333 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
1334 Subu(scratch2, scratch2, Operand(zero_exponent));
1336 Branch(&done, lt, scratch2, Operand(zero_reg));
1337 if (!CpuFeatures::IsSupported(FPU)) {
1339 srl(dest, scratch2, HeapNumber::kExponentShift);
1342 li(at, Operand(30));
1343 subu(dest, at, dest);
1345 bind(&right_exponent);
1346 if (CpuFeatures::IsSupported(FPU)) {
1347 CpuFeatures::Scope scope(FPU);
1352 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1353 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
1354 trunc_w_d(double_scratch, double_scratch);
1355 mfc1(dest, double_scratch);
1359 And(scratch2, scratch, Operand(0x80000000));
1360 Or(dest, dest, Operand(scratch2));
1362 Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
1369 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1370 sll(scratch, scratch, shift_distance + 1);
1371 srl(scratch, scratch, 1);
1379 const int field_width = shift_distance;
1380 Ext(scratch2, scratch2, 32-shift_distance, field_width);
1381 Ins(scratch, scratch2, 0, field_width);
1383 srlv(scratch, scratch, dest);
1385 subu(scratch2, zero_reg, scratch);
1389 Movz(scratch, scratch2, dest);
1400 Register except_flag,
1403 CpuFeatures::Scope scope(FPU);
1413 cfc1(scratch1, FCSR);
1415 ctc1(zero_reg, FCSR);
1418 switch (rounding_mode) {
1420 Round_w_d(result, double_input);
1423 Trunc_w_d(result, double_input);
1426 Ceil_w_d(result, double_input);
1429 Floor_w_d(result, double_input);
1434 cfc1(except_flag, FCSR);
1436 ctc1(scratch1, FCSR);
1439 And(except_flag, except_flag, Operand(except_mask));
1443 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
1444 Register input_high,
1447 Label done, normal_exponent, restore_sign;
1451 HeapNumber::kExponentShift,
1452 HeapNumber::kExponentBits);
1455 Subu(scratch, result, HeapNumber::kExponentMask);
1456 Movz(result, zero_reg, scratch);
1457 Branch(&done, eq, scratch, Operand(zero_reg));
1462 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
1466 Branch(&normal_exponent, le, result, Operand(zero_reg));
1467 mov(result, zero_reg);
1470 bind(&normal_exponent);
1471 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
1473 Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
1476 Register sign = result;
1482 Label high_shift_needed, high_shift_done;
1483 Branch(&high_shift_needed, lt, scratch, Operand(32));
1484 mov(input_high, zero_reg);
1485 Branch(&high_shift_done);
1486 bind(&high_shift_needed);
1491 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
1495 sllv(input_high, input_high, scratch);
1497 bind(&high_shift_done);
1500 Label pos_shift, shift_done;
1502 subu(scratch, at, scratch);
1503 Branch(&pos_shift, ge, scratch, Operand(zero_reg));
1506 Subu(scratch, zero_reg, scratch);
1507 sllv(input_low, input_low, scratch);
1508 Branch(&shift_done);
1511 srlv(input_low, input_low, scratch);
1514 Or(input_high, input_high, Operand(input_low));
1519 Subu(result, zero_reg, input_high);
1520 Movz(result, input_high, scratch);
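// The routine above reproduces ECMA-262 ToInt32 for doubles whose exponent
// is too large for trunc.w.d: it rebuilds the low 32 bits by shifting the
// two mantissa words left or right as dictated by the exponent, then applies
// the sign (Subu from zero, selected with Movz). EmitECMATruncate below
// first tries the cheap trunc_w_d path and only falls back here when FCSR
// reports that the conversion misbehaved.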
1525 void MacroAssembler::EmitECMATruncate(Register result,
1526 FPURegister double_input,
1527 FPURegister single_scratch,
1530 Register scratch3) {
1531 CpuFeatures::Scope scope(FPU);
1532 ASSERT(!scratch2.is(result));
1533 ASSERT(!scratch3.is(result));
1534 ASSERT(!scratch3.is(scratch2));
1535 ASSERT(!scratch.is(result) &&
1536 !scratch.is(scratch2) &&
1537 !scratch.is(scratch3));
1538 ASSERT(!single_scratch.is(double_input));
1544 cfc1(scratch2, FCSR);
1545 ctc1(zero_reg, FCSR);
1547 trunc_w_d(single_scratch, double_input);
1548 mfc1(result, single_scratch);
1550 cfc1(scratch, FCSR);
1551 ctc1(scratch2, FCSR);
1557 Branch(&done, eq, scratch, Operand(zero_reg));
1560 Register input_high = scratch2;
1561 Register input_low = scratch3;
1562 Move(input_low, input_high, double_input);
1563 EmitOutOfInt32RangeTruncate(result,
1571 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1573 int num_least_bits) {
1578 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1580 int num_least_bits) {
1581 And(dst, src, Operand((1 << num_least_bits) - 1));
1588 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1589 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1590 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
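// The BranchShort bodies that follow expand each pseudo condition: eq/ne
// map straight onto beq/bne, while the signed and unsigned orderings are
// built from slt/slti or sltu/sltiu into a scratch register followed by a
// beq/bne against zero_reg, which is why every path below clobbers at.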
1594 BranchShort(offset, bdslot);
1601 BranchShort(offset, cond, rs, rt, bdslot);
1606 if (L->is_bound()) {
1608 BranchShort(L, bdslot);
1613 if (is_trampoline_emitted()) {
1616 BranchShort(L, bdslot);
1622 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1625 if (L->is_bound()) {
1627 BranchShort(L, cond, rs, rt, bdslot);
1631 BranchShort(&skip, neg_cond, rs, rt);
1636 if (is_trampoline_emitted()) {
1639 BranchShort(&skip, neg_cond, rs, rt);
1643 BranchShort(L, cond, rs, rt, bdslot);
1649 void MacroAssembler::Branch(Label* L,
1652 Heap::RootListIndex index,
1654 LoadRoot(at, index);
1655 Branch(L, cond, rs, Operand(at), bdslot);
1668 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1671 BRANCH_ARGS_CHECK(cond, rs, rt);
1672 ASSERT(!rs.is(zero_reg));
1674 Register scratch = at;
1685 beq(rs, r2, offset);
1688 bne(rs, r2, offset);
1692 if (r2.is(zero_reg)) {
1695 slt(scratch, r2, rs);
1696 bne(scratch, zero_reg, offset);
1700 if (r2.is(zero_reg)) {
1703 slt(scratch, rs, r2);
1704 beq(scratch, zero_reg, offset);
1708 if (r2.is(zero_reg)) {
1711 slt(scratch, rs, r2);
1712 bne(scratch, zero_reg, offset);
1716 if (r2.is(zero_reg)) {
1719 slt(scratch, r2, rs);
1720 beq(scratch, zero_reg, offset);
1725 if (r2.is(zero_reg)) {
1728 sltu(scratch, r2, rs);
1729 bne(scratch, zero_reg, offset);
1733 if (r2.is(zero_reg)) {
1736 sltu(scratch, rs, r2);
1737 beq(scratch, zero_reg, offset);
1741 if (r2.is(zero_reg)) {
1745 sltu(scratch, rs, r2);
1746 bne(scratch, zero_reg, offset);
1750 if (r2.is(zero_reg)) {
1753 sltu(scratch, r2, rs);
1754 beq(scratch, zero_reg, offset);
1773 beq(rs, r2, offset);
1780 bne(rs, r2, offset);
1784 if (rt.imm32_ == 0) {
1789 slt(scratch, r2, rs);
1790 bne(scratch, zero_reg, offset);
1794 if (rt.imm32_ == 0) {
1797 slti(scratch, rs, rt.imm32_);
1798 beq(scratch, zero_reg, offset);
1802 slt(scratch, rs, r2);
1803 beq(scratch, zero_reg, offset);
1807 if (rt.imm32_ == 0) {
1810 slti(scratch, rs, rt.imm32_);
1811 bne(scratch, zero_reg, offset);
1815 slt(scratch, rs, r2);
1816 bne(scratch, zero_reg, offset);
1820 if (rt.imm32_ == 0) {
1825 slt(scratch, r2, rs);
1826 beq(scratch, zero_reg, offset);
1831 if (rt.imm32_ == 0) {
1836 sltu(scratch, r2, rs);
1837 bne(scratch, zero_reg, offset);
1841 if (rt.imm32_ == 0) {
1844 sltiu(scratch, rs, rt.imm32_);
1845 beq(scratch, zero_reg, offset);
1849 sltu(scratch, rs, r2);
1850 beq(scratch, zero_reg, offset);
1854 if (rt.imm32_ == 0) {
1858 sltiu(scratch, rs, rt.imm32_);
1859 bne(scratch, zero_reg, offset);
1863 sltu(scratch, rs, r2);
1864 bne(scratch, zero_reg, offset);
1868 if (rt.imm32_ == 0) {
1873 sltu(scratch, r2, rs);
1874 beq(scratch, zero_reg, offset);
1891 b(shifted_branch_offset(L, false));
1899 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1902 BRANCH_ARGS_CHECK(cond, rs, rt);
1906 Register scratch = at;
1914 offset = shifted_branch_offset(L,
false);
1918 offset = shifted_branch_offset(L,
false);
1919 beq(rs, r2, offset);
1922 offset = shifted_branch_offset(L,
false);
1923 bne(rs, r2, offset);
1927 if (r2.is(zero_reg)) {
1928 offset = shifted_branch_offset(L,
false);
1931 slt(scratch, r2, rs);
1932 offset = shifted_branch_offset(L,
false);
1933 bne(scratch, zero_reg, offset);
1937 if (r2.is(zero_reg)) {
1938 offset = shifted_branch_offset(L,
false);
1941 slt(scratch, rs, r2);
1942 offset = shifted_branch_offset(L,
false);
1943 beq(scratch, zero_reg, offset);
1947 if (r2.is(zero_reg)) {
1948 offset = shifted_branch_offset(L,
false);
1951 slt(scratch, rs, r2);
1952 offset = shifted_branch_offset(L,
false);
1953 bne(scratch, zero_reg, offset);
1957 if (r2.is(zero_reg)) {
1958 offset = shifted_branch_offset(L,
false);
1961 slt(scratch, r2, rs);
1962 offset = shifted_branch_offset(L,
false);
1963 beq(scratch, zero_reg, offset);
1968 if (r2.is(zero_reg)) {
1969 offset = shifted_branch_offset(L,
false);
1972 sltu(scratch, r2, rs);
1973 offset = shifted_branch_offset(L,
false);
1974 bne(scratch, zero_reg, offset);
1978 if (r2.is(zero_reg)) {
1979 offset = shifted_branch_offset(L,
false);
1982 sltu(scratch, rs, r2);
1983 offset = shifted_branch_offset(L,
false);
1984 beq(scratch, zero_reg, offset);
1988 if (r2.is(zero_reg)) {
1992 sltu(scratch, rs, r2);
1993 offset = shifted_branch_offset(L,
false);
1994 bne(scratch, zero_reg, offset);
1998 if (r2.is(zero_reg)) {
1999 offset = shifted_branch_offset(L,
false);
2002 sltu(scratch, r2, rs);
2003 offset = shifted_branch_offset(L,
false);
2004 beq(scratch, zero_reg, offset);
2016 offset = shifted_branch_offset(L,
false);
2023 offset = shifted_branch_offset(L,
false);
2024 beq(rs, r2, offset);
2030 offset = shifted_branch_offset(L,
false);
2031 bne(rs, r2, offset);
2035 if (rt.imm32_ == 0) {
2036 offset = shifted_branch_offset(L,
false);
2042 slt(scratch, r2, rs);
2043 offset = shifted_branch_offset(L,
false);
2044 bne(scratch, zero_reg, offset);
2048 if (rt.imm32_ == 0) {
2049 offset = shifted_branch_offset(L,
false);
2052 slti(scratch, rs, rt.imm32_);
2053 offset = shifted_branch_offset(L,
false);
2054 beq(scratch, zero_reg, offset);
2059 slt(scratch, rs, r2);
2060 offset = shifted_branch_offset(L,
false);
2061 beq(scratch, zero_reg, offset);
2065 if (rt.imm32_ == 0) {
2066 offset = shifted_branch_offset(L,
false);
2069 slti(scratch, rs, rt.imm32_);
2070 offset = shifted_branch_offset(L,
false);
2071 bne(scratch, zero_reg, offset);
2076 slt(scratch, rs, r2);
2077 offset = shifted_branch_offset(L,
false);
2078 bne(scratch, zero_reg, offset);
2082 if (rt.imm32_ == 0) {
2083 offset = shifted_branch_offset(L,
false);
2089 slt(scratch, r2, rs);
2090 offset = shifted_branch_offset(L,
false);
2091 beq(scratch, zero_reg, offset);
2096 if (rt.imm32_ == 0) {
2097 offset = shifted_branch_offset(L,
false);
2103 sltu(scratch, r2, rs);
2104 offset = shifted_branch_offset(L,
false);
2105 bne(scratch, zero_reg, offset);
2109 if (rt.imm32_ == 0) {
2110 offset = shifted_branch_offset(L,
false);
2113 sltiu(scratch, rs, rt.imm32_);
2114 offset = shifted_branch_offset(L,
false);
2115 beq(scratch, zero_reg, offset);
2120 sltu(scratch, rs, r2);
2121 offset = shifted_branch_offset(L,
false);
2122 beq(scratch, zero_reg, offset);
2126 if (rt.imm32_ == 0) {
2130 sltiu(scratch, rs, rt.imm32_);
2131 offset = shifted_branch_offset(L,
false);
2132 bne(scratch, zero_reg, offset);
2137 sltu(scratch, rs, r2);
2138 offset = shifted_branch_offset(L,
false);
2139 bne(scratch, zero_reg, offset);
2143 if (rt.imm32_ == 0) {
2144 offset = shifted_branch_offset(L,
false);
2150 sltu(scratch, r2, rs);
2151 offset = shifted_branch_offset(L,
false);
2152 beq(scratch, zero_reg, offset);
2168 BranchAndLinkShort(offset, bdslot);
2172 void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2175 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2179 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2180 if (L->is_bound()) {
2182 BranchAndLinkShort(L, bdslot);
2187 if (is_trampoline_emitted()) {
2190 BranchAndLinkShort(L, bdslot);
2196 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2199 if (L->is_bound()) {
2201 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2205 BranchShort(&skip, neg_cond, rs, rt);
2210 if (is_trampoline_emitted()) {
2213 BranchShort(&skip, neg_cond, rs, rt);
2217 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2226 void MacroAssembler::BranchAndLinkShort(int16_t offset,
2237 Register rs, const Operand& rt,
2239 BRANCH_ARGS_CHECK(cond, rs, rt);
2241 Register scratch = at;
2267 slt(scratch, r2, rs);
2268 addiu(scratch, scratch, -1);
2269 bgezal(scratch, offset);
2272 slt(scratch, rs, r2);
2273 addiu(scratch, scratch, -1);
2274 bltzal(scratch, offset);
2277 slt(scratch, rs, r2);
2278 addiu(scratch, scratch, -1);
2279 bgezal(scratch, offset);
2282 slt(scratch, r2, rs);
2283 addiu(scratch, scratch, -1);
2284 bltzal(scratch, offset);
2289 sltu(scratch, r2, rs);
2290 addiu(scratch, scratch, -1);
2291 bgezal(scratch, offset);
2294 sltu(scratch, rs, r2);
2295 addiu(scratch, scratch, -1);
2296 bltzal(scratch, offset);
2299 sltu(scratch, rs, r2);
2300 addiu(scratch, scratch, -1);
2301 bgezal(scratch, offset);
2304 sltu(scratch, r2, rs);
2305 addiu(scratch, scratch, -1);
2306 bltzal(scratch, offset);
2318 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2319 bal(shifted_branch_offset(L, false));
2327 void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2330 BRANCH_ARGS_CHECK(cond, rs, rt);
2334 Register scratch = at;
2344 offset = shifted_branch_offset(L,
false);
2350 offset = shifted_branch_offset(L,
false);
2356 offset = shifted_branch_offset(L,
false);
2362 slt(scratch, r2, rs);
2363 addiu(scratch, scratch, -1);
2364 offset = shifted_branch_offset(L,
false);
2365 bgezal(scratch, offset);
2368 slt(scratch, rs, r2);
2369 addiu(scratch, scratch, -1);
2370 offset = shifted_branch_offset(L,
false);
2371 bltzal(scratch, offset);
2374 slt(scratch, rs, r2);
2375 addiu(scratch, scratch, -1);
2376 offset = shifted_branch_offset(L,
false);
2377 bgezal(scratch, offset);
2380 slt(scratch, r2, rs);
2381 addiu(scratch, scratch, -1);
2382 offset = shifted_branch_offset(L,
false);
2383 bltzal(scratch, offset);
2388 sltu(scratch, r2, rs);
2389 addiu(scratch, scratch, -1);
2390 offset = shifted_branch_offset(L,
false);
2391 bgezal(scratch, offset);
2394 sltu(scratch, rs, r2);
2395 addiu(scratch, scratch, -1);
2396 offset = shifted_branch_offset(L,
false);
2397 bltzal(scratch, offset);
2400 sltu(scratch, rs, r2);
2401 addiu(scratch, scratch, -1);
2402 offset = shifted_branch_offset(L,
false);
2403 bgezal(scratch, offset);
2406 sltu(scratch, r2, rs);
2407 addiu(scratch, scratch, -1);
2408 offset = shifted_branch_offset(L,
false);
2409 bltzal(scratch, offset);
2425 void MacroAssembler::Jump(Register target,
2430 BlockTrampolinePoolScope block_trampoline_pool(this);
2434 BRANCH_ARGS_CHECK(cond, rs, rt);
2444 void MacroAssembler::Jump(intptr_t target,
2445 RelocInfo::Mode rmode,
2456 li(t9, Operand(target, rmode));
2457 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2462 void MacroAssembler::Jump(Address target,
2463 RelocInfo::Mode rmode,
2468 ASSERT(!RelocInfo::IsCodeTarget(rmode));
2469 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2473 void MacroAssembler::Jump(Handle<Code> code,
2474 RelocInfo::Mode rmode,
2479 ASSERT(RelocInfo::IsCodeTarget(rmode));
2480 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2484 int MacroAssembler::CallSize(Register target,
2500 return size * kInstrSize;
2505 void MacroAssembler::Call(Register target,
2510 BlockTrampolinePoolScope block_trampoline_pool(this);
2516 BRANCH_ARGS_CHECK(cond, rs, rt);
2524 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2525 SizeOfCodeGeneratedSince(&start));
2529 int MacroAssembler::CallSize(Address target,
2530 RelocInfo::Mode rmode,
2535 int size = CallSize(t9, cond, rs, rt, bd);
2536 return size + 2 * kInstrSize;
2540 void MacroAssembler::Call(Address target,
2541 RelocInfo::Mode rmode,
2546 BlockTrampolinePoolScope block_trampoline_pool(this);
2552 positions_recorder()->WriteRecordedPositions();
2554 Call(t9, cond, rs, rt, bd);
2555 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2556 SizeOfCodeGeneratedSince(&start));
2560 int MacroAssembler::CallSize(Handle<Code> code,
2561 RelocInfo::Mode rmode,
2567 return CallSize(reinterpret_cast<Address>(code.location()),
2568 rmode, cond, rs, rt, bd);
2572 void MacroAssembler::Call(Handle<Code> code,
2573 RelocInfo::Mode rmode,
2579 BlockTrampolinePoolScope block_trampoline_pool(this);
2582 ASSERT(RelocInfo::IsCodeTarget(rmode));
2583 if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2584 SetRecordedAstId(ast_id);
2585 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2587 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2588 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2589 SizeOfCodeGeneratedSince(&start));
2593 void MacroAssembler::Ret(Condition cond,
2597 Jump(ra, cond, rs, rt, bd);
2602 BlockTrampolinePoolScope block_trampoline_pool(this);
2605 imm28 = jump_address(L);
2607 { BlockGrowBufferScope block_buf_growth(this);
2610 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2620 BlockTrampolinePoolScope block_trampoline_pool(this);
2623 imm32 = jump_address(L);
2624 { BlockGrowBufferScope block_buf_growth(this);
2627 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2640 BlockTrampolinePoolScope block_trampoline_pool(this);
2643 imm32 = jump_address(L);
2644 { BlockGrowBufferScope block_buf_growth(this);
2647 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2658 void MacroAssembler::DropAndRet(int drop) {
2660 addiu(sp, sp, drop * kPointerSize);
2663 void MacroAssembler::DropAndRet(int drop,
2666 const Operand& r2) {
2682 void MacroAssembler::Drop(int count,
2685 const Operand& op) {
2696 addiu(sp, sp, count * kPointerSize);
2705 void MacroAssembler::Swap(Register reg1,
2708 if (scratch.is(no_reg)) {
2709 Xor(reg1, reg1, Operand(reg2));
2710 Xor(reg2, reg2, Operand(reg1));
2711 Xor(reg1, reg1, Operand(reg2));
2720 void MacroAssembler::Call(Label* target) {
2721 BranchAndLink(target);
2725 void MacroAssembler::Push(Handle<Object> handle) {
2726 li(at, Operand(handle));
2731 #ifdef ENABLE_DEBUGGER_SUPPORT
2733 void MacroAssembler::DebugBreak() {
2734 PrepareCEntryArgs(0);
2735 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2737 ASSERT(AllowThisStubCall(&ces));
2741 #endif // ENABLE_DEBUGGER_SUPPORT
2747 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2748 int handler_index) {
2750 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2751 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2752 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2753 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2754 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2755 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2762 StackHandler::IndexField::encode(handler_index) |
2763 StackHandler::KindField::encode(kind);
2765 li(t2, Operand(state));
2768 if (kind == StackHandler::JS_ENTRY) {
2773 Push(zero_reg, zero_reg, t2, t1);
2775 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2779 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2787 void MacroAssembler::PopTryHandler() {
2790 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2791 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2796 void MacroAssembler::JumpToHandlerEntry() {
2802 srl(a2, a2, StackHandler::kKindWidth);
2813 void MacroAssembler::Throw(Register value) {
2815 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2817 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2818 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2819 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2820 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2826 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2836 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2842 Branch(&done, eq, cp, Operand(zero_reg));
2846 JumpToHandlerEntry();
2850 void MacroAssembler::ThrowUncatchable(Register value) {
2852 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2853 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2854 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2855 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2856 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2857 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2860 if (!value.is(v0)) {
2864 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2868 Label fetch_next, check_kind;
2875 lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2876 And(a2, a2, Operand(StackHandler::KindField::kMask));
2877 Branch(&fetch_next, ne, a2, Operand(zero_reg));
2885 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2887 JumpToHandlerEntry();
2891 void MacroAssembler::AllocateInNewSpace(int object_size,
2897 if (!FLAG_inline_new) {
2898 if (emit_debug_code()) {
2901 li(scratch1, 0x7191);
2902 li(scratch2, 0x7291);
2908 ASSERT(!result.is(scratch1));
2909 ASSERT(!result.is(scratch2));
2910 ASSERT(!scratch1.is(scratch2));
2911 ASSERT(!scratch1.is(t9));
2912 ASSERT(!scratch2.is(t9));
2924 ExternalReference new_space_allocation_top =
2925 ExternalReference::new_space_allocation_top_address(isolate());
2926 ExternalReference new_space_allocation_limit =
2927 ExternalReference::new_space_allocation_limit_address(isolate());
2929 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2931 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2932 ASSERT((limit - top) == kPointerSize);
2935 Register topaddr = scratch1;
2936 Register obj_size_reg = scratch2;
2937 li(topaddr, Operand(new_space_allocation_top));
2938 li(obj_size_reg, Operand(object_size));
2946 if (emit_debug_code()) {
2951 Check(eq, "Unexpected allocation top", result, Operand(t9));
2959 Addu(scratch2, result, Operand(obj_size_reg));
2960 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2970 void MacroAssembler::AllocateInNewSpace(Register object_size,
2976 if (!FLAG_inline_new) {
2977 if (emit_debug_code()) {
2980 li(scratch1, 0x7191);
2981 li(scratch2, 0x7291);
2987 ASSERT(!result.is(scratch1));
2988 ASSERT(!result.is(scratch2));
2989 ASSERT(!scratch1.is(scratch2));
2990 ASSERT(!object_size.is(t9));
2991 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2996 ExternalReference new_space_allocation_top =
2997 ExternalReference::new_space_allocation_top_address(isolate());
2998 ExternalReference new_space_allocation_limit =
2999 ExternalReference::new_space_allocation_limit_address(isolate());
3001 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
3003 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
3004 ASSERT((limit - top) == kPointerSize);
3007 Register topaddr = scratch1;
3008 li(topaddr, Operand(new_space_allocation_top));
3011 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3016 if (emit_debug_code()) {
3021 Check(eq, "Unexpected allocation top", result, Operand(t9));
3030 if ((flags & SIZE_IN_WORDS) != 0) {
3032 Addu(scratch2, result, scratch2);
3034 Addu(scratch2, result, Operand(object_size));
3036 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3039 if (emit_debug_code()) {
3041 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
3046 if ((flags & TAG_OBJECT) != 0) {
3052 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3054 ExternalReference new_space_allocation_top =
3055 ExternalReference::new_space_allocation_top_address(isolate());
3061 li(scratch, Operand(new_space_allocation_top));
3063 Check(less, "Undo allocation of non allocated memory",
3064 object, Operand(scratch));
3067 li(scratch, Operand(new_space_allocation_top));
3072 void MacroAssembler::AllocateTwoByteString(Register result,
3077 Label* gc_required) {
3081 sll(scratch1, length, 1);
3082 addiu(scratch1, scratch1,
3087 AllocateInNewSpace(scratch1,
3095 InitializeNewString(result,
3097 Heap::kStringMapRootIndex,
3103 void MacroAssembler::AllocateAsciiString(Register result,
3108 Label* gc_required) {
3117 AllocateInNewSpace(scratch1,
3125 InitializeNewString(result,
3127 Heap::kAsciiStringMapRootIndex,
3133 void MacroAssembler::AllocateTwoByteConsString(Register result,
3137 Label* gc_required) {
3138 AllocateInNewSpace(ConsString::kSize,
3144 InitializeNewString(result,
3146 Heap::kConsStringMapRootIndex,
3152 void MacroAssembler::AllocateAsciiConsString(Register result,
3156 Label* gc_required) {
3157 AllocateInNewSpace(ConsString::kSize,
3163 InitializeNewString(result,
3165 Heap::kConsAsciiStringMapRootIndex,
3171 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3175 Label* gc_required) {
3176 AllocateInNewSpace(SlicedString::kSize,
3183 InitializeNewString(result,
3185 Heap::kSlicedStringMapRootIndex,
3191 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3195 Label* gc_required) {
3196 AllocateInNewSpace(SlicedString::kSize,
3203 InitializeNewString(result,
3205 Heap::kSlicedAsciiStringMapRootIndex,
3213 void MacroAssembler::AllocateHeapNumber(Register result,
3216 Register heap_number_map,
3220 AllocateInNewSpace(HeapNumber::kSize,
3228 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3233 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3237 Label* gc_required) {
3238 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3239 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3245 void MacroAssembler::CopyFields(Register dst,
3249 ASSERT((temps & dst.bit()) == 0);
3250 ASSERT((temps & src.bit()) == 0);
3256 if ((temps & (1 << i)) != 0) {
3263 for (int i = 0; i < field_count; i++) {
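// CopyBytes below copies byte-wise until src reaches word alignment, then
// moves a word at a time; because the destination may still be unaligned,
// each loaded word is written out as individual bytes (the sb/srl-by-8
// sequence), and a trailing byte loop handles whatever length remains.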
3270 void MacroAssembler::CopyBytes(Register src,
3274 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3278 Branch(&done, eq, length, Operand(zero_reg));
3279 bind(&align_loop_1);
3280 And(scratch, src, kPointerSize - 1);
3281 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3286 Subu(length, length, Operand(1));
3287 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3291 if (emit_debug_code()) {
3292 And(scratch, src, kPointerSize - 1);
3293 Assert(eq, "Expecting alignment for CopyBytes",
3294 scratch, Operand(zero_reg));
3296 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3298 Addu(src, src, kPointerSize);
3303 srl(scratch, scratch, 8);
3305 srl(scratch, scratch, 8);
3307 srl(scratch, scratch, 8);
3311 Subu(length, length, Operand(kPointerSize));
3316 Branch(&done, eq, length, Operand(zero_reg));
3322 Subu(length, length, Operand(1));
3323 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3328 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3329 Register end_offset,
3335 Addu(start_offset, start_offset, kPointerSize);
3337 Branch(&loop, lt, start_offset, Operand(end_offset));
3341 void MacroAssembler::CheckFastElements(Register map,
3349 Branch(fail, hi, scratch,
3350 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3354 void MacroAssembler::CheckFastObjectElements(Register map,
3362 Branch(fail, ls, scratch,
3363 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3364 Branch(fail, hi, scratch,
3365 Operand(Map::kMaximumBitField2FastHoleyElementValue));
3369 void MacroAssembler::CheckFastSmiElements(Register map,
3375 Branch(fail, hi, scratch,
3376 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3380 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3382 Register receiver_reg,
3383 Register elements_reg,
3389 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3390 Register mantissa_reg = scratch2;
3391 Register exponent_reg = scratch3;
3394 JumpIfSmi(value_reg, &smi_value);
3399 Heap::kHeapNumberMapRootIndex,
3406 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3407 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3409 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3411 bind(&have_double_value);
3413 Addu(scratch1, scratch1, elements_reg);
3414 sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
3415 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
3422 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3423 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3424 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3427 uint64_t nan_int64 = BitCast<uint64_t>(
3428 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3429 li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3430 li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3431 jmp(&have_double_value);
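// Any NaN read out of the heap number is replaced here with the canonical
// not-the-hole NaN bit pattern before being stored, so the special hole NaN
// used by FixedDoubleArray cannot be forged from a user-supplied value.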
3434 Addu(scratch1, elements_reg,
3437 Addu(scratch1, scratch1, scratch2);
3440 FloatingPointHelper::Destination destination;
3441 if (CpuFeatures::IsSupported(FPU)) {
3442 destination = FloatingPointHelper::kFPURegisters;
3444 destination = FloatingPointHelper::kCoreRegisters;
3447 Register untagged_value = receiver_reg;
3448 SmiUntag(untagged_value, value_reg);
3449 FloatingPointHelper::ConvertIntToDouble(this,
3457 if (destination == FloatingPointHelper::kFPURegisters) {
3458 CpuFeatures::Scope scope(FPU);
3462 sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
3468 void MacroAssembler::CompareMapAndBranch(Register obj,
3471 Label* early_success,
3476 CompareMapAndBranch(scratch, map, early_success, cond, branch_to, mode);
3480 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3482 Label* early_success,
3486 Operand right = Operand(map);
3491 Map* current_map = *map;
3494 current_map = current_map->LookupElementsTransitionMap(kind);
3495 if (!current_map) break;
3496 Branch(early_success, eq, obj_map, right);
3497 right = Operand(Handle<Map>(current_map));
3502 Branch(branch_to, cond, obj_map, right);
3506 void MacroAssembler::CheckMap(Register obj,
3513 JumpIfSmi(obj, fail);
3516 CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
3521 void MacroAssembler::DispatchMap(Register obj,
3524 Handle<Code> success,
3528 JumpIfSmi(obj, &fail);
3531 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3536 void MacroAssembler::CheckMap(Register obj,
3538 Heap::RootListIndex index,
3542 JumpIfSmi(obj, fail);
3545 LoadRoot(at, index);
3546 Branch(fail, ne, scratch, Operand(at));
3550 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3551 CpuFeatures::Scope scope(FPU);
3560 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3561 CpuFeatures::Scope scope(FPU);
3570 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3572 CpuFeatures::Scope scope(FPU);
3574 if (dreg2.is(f12)) {
3583 Move(a0, a1, dreg1);
3584 Move(a2, a3, dreg2);
3589 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3591 CpuFeatures::Scope scope(FPU);
3602 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3609 li(dst, Operand(Smi::FromInt(1)));
3611 li(dst, Operand(Smi::FromInt(0)));
3619 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3620 const ParameterCount& actual,
3621 Handle<Code> code_constant,
3624 bool* definitely_mismatches,
3626 const CallWrapper& call_wrapper,
3628 bool definitely_matches = false;
3629 *definitely_mismatches = false;
3630 Label regular_invoke;
3642 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3643 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3644 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3646 if (expected.is_immediate()) {
3647 ASSERT(actual.is_immediate());
3648 if (expected.immediate() == actual.immediate()) {
3649 definitely_matches = true;
3651 li(a0, Operand(actual.immediate()));
3652 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3653 if (expected.immediate() == sentinel) {
3658 definitely_matches = true;
3660 *definitely_mismatches = true;
3661 li(a2, Operand(expected.immediate()));
3664 } else if (actual.is_immediate()) {
3665 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3666 li(a0, Operand(actual.immediate()));
3668 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3671 if (!definitely_matches) {
3672 if (!code_constant.is_null()) {
3673 li(a3, Operand(code_constant));
3677 Handle<Code> adaptor =
3678 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3680 call_wrapper.BeforeCall(CallSize(adaptor));
3681 SetCallKind(t1, call_kind);
3683 call_wrapper.AfterCall();
3684 if (!*definitely_mismatches) {
3688 SetCallKind(t1, call_kind);
3689 Jump(adaptor, RelocInfo::CODE_TARGET);
3691 bind(®ular_invoke);
3696 void MacroAssembler::InvokeCode(Register code,
3697 const ParameterCount& expected,
3698 const ParameterCount& actual,
3700 const CallWrapper& call_wrapper,
3707 bool definitely_mismatches = false;
3708 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3709 &done, &definitely_mismatches, flag,
3710 call_wrapper, call_kind);
3711 if (!definitely_mismatches) {
3713 call_wrapper.BeforeCall(CallSize(code));
3714 SetCallKind(t1, call_kind);
3716 call_wrapper.AfterCall();
3719 SetCallKind(t1, call_kind);
3729 void MacroAssembler::InvokeCode(Handle<Code> code,
3730 const ParameterCount& expected,
3731 const ParameterCount& actual,
3732 RelocInfo::Mode rmode,
3740 bool definitely_mismatches = false;
3741 InvokePrologue(expected, actual, code, no_reg,
3742 &done, &definitely_mismatches, flag,
3743 NullCallWrapper(), call_kind);
3744 if (!definitely_mismatches) {
3746 SetCallKind(t1, call_kind);
3749 SetCallKind(t1, call_kind);
3759 void MacroAssembler::InvokeFunction(Register function,
3760 const ParameterCount& actual,
3762 const CallWrapper& call_wrapper,
3769 Register expected_reg = a2;
3770 Register code_reg = a3;
3772 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3776 SharedFunctionInfo::kFormalParameterCountOffset));
3780 ParameterCount expected(expected_reg);
3781 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3785 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3786 const ParameterCount& actual,
3788 const CallWrapper& call_wrapper,
3794 LoadHeapObject(a1, function);
3797 ParameterCount expected(function->shared()->formal_parameter_count());
3802 InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3806 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3811 IsInstanceJSObjectType(map, scratch, fail);
3815 void MacroAssembler::IsInstanceJSObjectType(Register map,
3824 void MacroAssembler::IsObjectJSStringType(Register object,
3832 Branch(fail, ne, scratch, Operand(zero_reg));
3840 void MacroAssembler::TryGetFunctionPrototype(Register function,
3844 bool miss_on_bound_function) {
3846 JumpIfSmi(function, miss);
3849 GetObjectType(function, result, scratch);
3852 if (miss_on_bound_function) {
3857 And(scratch, scratch,
3858 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3859 Branch(miss, ne, scratch, Operand(zero_reg));
3865 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3866 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3875 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3876 Branch(miss, eq, result, Operand(t8));
3880 GetObjectType(result, scratch, scratch);
3881 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3889 bind(&non_instance);
3897 void MacroAssembler::GetObjectType(Register object,
3899 Register type_reg) {
3908 void MacroAssembler::CallStub(CodeStub* stub,
3913 ASSERT(AllowThisStubCall(stub));
3914 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
3918 void MacroAssembler::TailCallStub(CodeStub* stub) {
3919 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
3920 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
3924 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3925 return ref0.address() - ref1.address();
3929 void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
3931 ExternalReference next_address =
3932 ExternalReference::handle_scope_next_address();
3933 const int kNextOffset = 0;
3934 const int kLimitOffset = AddressOffset(
3935 ExternalReference::handle_scope_limit_address(),
3937 const int kLevelOffset = AddressOffset(
3938 ExternalReference::handle_scope_level_address(),
3942 li(s3, Operand(next_address));
3946 Addu(s2, s2, Operand(1));
3953 addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3958 DirectCEntryStub stub;
3959 stub.GenerateCall(this, function);
3965 Label promote_scheduled_exception;
3966 Label delete_allocated_handles;
3967 Label leave_exit_frame;
3972 LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3973 Branch(&skip, eq, v0, Operand(zero_reg));
3981 if (emit_debug_code()) {
3983 Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3985 Subu(s2, s2, Operand(1));
3988 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3991 bind(&leave_exit_frame);
3992 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3993 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3995 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3996 li(s0, Operand(stack_space));
3997 LeaveExitFrame(false, s0, true);
3999 bind(&promote_scheduled_exception);
4000 TailCallExternalReference(
4001 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4006 bind(&delete_allocated_handles);
4010 PrepareCallCFunction(1, s1);
4011 li(a0, Operand(ExternalReference::isolate_address()));
4012 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4015 jmp(&leave_exit_frame);
4019 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4020 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
4021 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
4025 void MacroAssembler::IllegalOperation(int num_arguments) {
4026 if (num_arguments > 0) {
4027 addiu(sp, sp, num_arguments * kPointerSize);
4029 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4033 void MacroAssembler::IndexFromHash(Register hash,
4040 (1 << String::kArrayIndexValueBits));
4044 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4049 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4053 Register heap_number_map,
4059 JumpIfNotSmi(object, &not_smi);
4062 mtc1(scratch1, result);
4063 cvt_d_w(result, result);
4069 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4073 Register exponent = scratch1;
4074 Register mask_reg = scratch2;
4076 li(mask_reg, HeapNumber::kExponentMask);
4078 And(exponent, exponent, mask_reg);
4079 Branch(not_number, eq, exponent, Operand(mask_reg));
4086 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4088 Register scratch1) {
4090 mtc1(scratch1, value);
4091 cvt_d_w(value, value);
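// AdduAndCheckForOverflow / SubuAndCheckForOverflow below use the usual
// sign-bit trick: for addition, overflow occurred iff the operands had the
// same sign but the result's sign differs, so
//   overflow_dst = (result ^ left) & (result ^ right)
// leaves the indication in bit 31; subtraction uses the analogous
// (result ^ left) & (left ^ right) form.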
4095 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4098 Register overflow_dst,
4100 ASSERT(!dst.is(overflow_dst));
4101 ASSERT(!dst.is(scratch));
4102 ASSERT(!overflow_dst.is(scratch));
4103 ASSERT(!overflow_dst.is(left));
4104 ASSERT(!overflow_dst.is(right));
4106 if (left.is(right) && dst.is(left)) {
4111 ASSERT(!overflow_dst.is(t9));
4118 addu(dst, left, right);
4119 xor_(scratch, dst, scratch);
4120 xor_(overflow_dst, dst, right);
4121 and_(overflow_dst, overflow_dst, scratch);
4122 } else if (dst.is(right)) {
4123 mov(scratch, right);
4124 addu(dst, left, right);
4125 xor_(scratch, dst, scratch);
4126 xor_(overflow_dst, dst, left);
4127 and_(overflow_dst, overflow_dst, scratch);
4129 addu(dst, left, right);
4130 xor_(overflow_dst, dst, left);
4131 xor_(scratch, dst, right);
4132 and_(overflow_dst, scratch, overflow_dst);
4137 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4140 Register overflow_dst,
4142 ASSERT(!dst.is(overflow_dst));
4143 ASSERT(!dst.is(scratch));
4144 ASSERT(!overflow_dst.is(scratch));
4145 ASSERT(!overflow_dst.is(left));
4146 ASSERT(!overflow_dst.is(right));
4147 ASSERT(!scratch.is(left));
4148 ASSERT(!scratch.is(right));
4152 if (left.is(right)) {
4154 mov(overflow_dst, zero_reg);
4160 subu(dst, left, right);
4161 xor_(overflow_dst, dst, scratch);
4162 xor_(scratch, scratch, right);
4163 and_(overflow_dst, scratch, overflow_dst);
4164 } else if (dst.is(right)) {
4165 mov(scratch, right);
4166 subu(dst, left, right);
4167 xor_(overflow_dst, dst, left);
4168 xor_(scratch, left, scratch);
4169 and_(overflow_dst, scratch, overflow_dst);
4171 subu(dst, left, right);
4172 xor_(overflow_dst, dst, left);
4173 xor_(scratch, left, right);
4174 and_(overflow_dst, scratch, overflow_dst);
4179 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4180 int num_arguments) {
4186 if (f->nargs >= 0 && f->nargs != num_arguments) {
4187 IllegalOperation(num_arguments);
4195 PrepareCEntryArgs(num_arguments);
4196 PrepareCEntryFunction(ExternalReference(f, isolate()));
4202 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
4203 const Runtime::Function* function = Runtime::FunctionForId(id);
4204 PrepareCEntryArgs(function->nargs);
4205 PrepareCEntryFunction(ExternalReference(function, isolate()));
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments,
                                           BranchDelaySlot bd) {
  PrepareCEntryArgs(num_arguments);
  PrepareCEntryFunction(ext);
  CEntryStub stub(1);
  CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  PrepareCEntryArgs(num_arguments);
  JumpToExternalReference(ext);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments, int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
                            result_size);
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             BranchDelaySlot bd) {
  PrepareCEntryFunction(builtin);
  CEntryStub stub(1);
  Jump(stub.GetCode(),
       RelocInfo::CODE_TARGET,
       al, zero_reg, Operand(zero_reg), bd);
}

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());
  GetBuiltinEntry(t9, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(t9));
    Call(t9);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(t9);
  }
}

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  lw(target, FieldMemOperand(target,
                             JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(a1));
  GetBuiltinFunction(a1, id);
  // Load the code entry point from the builtins object.
  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}

void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch1, Operand(value));
    li(scratch2, Operand(ExternalReference(counter)));
    sw(scratch1, MemOperand(scratch2));
  }
}

void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Addu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}

void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Subu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}

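// Illustrative sketch (editor's addition): a StatsCounter is a plain integer
// cell in host memory whose address is materialized through an
// ExternalReference, so the increment and decrement helpers above compile to
// a load, an add or subtract, and a store.  Hypothetical host-side analogue:
static inline void BumpCounterSketch(int* counter_cell, int delta) {
  // Equivalent of the li/lw, Addu (or Subu), sw sequence in generated code.
  *counter_cell += delta;
}
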
void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  if (emit_debug_code())
    Check(cc, msg, rs, rt);
}

void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (emit_debug_code()) {
    LoadRoot(at, index);
    Check(eq, "Register did not match expected root", reg, Operand(at));
  }
}

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    ASSERT(!elements.is(at));
    Label ok;
    push(elements);
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    pop(elements);
  }
}

void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  Label L;
  Branch(&L, cc, rs, rt);
  Abort(msg);
  // Will not return here.
  bind(&L);
}

void MacroAssembler::Abort(const char* msg) {
  Label abort_start;
  bind(&abort_start);
  // Pass the message pointer as two smis to avoid GC problems: an aligned
  // pointer that is a valid smi, plus the alignment difference as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  li(a0, Operand(p0));
  push(a0);
  li(a0, Operand(Smi::FromInt(p1 - p0)));
  push(a0);
  // Abort may be called without a real frame; claim one without building it.
  if (!has_frame_) {
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // Keep the size of the generated Abort sequence constant for callers that
  // care about the exact instruction count.
  if (is_trampoline_pool_blocked()) {
    static const int kExpectedAbortInstructions = 14;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}

void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // The slot is in the current function context.  Copy it into the
    // destination register in case we store into it (the write barrier must
    // not be allowed to destroy the context in cp).
    mov(dst, cp);
  }
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global context from the current context.
  lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check that the function's map is the same as the expected cached map.
  lw(scratch,
     MemOperand(scratch,
                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  lw(at, FieldMemOperand(scratch, offset));
  Branch(no_map_match, ne, map_in_out, Operand(at));

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  lw(map_in_out, FieldMemOperand(scratch, offset));
}

void MacroAssembler::LoadInitialArrayMap(Register function_in,
                                         Register scratch,
                                         Register map_out,
                                         bool can_have_holes) {
  ASSERT(!function_in.is(map_out));
  Label done;
  lw(map_out, FieldMemOperand(function_in,
                              JSFunction::kPrototypeOrInitialMapOffset));
  if (!FLAG_smi_only_arrays) {
    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind,
                                        map_out, scratch, &done);
  } else if (can_have_holes) {
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        FAST_HOLEY_SMI_ELEMENTS,
                                        map_out, scratch, &done);
  }
  bind(&done);
}

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  lw(function, FieldMemOperand(function,
                               GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  lw(function, MemOperand(function, Context::SlotOffset(index)));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map.  The global functions all have initial maps.
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}

void MacroAssembler::EnterFrame(StackFrame::Type type) {
  addiu(sp, sp, -5 * kPointerSize);
  li(t8, Operand(Smi::FromInt(type)));
  li(t9, Operand(CodeObject()));
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  sw(cp, MemOperand(sp, 2 * kPointerSize));
  sw(t8, MemOperand(sp, 1 * kPointerSize));
  sw(t9, MemOperand(sp, 0 * kPointerSize));
  addiu(fp, sp, 3 * kPointerSize);
}

void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}

void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    int stack_space) {
  // Set up the frame structure on the stack.
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);

  // Save ra and fp, and set up the new frame pointer.
  addiu(sp, sp, -4 * kPointerSize);
  sw(ra, MemOperand(sp, 3 * kPointerSize));
  sw(fp, MemOperand(sp, 2 * kPointerSize));
  addiu(fp, sp, 2 * kPointerSize);

  if (emit_debug_code()) {
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }

  // Accessed from ExitFrame::code_slot.
  li(t8, Operand(CodeObject()));
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(fp, MemOperand(t8));
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(cp, MemOperand(t8));

  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be 8-byte aligned for doubles stored with sdc1.
    ASSERT(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      ASSERT(IsPowerOf2(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Only every second double register needs to be saved.
    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }

  // Reserve space for the return address, the requested stack space and an
  // extra slot, then align the frame for the call to the runtime function.
  ASSERT(stack_space >= 0);
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
  }

  // Set the exit frame sp value to point just before the return address slot.
  addiu(at, sp, kPointerSize);
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore the saved double registers.
  if (save_doubles) {
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }
  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(zero_reg, MemOperand(t8));
  // Restore the current context from top.
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  lw(cp, MemOperand(t8));
  // Pop the arguments and restore the caller's frame.
  mov(sp, fp);
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
  addiu(sp, sp, 8);
  if (argument_count.is_valid()) {
    sll(t8, argument_count, kPointerSizeLog2);
    addu(sp, sp, t8);
  }
}

void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  sll(scratch1, length, kSmiTagSize);
  LoadRoot(scratch2, map_index);
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}

int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_MIPS)
  // Running on the real platform: use the alignment mandated by the local
  // environment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_MIPS)
  // Under the simulator the expected alignment is controlled by a flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_MIPS)
}

void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}

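// Illustrative sketch (editor's addition): a frame alignment of 2^k is
// verified by testing the low k bits of sp, which is exactly what the
// andi + branch above do.  Host-side equivalent:
static inline bool IsStackAlignedSketch(uint32_t sp_value, uint32_t alignment) {
  // alignment must be a power of two; sp is aligned iff the bits selected by
  // (alignment - 1) are all zero.
  return (sp_value & (alignment - 1u)) == 0u;
}
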
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}

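// Illustrative sketch (editor's addition): this is the classic power-of-two
// test -- x is a power of two iff x != 0 and (x & (x - 1)) == 0.  The routine
// above branches out when x - 1 underflows (x was zero) or when the AND is
// non-zero.
static inline bool IsPowerOfTwoSketch(uint32_t x) {
  return x != 0u && (x & (x - 1u)) == 0u;
}
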
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  ASSERT(!reg.is(overflow));
  mov(overflow, reg);  // Save the original value.
  SmiTag(reg);
  xor_(overflow, overflow, reg);  // Overflow iff (value ^ 2 * value) < 0.
}

void MacroAssembler::SmiTagCheckOverflow(Register dst,
                                         Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to the in-place version.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    ASSERT(!dst.is(overflow));
    ASSERT(!src.is(overflow));
    SmiTag(dst, src);
    xor_(overflow, dst, src);  // Overflow iff (value ^ 2 * value) < 0.
  }
}

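// Illustrative sketch (editor's addition): smi-tagging on 32-bit V8 is a left
// shift by one, so it overflows exactly when bits 31 and 30 of the untagged
// value differ -- which xor-ing the value with its tagged (doubled) form and
// checking the sign bit detects.
static inline bool SmiTagOverflowsSketch(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (tagged ^ value) < 0;  // Sign bit set => the value does not fit.
}
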
void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
                                       Label* smi_case) {
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
                                          Label* non_smi_case) {
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
  SmiUntag(dst, src);
}

void MacroAssembler::JumpIfSmi(Register value, Label* smi_label,
                               Register scratch, BranchDelaySlot bd) {
  ASSERT_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
                                  Register scratch, BranchDelaySlot bd) {
  ASSERT_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}

void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  JumpIfNotSmi(at, on_not_both_smi);
}

void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  // Both smi tags must be 1 (not smi) to fall through.
  and_(at, reg1, reg2);
  JumpIfSmi(at, on_either_smi);
}

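// Illustrative sketch (editor's addition): with a zero smi tag in the low bit,
// OR-ing two values leaves the low bit clear only if both are smis, while
// AND-ing them leaves it clear if at least one is a smi -- the two tests used
// above.  Host-side equivalents:
static inline bool BothAreSmisSketch(uint32_t a, uint32_t b) {
  return ((a | b) & 1u) == 0u;  // JumpIfNotBothSmi branches when this is false.
}
static inline bool EitherIsSmiSketch(uint32_t a, uint32_t b) {
  return ((a & b) & 1u) == 0u;  // JumpIfEitherSmi branches when this is true.
}
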
void MacroAssembler::AbortIfSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
}

void MacroAssembler::AbortIfNotSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
}

void MacroAssembler::AbortIfNotString(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  And(t0, object, Operand(kSmiTagMask));
  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
  push(object);
  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
  pop(object);
}

void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(at));
  LoadRoot(at, root_value_index);
  Assert(eq, message, src, Operand(at));
}

void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}

void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  // Load both instance types and check that they are sequential ASCII.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, scratch2,
                                               scratch1, scratch2, failure);
}

void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first, second,
                                             scratch1, scratch2, failure);
}

void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits a 16-bit immediate.
  andi(scratch1, first, kFlatAsciiStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
  andi(scratch2, second, kFlatAsciiStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
}

void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  And(scratch, type, Operand(kFlatAsciiStringMask));
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}

static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  // On O32 each double argument occupies two argument words.
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four words are passed in registers a0..a3; the rest go on the stack.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }

  return stack_passed_words;
}

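// Illustrative sketch (editor's addition): the computation above mirrors the
// MIPS O32 rule that a double consumes two of the four argument words and
// anything beyond the fourth word spills to the stack.  Host-side analogue:
static inline int StackPassedWordsSketch(int reg_args, int double_args) {
  const int kMaxRegisterArgs = 4;          // a0..a3 on MIPS O32.
  int words = reg_args + 2 * double_args;  // Each double takes two words.
  return words > kMaxRegisterArgs ? words - kMaxRegisterArgs : 0;
}
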
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3; remaining
  // arguments are pushed on the stack above the reserved argument slots.
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make the stack end at the alignment, reserve room for the stack-passed
    // words plus one slot for the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

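// Illustrative sketch (editor's addition): the aligned path above reserves one
// extra word, rounds sp down to a multiple of the frame alignment (a power of
// two), and stashes the original sp in that extra slot so CallCFunction can
// restore it afterwards.  The rounding itself is plain bit arithmetic:
static inline uint32_t AlignStackDownSketch(uint32_t sp_value,
                                            uint32_t frame_alignment) {
  // frame_alignment must be a power of two; ~(alignment - 1) is an all-ones
  // mask above the low bits, so the AND rounds sp down.
  return sp_value & ~(frame_alignment - 1u);
}
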
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}

void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  ASSERT(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator, which has its own, more informative alignment
  // check.  The argument slots are presumed to have been set up by
  // PrepareCallCFunction; the C function must be called via t9 per the MIPS
  // ABI.
#if defined(V8_HOST_ARCH_MIPS)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Call directly.  The callee cannot cause a GC or allow preemption, so the
  // return address in the link register stays correct.
  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

#undef BRANCH_ARGS_CHECK

void MacroAssembler::PatchRelocatedValue(Register li_location,
                                         Register scratch,
                                         Register new_value) {
  lw(scratch, MemOperand(li_location));
  // At this point scratch is a lui(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Check(eq, "The instruction to patch should be a lui.",
          scratch, Operand(LUI));
    lw(scratch, MemOperand(li_location));
  }
  srl(t9, new_value, kImm16Bits);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location));

  lw(scratch, MemOperand(li_location, kInstrSize));
  // scratch is now an ori(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Check(eq, "The instruction to patch should be an ori.",
          scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  Ins(scratch, new_value, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize));

  // Update the I-cache so the new lui and ori can be executed.
  FlushICache(li_location, 2);
}

void MacroAssembler::GetRelocatedValue(Register li_location,
                                       Register value,
                                       Register scratch) {
  lw(value, MemOperand(li_location));
  if (emit_debug_code()) {
    And(value, value, Operand(kOpcodeMask));
    Check(eq, "The instruction should be a lui.",
          value, Operand(LUI));
    lw(value, MemOperand(li_location));
  }

  // value now holds a lui instruction.  Extract its immediate into the upper
  // half.
  sll(value, value, kImm16Bits);

  lw(scratch, MemOperand(li_location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, Operand(kOpcodeMask));
    Check(eq, "The instruction should be an ori.",
          scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  // scratch now holds an ori instruction.  Extract its immediate.
  andi(scratch, scratch, kImm16Mask);

  or_(value, value, scratch);
}

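// Illustrative sketch (editor's addition): on MIPS a 32-bit constant is
// materialized as lui (upper 16 bits) followed by ori (lower 16 bits), and
// both instructions carry their immediate in the low 16 bits of the
// instruction word.  Reassembling the constant from the two words therefore
// looks like this:
static inline uint32_t RelocatedValueSketch(uint32_t lui_instr,
                                            uint32_t ori_instr) {
  const uint32_t kImm16 = 0xffffu;
  return ((lui_instr & kImm16) << 16) | (ori_instr & kImm16);
}
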
void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}

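// Illustrative sketch (editor's addition): memory chunks are power-of-two
// aligned, so masking any object address with the inverse of the alignment
// mask yields the chunk header, whose flags word can then be tested.  A
// host-side analogue with a made-up 1 MB alignment:
static inline uint32_t ChunkStartSketch(uint32_t object_address) {
  const uint32_t kAssumedChunkAlignment = 1u << 20;  // Hypothetical value.
  return object_address & ~(kAssumedChunkAlignment - 1u);
}
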
void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}

void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift the mask left by one by adding it to itself.
  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
  jmp(&other_color);

  bind(&word_boundary);
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  And(t9, t9, Operand(1));
  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
  bind(&other_color);
}

void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  Label is_data_object;
  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  Branch(&is_data_object, eq, t8, Operand(scratch));
  // A string that is not indirect (cons or sliced) contains no GC pointers.
  lbu(t8, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(t8, t8, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(not_data_object, ne, t8, Operand(zero_reg));
  bind(&is_data_object);
}

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, t8));
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  sll(t8, t8, kPointerSizeLog2);
  Addu(bitmap_reg, bitmap_reg, t8);
  li(t8, Operand(1));
  sllv(mask_reg, t8, mask_reg);
}

void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Both black and grey have a 1 in the first position, white does not, so
  // only one bit needs checking.
  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

  if (emit_debug_code()) {
    // Check for the impossible bit pattern.
    Label ok;
    sll(t8, mask_scratch, 1);
    And(t8, load_scratch, t8);
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white.  Check whether it is data that doesn't need scanning;
  // currently only heap numbers and strings qualify.
  Register map = load_scratch;  // Holds the map while checking the type.
  Register length = load_scratch;  // Holds the object length after the check.
  Label is_data_object;

  // Check for heap-number.
  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(map));
    li(length, HeapNumber::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // A string that is not indirect (cons or sliced) contains no GC pointers.
  Register instance_type = load_scratch;
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));

  // External strings have a fixed size and are the only strings with the
  // external tag set.
  And(t8, instance_type, Operand(kExternalStringTag));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    li(length, ExternalString::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Sequential string: compute the size from the smi-encoded length.  For
  // ASCII strings the smi tag is shifted away; for two-byte strings the tag
  // left in place doubles the length as required.
  lw(t9, FieldMemOperand(value, String::kLengthOffset));
  And(t8, instance_type, Operand(kStringEncodingMask));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    srl(t9, t9, 1);
    bind(&skip);
  }
  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  And(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // The object is white and contains no pointers: mark it black by setting
  // its mark bit and bump the page's live-byte count.
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Addu(t8, t8, Operand(length));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}

void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors,
     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi);
  LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
  bind(&not_smi);
}

void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next;
  // Preload a couple of values used in the loop.
  Register empty_fixed_array_value = t2;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Register empty_descriptor_array_value = t3;
  LoadRoot(empty_descriptor_array_value,
           Heap::kEmptyDescriptorArrayRootIndex);
  mov(a1, a0);
  bind(&next);

  // Check that there are no elements.  Register a1 contains the current JS
  // object we have reached through the prototype chain.
  lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));

  // Check that the instance descriptors are not empty so that we can check
  // for an enum cache.  Leave the map in a2 for the subsequent prototype load.
  lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
  JumpIfSmi(a3, call_runtime);

  // Check that there is an enum cache in the non-empty instance descriptors.
  // This is the case if the next enumeration index field is not a smi.
  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
  JumpIfSmi(a3, call_runtime);

  // For all objects but the receiver, check that the cache is empty.
  Label check_prototype;
  Branch(&check_prototype, eq, a1, Operand(a0));
  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
  Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));

  // Load the prototype from the map and loop if it is not null.
  bind(&check_prototype);
  lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
  Branch(&next, ne, a1, Operand(null_value));
}

void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  // Clamp the input to [0, 255]: values above 255 keep the preloaded 255,
  // negative values become 0, everything else is copied through.
  ASSERT(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  Branch(&done, gt, input_reg, Operand(output_reg));
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In the branch delay slot.
  mov(output_reg, input_reg);
  bind(&done);
}

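// Illustrative sketch (editor's addition): the branches above implement a
// saturating clamp to the byte range.  Host-side equivalent:
static inline int32_t ClampUint8Sketch(int32_t value) {
  if (value > 255) return 255;
  if (value < 0) return 0;
  return value;
}
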
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is less than zero, NaN or Inf: return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is >= 255: return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In the 0-255 range: round to the nearest integer.
  bind(&in_bounds);
  round_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}

bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}

CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap) {
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

CodePatcher::~CodePatcher() {
  CPU::FlushICache(address_, size_);
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}

void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}

void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  ASSERT(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Only eq <-> ne flips of simple branches are supported: swap the BEQ/BNE
  // opcode to match the requested condition and re-emit the instruction.
  if (cond == eq) {
    ASSERT(opcode == BNE);
    opcode = BEQ;
  } else {
    ASSERT(cond == ne && opcode == BEQ);
    opcode = BNE;
  }
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}

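// Illustrative sketch (editor's addition): a hypothetical use of CodePatcher,
// rewriting two instructions at a known code address.  The address and the
// emitted instruction below are made up for the example.
//
//   CodePatcher patcher(patch_address, 2);       // Patch two instructions.
//   patcher.masm()->andi(at, a0, kSmiTagMask);   // Replace the first one.
//   patcher.ChangeBranchCondition(ne);           // Flip the branch after it.
//
// The destructor flushes the instruction cache and asserts that exactly the
// requested number of instructions was emitted.
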
#endif  // V8_TARGET_ARCH_MIPS