30 #if defined(V8_TARGET_ARCH_MIPS)
41 #define __ ACCESS_MASM(masm)
43 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
47 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
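// Branches to |not_a_heap_number| unless |operand|'s map (loaded into
// scratch1 on a line elided in this excerpt) equals the heap-number map.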
60 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61 Register scratch1, Register scratch2,
62 Label* not_a_heap_number) {
64 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65 __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
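// ToNumberStub::Generate (header elided): smis are returned unchanged and
// heap numbers pass the check above; anything else reaches call_builtin,
// which presumably hands off to the generic ToNumber path (elided here).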
71 Label check_heap_number, call_builtin;
72 __ JumpIfNotSmi(a0, &check_heap_number);
76 __ bind(&check_heap_number);
77 EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
81 __ bind(&call_builtin);
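// FastNewClosureStub::Generate (header elided): allocates a JSFunction in
// new space, wires it to the SharedFunctionInfo and context and, when
// FLAG_cache_optimized_code is set, searches the optimized code map for a
// context-matching entry to install; any failure tail calls
// Runtime::kNewClosure.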
90 Counters* counters = masm->isolate()->counters();
105 __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
120 __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
121 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
132 Label check_optimized;
133 Label install_unoptimized;
134 if (FLAG_cache_optimized_code) {
138 __ Branch(&check_optimized, ne, at, Operand(zero_reg));
140 __ bind(&install_unoptimized);
141 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
150 __ bind(&check_optimized);
152 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
157 Label install_optimized;
161 __ Branch(&install_optimized, eq, a2, Operand(t1));
169 __ Branch(&install_unoptimized, eq, t0,
171 __ Subu(t0, t0, Operand(
175 __ Addu(t1, t1, Operand(at));
177 __ Branch(&loop, ne, a2, Operand(t1));
181 __ Addu(t1, t1, Operand(at));
185 __ bind(&install_optimized);
186 __ IncrementCounter(counters->fast_new_closure_install_optimized(),
205 __ RecordWriteContextSlot(
218 __ LoadRoot(t0, Heap::kFalseValueRootIndex);
220 __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
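// FastNewContextStub::Generate (header elided): allocates a function
// context in new space, fills the slots beyond the fixed header with
// undefined, and falls back to Runtime::kNewFunctionContext on failure.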
241 __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
255 __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
256 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
266 __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
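// FastNewBlockContextStub::Generate (header elided): allocates a block
// context, asserts the smi sentinel in debug mode, fills the remaining
// slots with the hole, and falls back to Runtime::kPushBlockContext.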
289 __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
298 Label after_sentinel;
299 __ JumpIfNotSmi(a3, &after_sentinel);
300 if (FLAG_debug_code) {
301 const char* message = "Expected 0 as a Smi sentinel";
302 __ Assert(eq, message, a3, Operand(zero_reg));
307 __ bind(&after_sentinel);
317 __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
318 for (int i = 0; i < slots_; i++) {
328 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
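// Copies a boilerplate JSArray plus its elements into a fresh new-space
// allocation; shared by the FastCloneShallowArrayStub paths for FixedArray
// and FixedDoubleArray backing stores.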
332 static void GenerateFastCloneShallowArrayCommon(
333 MacroAssembler* masm,
342 int elements_size = 0;
346 : FixedArray::SizeFor(length);
352 __ AllocateInNewSpace(size,
371 __ Addu(a2, v0, Operand(JSArray::kSize));
396 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
397 __ Branch(&slow_case, eq, a3, Operand(t1));
401 Label double_elements, check_fast_elements;
404 __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
405 __ Branch(&check_fast_elements, ne, v0, Operand(t1));
406 GenerateFastCloneShallowArrayCommon(masm, 0,
411 __ bind(&check_fast_elements);
412 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
413 __ Branch(&double_elements, ne, v0, Operand(t1));
414 GenerateFastCloneShallowArrayCommon(masm, length_,
419 __ bind(&double_elements);
424 if (FLAG_debug_code) {
428 message = "Expected (writable) fixed array";
429 expected_map_index = Heap::kFixedArrayMapRootIndex;
431 message = "Expected (writable) fixed double array";
432 expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
435 message = "Expected copy-on-write fixed array";
436 expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
441 __ LoadRoot(at, expected_map_index);
442 __ Assert(eq, message, a3, Operand(at));
446 GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
452 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
473 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
474 __ Branch(&slow_case, eq, a3, Operand(t0));
485 __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
495 __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
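// ConvertToDoubleStub: converts the 32-bit integer in source_ into a
// heap-number-formatted double held in two core registers
// (result1_/result2_) without using the FPU; zeros_ is scratch for the
// leading-zero count.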
504 class ConvertToDoubleStub : public CodeStub {
506 ConvertToDoubleStub(Register result_reg_1,
507 Register result_reg_2,
509 Register scratch_reg)
510 : result1_(result_reg_1),
511 result2_(result_reg_2),
513 zeros_(scratch_reg) { }
522 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
523 class OpBits: public BitField<Token::Value, 2, 14> {};
525 Major MajorKey() { return ConvertToDouble; }
528 return result1_.code() +
529 (result2_.code() << 4) +
530 (source_.code() << 8) +
531 (zeros_.code() << 12);
534 void Generate(MacroAssembler* masm);
538 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
539 #ifndef BIG_ENDIAN_FLOATING_POINT
540 Register exponent = result1_;
541 Register mantissa = result2_;
543 Register exponent = result2_;
544 Register mantissa = result1_;
555 __ subu(at, zero_reg, source_);
556 __ Movn(source_, at, exponent);
561 __ Branch(&not_special, gt, source_, Operand(1));
564 const uint32_t exponent_word_for_1 =
567 __ Or(at, exponent, Operand(exponent_word_for_1));
568 __ Movn(exponent, at, source_);
571 __ mov(mantissa, zero_reg);
573 __ bind(&not_special);
576 __ Clz(zeros_, source_);
580 __ subu(mantissa, mantissa, zeros_);
581 __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
582 __ Or(exponent, exponent, mantissa);
585 __ Addu(zeros_, zeros_, Operand(1));
587 __ sllv(source_, source_, zeros_);
594 __ or_(exponent, exponent, source_);
603 CpuFeatures::Scope scope(FPU);
605 __ mtc1(scratch1, f14);
608 __ mtc1(scratch1, f12);
611 __ Move(a2, a3, f14);
612 __ Move(a0, a1, f12);
617 __ mov(scratch1, a0);
618 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
620 __ Call(stub1.GetCode());
622 __ mov(scratch1, a1);
623 ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
624 __ Call(stub2.GetCode());
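// FloatingPointHelper::LoadOperands (signature partially elided): loads the
// tagged operands a0 and a1 either into f12/f14 or into the core register
// pairs a0/a1 and a2/a3, depending on |destination|.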
631 MacroAssembler* masm,
633 Register heap_number_map,
639 LoadNumber(masm, destination, a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
643 LoadNumber(masm, destination, a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
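// Loads a single operand (smi or heap number) as a double into either an
// FPU register or a pair of core registers; anything else jumps to
// |not_number|.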
648 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
649 Destination destination,
654 Register heap_number_map,
658 __ AssertRootValue(heap_number_map,
659 Heap::kHeapNumberMapRootIndex,
660 "HeapNumberMap register clobbered.");
665 __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
667 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
672 CpuFeatures::Scope scope(FPU);
691 CpuFeatures::Scope scope(FPU);
693 __ mtc1(scratch1, dst);
694 __ cvt_d_w(dst, dst);
697 __ Move(dst1, dst2, dst);
702 __ mov(scratch1, object);
703 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
705 __ Call(stub.GetCode());
716 Register heap_number_map,
720 FPURegister double_scratch,
722 __ AssertRootValue(heap_number_map,
723 Heap::kHeapNumberMapRootIndex,
724 "HeapNumberMap register clobbered.");
726 Label not_in_int32_range;
728 __ UntagAndJumpIfSmi(dst, object, &done);
730 __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
731 __ ConvertToInt32(object,
736 &not_in_int32_range);
739 __ bind(&not_in_int32_range);
743 __ EmitOutOfInt32RangeTruncate(dst,
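// FloatingPointHelper::ConvertIntToDouble (header elided): with an FPU the
// conversion is a simple mtc1/cvt_d_w; the fallback below assembles the
// IEEE-754 sign, exponent and mantissa words by hand using Clz.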
753 Register int_scratch,
754 Destination destination,
755 FPURegister double_dst,
759 FPURegister single_scratch) {
760 ASSERT(!int_scratch.is(scratch2));
761 ASSERT(!int_scratch.is(dst1));
762 ASSERT(!int_scratch.is(dst2));
767 CpuFeatures::Scope scope(FPU);
768 __ mtc1(int_scratch, single_scratch);
769 __ cvt_d_w(double_dst, single_scratch);
771 __ Move(dst1, dst2, double_dst);
774 Label fewer_than_20_useful_bits;
780 __ mov(dst2, int_scratch);
781 __ mov(dst1, int_scratch);
782 __ Branch(&done, eq, int_scratch, Operand(zero_reg));
788 __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
789 __ Subu(int_scratch, zero_reg, int_scratch);
795 __ Clz(dst1, int_scratch);
797 __ Subu(dst1, scratch2, dst1);
801 __ Ins(dst2, scratch2,
805 __ li(scratch2, Operand(1));
806 __ sllv(scratch2, scratch2, dst1);
808 __ Xor(scratch2, scratch2, at);
809 __ And(int_scratch, int_scratch, scratch2);
813 __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
815 __ srlv(at, int_scratch, scratch2);
816 __ or_(dst2, dst2, at);
818 __ subu(scratch2, at, scratch2);
819 __ sllv(dst1, int_scratch, scratch2);
822 __ bind(&fewer_than_20_useful_bits);
824 __ subu(scratch2, at, dst1);
825 __ sllv(scratch2, int_scratch, scratch2);
826 __ Or(dst2, dst2, scratch2);
828 __ mov(dst1, zero_reg);
836 Destination destination,
840 Register heap_number_map,
843 FPURegister single_scratch,
845 ASSERT(!scratch1.is(object) && !scratch2.is(object));
846 ASSERT(!scratch1.is(scratch2));
847 ASSERT(!heap_number_map.is(object) &&
848 !heap_number_map.is(scratch1) &&
849 !heap_number_map.is(scratch2));
851 Label done, obj_is_not_smi;
853 __ JumpIfNotSmi(object, &obj_is_not_smi);
854 __ SmiUntag(scratch1, object);
856 scratch2, single_scratch);
859 __ bind(&obj_is_not_smi);
860 __ AssertRootValue(heap_number_map,
861 Heap::kHeapNumberMapRootIndex,
862 "HeapNumberMap register clobbered.");
863 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
867 CpuFeatures::Scope scope(FPU);
871 Register except_flag = scratch2;
880 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
883 __ Move(dst1, dst2, double_dst);
887 ASSERT(!scratch1.is(object) && !scratch2.is(object));
894 __ Or(scratch1, scratch1, Operand(dst2));
895 __ Branch(&done, eq, scratch1, Operand(zero_reg));
913 Register heap_number_map,
920 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
921 ASSERT(!scratch1.is(scratch2) &&
922 !scratch1.is(scratch3) &&
923 !scratch2.is(scratch3));
927 __ UntagAndJumpIfSmi(dst, object, &done);
929 __ AssertRootValue(heap_number_map,
930 Heap::kHeapNumberMapRootIndex,
931 "HeapNumberMap register clobbered.");
932 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
937 CpuFeatures::Scope scope(FPU);
941 FPURegister single_scratch = double_scratch.low();
942 Register except_flag = scratch2;
951 __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
953 __ mfc1(dst, single_scratch);
962 __ Or(dst, scratch2, Operand(dst));
963 __ Branch(&done, eq, dst, Operand(zero_reg));
972 __ srlv(dst, dst, scratch3);
975 __ subu(scratch3, at, scratch3);
976 __ sllv(scratch2, scratch2, scratch3);
977 __ Or(dst, dst, scratch2);
982 __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
983 __ Subu(dst, zero_reg, dst);
1000 HeapNumber::kExponentShift,
1012 __ Branch(not_int32, lt, scratch, Operand(zero_reg));
1019 __ srl(at, src1, 31);
1020 __ subu(tmp, scratch, at);
1021 __ Branch(not_int32, gt, tmp, Operand(30));
1023 __ And(tmp, src2, 0x3fffff);
1024 __ Branch(not_int32, ne, tmp, Operand(zero_reg));
1038 __ or_(dst, dst, at);
1042 __ subu(scratch, at, scratch);
1044 __ sllv(src1, src2, scratch);
1045 __ Subu(src1, src1, Operand(1));
1046 __ And(src1, dst, src1);
1047 __ Branch(not_int32, ne, src1, Operand(zero_reg));
1052 MacroAssembler* masm,
1054 Register heap_number_result,
1068 __ PrepareCallCFunction(4, scratch);
1070 CpuFeatures::Scope scope(FPU);
1076 __ Move(f12, a0, a1);
1077 __ Move(f14, a2, a3);
1080 AllowExternalCallThatCantCauseGC scope(masm);
1082 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1086 CpuFeatures::Scope scope(FPU);
1097 __ mov(v0, heap_number_result);
1103 if (the_int_.is(a1) &&
1104 the_heap_number_.is(v0) &&
1109 if (the_int_.is(a2) &&
1110 the_heap_number_.is(v0) &&
1125 stub1.GetCode()->set_is_pregenerated(true);
1126 stub2.GetCode()->set_is_pregenerated(true);
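// WriteInt32ToHeapNumberStub::Generate: stores the_int_ into the mantissa
// and exponent words of the_heap_number_ without using the FPU; the most
// negative int (0x80000000) takes a dedicated path.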
1131 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1132 Label max_negative_int;
1137 __ And(sign_, the_int_, Operand(0x80000000u));
1138 __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
1142 uint32_t non_smi_exponent =
1144 __ li(scratch_, Operand(non_smi_exponent));
1146 __ or_(scratch_, scratch_, sign_);
1148 __ subu(at, zero_reg, the_int_);
1149 __ Movn(the_int_, at, sign_);
1154 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1156 __ srl(at, the_int_, shift_distance);
1157 __ or_(scratch_, scratch_, at);
1160 __ sll(scratch_, the_int_, 32 - shift_distance);
1165 __ bind(&max_negative_int);
1174 __ mov(scratch_, zero_reg);
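// Handles comparison when both operands are the same object: everything is
// equal to itself except NaN, which the heap_number path below filters out
// for the relevant conditions.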
1184 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1187 bool never_nan_nan) {
1188 Label not_identical;
1189 Label heap_number, return_equal;
1190 Register exp_mask_reg = t5;
1192 __ Branch(&not_identical, ne, a0, Operand(a1));
1196 if (cc != eq || !never_nan_nan) {
1204 __ GetObjectType(a0, t4, t4);
1207 __ GetObjectType(a0, t4, t4);
1217 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
1218 __ Branch(&return_equal, ne, a0, Operand(t2));
1224 __ li(v0, Operand(LESS));
1232 __ bind(&return_equal);
1237 __ li(v0, Operand(LESS));
1239 __ mov(v0, zero_reg);
1243 if (cc != eq || !never_nan_nan) {
1247 if (cc != lt && cc != gt) {
1248 __ bind(&heap_number);
1257 __ And(t3, t2, Operand(exp_mask_reg));
1259 __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1265 __ Or(v0, t3, Operand(t2));
1272 __ Ret(eq, v0, Operand(zero_reg));
1276 __ li(v0, Operand(LESS));
1284 __ bind(&not_identical);
1288 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1291 Label* both_loaded_as_doubles,
1294 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1295 (lhs.is(a1) && rhs.is(a0)));
1298 __ JumpIfSmi(lhs, &lhs_is_smi);
1301 __ GetObjectType(lhs, t4, t4);
1316 CpuFeatures::Scope scope(FPU);
1328 ConvertToDoubleStub stub1(a1, a0, t6, t5);
1330 __ Call(stub1.GetCode());
1336 __ jmp(both_loaded_as_doubles);
1338 __ bind(&lhs_is_smi);
1340 __ GetObjectType(rhs, t4, t4);
1345 __ li(v0, Operand(1));
1355 CpuFeatures::Scope scope(FPU);
1363 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1365 __ Call(stub2.GetCode());
1380 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1383 CpuFeatures::Scope scope(FPU);
1385 __ Move(t0, t1, f14);
1386 __ Move(t2, t3, f12);
1394 Register rhs_exponent = exp_first ? t0 : t1;
1395 Register lhs_exponent = exp_first ? t2 : t3;
1396 Register rhs_mantissa = exp_first ? t1 : t0;
1397 Register lhs_mantissa = exp_first ? t3 : t2;
1398 Label one_is_nan, neither_is_nan;
1399 Label lhs_not_nan_exp_mask_is_loaded;
1401 Register exp_mask_reg = t4;
1403 __ and_(t5, lhs_exponent, exp_mask_reg);
1404 __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1407 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1409 __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1412 __ bind(&lhs_not_nan_exp_mask_is_loaded);
1413 __ and_(t5, rhs_exponent, exp_mask_reg);
1415 __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1418 __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1420 __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1422 __ bind(&one_is_nan);
1426 if (cc == lt || cc == le) {
1429 __ li(v0, Operand(LESS));
1433 __ bind(&neither_is_nan);
1437 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1444 Label return_result_not_equal, return_result_equal;
1450 CpuFeatures::Scope scope(FPU);
1452 __ Move(t0, t1, f14);
1453 __ Move(t2, t3, f12);
1461 Register rhs_exponent = exp_first ? t0 : t1;
1462 Register lhs_exponent = exp_first ? t2 : t3;
1463 Register rhs_mantissa = exp_first ? t1 : t0;
1464 Register lhs_mantissa = exp_first ? t3 : t2;
1466 __ xor_(v0, rhs_mantissa, lhs_mantissa);
1467 __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1469 __ subu(v0, rhs_exponent, lhs_exponent);
1470 __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1474 __ or_(t4, rhs_exponent, lhs_exponent);
1475 __ or_(t4, t4, rhs_mantissa);
1477 __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1479 __ bind(&return_result_equal);
1485 __ bind(&return_result_not_equal);
1489 __ PrepareCallCFunction(0, 2, t4);
1496 __ Move(f12, a0, a1);
1497 __ Move(f14, a2, a3);
1500 AllowExternalCallThatCantCauseGC scope(masm);
1501 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1506 CpuFeatures::Scope scope(FPU);
1507 Label equal, less_than;
1520 __ bind(&less_than);
1521 __ li(v0, Operand(LESS));
1527 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1534 Label first_non_object;
1537 __ GetObjectType(lhs, a2, a2);
1541 Label return_not_equal;
1542 __ bind(&return_not_equal);
1544 __ li(v0, Operand(1));
1546 __ bind(&first_non_object);
1550 __ GetObjectType(rhs, a3, a3);
1560 __ And(t2, a2, Operand(a3));
1562 __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1566 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1569 Label* both_loaded_as_doubles,
1570 Label* not_heap_numbers,
1572 __ GetObjectType(lhs, a3, a2);
1576 __ Branch(slow, ne, a3, Operand(a2));
1581 CpuFeatures::Scope scope(FPU);
1595 __ jmp(both_loaded_as_doubles);
1600 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1603 Label* possible_strings,
1604 Label* not_both_strings) {
1605 ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1606 (lhs.is(a1) && rhs.is(a0)));
1613 __ Branch(&object_test, ne, at, Operand(zero_reg));
1615 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1616 __ GetObjectType(rhs, a3, a3);
1619 __ Branch(possible_strings, eq, at, Operand(zero_reg));
1624 __ li(v0, Operand(1));
1626 __ bind(&object_test);
1628 __ GetObjectType(rhs, a2, a3);
1637 __ and_(a0, a2, a3);
1653 Register number_string_cache = result;
1654 Register mask = scratch3;
1657 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1664 __ Addu(mask, mask, -1);
1670 Isolate* isolate = masm->isolate();
1672 Label load_result_from_cache;
1673 if (!object_is_smi) {
1674 __ JumpIfSmi(object, &is_smi);
1676 CpuFeatures::Scope scope(FPU);
1679 Heap::kHeapNumberMapRootIndex,
1689 __ Xor(scratch1, scratch1, Operand(scratch2));
1690 __ And(scratch1, scratch1, Operand(mask));
1695 __ Addu(scratch1, number_string_cache, scratch1);
1697 Register probe = mask;
1700 __ JumpIfSmi(probe, not_found);
1704 __ Branch(not_found);
1709 __ Branch(not_found);
1714 Register scratch = scratch1;
1715 __ sra(scratch, object, 1);
1716 __ And(scratch, mask, Operand(scratch));
1721 __ Addu(scratch, number_string_cache, scratch);
1724 Register probe = mask;
1726 __ Branch(not_found, ne, object, Operand(probe));
1729 __ bind(&load_result_from_cache);
1733 __ IncrementCounter(isolate->counters()->number_to_string_native(),
1740 void NumberToStringStub::Generate(MacroAssembler* masm) {
1751 __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1760 Label not_smis, both_loaded_as_doubles;
1763 if (include_smi_compare_) {
1764 Label not_two_smis, smi_done;
1766 __ JumpIfNotSmi(a2, &not_two_smis);
1770 __ subu(v0, a1, a0);
1771 __ bind(&not_two_smis);
1772 } else if (FLAG_debug_code) {
1775 __ Assert(ne, "CompareStub: unexpected smi operands.",
1776 a2, Operand(zero_reg));
1785 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1791 __ And(t2, lhs_, Operand(rhs_));
1792 __ JumpIfNotSmi(t2, &not_smis, t0);
1801 EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1802 &both_loaded_as_doubles, &slow, strict_);
1804 __ bind(&both_loaded_as_doubles);
1809 Isolate* isolate = masm->isolate();
1811 CpuFeatures::Scope scope(FPU);
1813 __ li(t0, Operand(LESS));
1838 if (cc_ == lt || cc_ == le) {
1841 __ li(v0, Operand(LESS));
1847 EmitNanCheck(masm, cc_);
1851 EmitTwoNonNanDoubleComparison(masm, cc_);
1860 EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1863 Label check_for_symbols;
1864 Label flat_string_check;
1869 EmitCheckForTwoHeapNumbers(masm,
1872 &both_loaded_as_doubles,
1874 &flat_string_check);
1876 __ bind(&check_for_symbols);
1877 if (cc_ == eq && !strict_) {
1881 EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1886 __ bind(&flat_string_check);
1888 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1890 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1912 __ Push(lhs_, rhs_);
1916 native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1920 if (cc_ == lt || cc_ == le) {
1940 CpuFeatures::Scope scope(FPU);
1943 const Register map = t5.is(tos_) ? t3 : t5;
1946 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1949 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1950 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1953 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1959 __ Ret(eq, at, Operand(zero_reg));
1962 __ JumpIfSmi(tos_, &patch);
1972 __ Movn(tos_, zero_reg, at);
1973 __ Ret(ne, at, Operand(zero_reg));
1996 Label not_heap_number;
1997 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1998 __ Branch(&not_heap_number, ne, map, Operand(at));
1999 Label zero_or_nan, number;
2005 __ bind(&zero_or_nan);
2006 __ mov(tos_, zero_reg);
2009 __ bind(&not_heap_number);
2013 GenerateTypeTransition(masm);
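// Compares tos_ against the oddball root |value| and, on a match, returns
// with tos_ holding the requested boolean result.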
2017 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
2023 __ LoadRoot(at, value);
2024 __ Subu(at, at, tos_);
2028 __ Movz(tos_, zero_reg, at);
2030 __ Ret(eq, at, Operand(zero_reg));
2035 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
2039 __ Push(a3, a2, a1);
2042 __ TailCallExternalReference(
2043 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
2055 CpuFeatures::Scope scope(FPU);
2058 const int argument_count = 1;
2059 const int fp_argument_count = 0;
2060 const Register scratch = a1;
2062 AllowExternalCallThatCantCauseGC scope(masm);
2063 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2064 __ li(a0, Operand(ExternalReference::isolate_address()));
2066 ExternalReference::store_buffer_overflow_function(masm->isolate()),
2069 CpuFeatures::Scope scope(FPU);
2078 void UnaryOpStub::PrintName(StringStream* stream) {
2080 const char* overwrite_name = NULL;
2085 stream->Add("UnaryOpStub_%s_%s_%s",
2093 void UnaryOpStub::Generate(MacroAssembler* masm) {
2094 switch (operand_type_) {
2096 GenerateTypeTransition(masm);
2099 GenerateSmiStub(masm);
2102 GenerateHeapNumberStub(masm);
2105 GenerateGenericStub(masm);
2111 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2116 __ Push(v0, a2, a1, a0);
2118 __ TailCallExternalReference(
2119 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2124 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2127 GenerateSmiStubSub(masm);
2129 case Token::BIT_NOT:
2130 GenerateSmiStubBitNot(masm);
2138 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2139 Label non_smi, slow;
2140 GenerateSmiCodeSub(masm, &non_smi, &slow);
2143 GenerateTypeTransition(masm);
2147 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2149 GenerateSmiCodeBitNot(masm, &non_smi);
2151 GenerateTypeTransition(masm);
2155 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2158 __ JumpIfNotSmi(a0, non_smi);
2161 __ And(t0, a0, ~0x80000000);
2162 __ Branch(slow, eq, t0, Operand(zero_reg));
2166 __ subu(v0, zero_reg, a0);
2170 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2172 __ JumpIfNotSmi(a0, non_smi);
2182 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2185 GenerateHeapNumberStubSub(masm);
2187 case Token::BIT_NOT:
2188 GenerateHeapNumberStubBitNot(masm);
2196 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2197 Label non_smi, slow, call_builtin;
2198 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2200 GenerateHeapNumberCodeSub(masm, &slow);
2202 GenerateTypeTransition(masm);
2203 __ bind(&call_builtin);
2204 GenerateGenericCodeFallback(masm);
2208 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2209 Label non_smi, slow;
2210 GenerateSmiCodeBitNot(masm, &non_smi);
2212 GenerateHeapNumberCodeBitNot(masm, &slow);
2214 GenerateTypeTransition(masm);
2218 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2220 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2227 Label slow_allocate_heapnumber, heapnumber_allocated;
2228 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2229 __ jmp(&heapnumber_allocated);
2231 __ bind(&slow_allocate_heapnumber);
2235 __ CallRuntime(Runtime::kNumberAlloc, 0);
2240 __ bind(&heapnumber_allocated);
2252 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2253 MacroAssembler* masm,
2257 EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2259 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2264 __ Addu(a2, a1, Operand(0x40000000));
2265 __ Branch(&try_float, lt, a2, Operand(zero_reg));
2272 __ bind(&try_float);
2274 Label slow_allocate_heapnumber, heapnumber_allocated;
2276 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2277 __ jmp(&heapnumber_allocated);
2279 __ bind(&slow_allocate_heapnumber);
2283 __ CallRuntime(Runtime::kNumberAlloc, 0);
2292 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2296 __ bind(&heapnumber_allocated);
2302 CpuFeatures::Scope scope(FPU);
2310 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2311 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2314 __ bind(&impossible);
2315 if (FLAG_debug_code) {
2316 __ stop("Incorrect assumption in bit-not stub");
2322 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2325 GenerateGenericStubSub(masm);
2327 case Token::BIT_NOT:
2328 GenerateGenericStubBitNot(masm);
2336 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2337 Label non_smi, slow;
2338 GenerateSmiCodeSub(masm, &non_smi, &slow);
2340 GenerateHeapNumberCodeSub(masm, &slow);
2342 GenerateGenericCodeFallback(masm);
2346 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2347 Label non_smi, slow;
2348 GenerateSmiCodeBitNot(masm, &non_smi);
2350 GenerateHeapNumberCodeBitNot(masm, &slow);
2352 GenerateGenericCodeFallback(masm);
2356 void UnaryOpStub::GenerateGenericCodeFallback(
2357 MacroAssembler* masm) {
2364 case Token::BIT_NOT:
2373 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2381 __ Push(a2, a1, a0);
2383 __ TailCallExternalReference(
2384 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2391 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2392 MacroAssembler* masm) {
2397 void BinaryOpStub::Generate(MacroAssembler* masm) {
2400 AllowStubCallsScope allow_stub_calls(masm, true);
2401 switch (operands_type_) {
2403 GenerateTypeTransition(masm);
2406 GenerateSmiStub(masm);
2409 GenerateInt32Stub(masm);
2412 GenerateHeapNumberStub(masm);
2415 GenerateOddballStub(masm);
2418 GenerateBothStringStub(masm);
2421 GenerateStringStub(masm);
2424 GenerateGeneric(masm);
2432 void BinaryOpStub::PrintName(StringStream* stream) {
2434 const char* overwrite_name;
2439 default: overwrite_name = "UnknownOverwrite"; break;
2441 stream->Add("BinaryOpStub_%s_%s_%s",
2449 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2451 Register right = a0;
2453 Register scratch1 = t0;
2454 Register scratch2 = t1;
2459 Label not_smi_result;
2462 __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2463 __ RetOnNoOverflow(scratch1);
2467 __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2468 __ RetOnNoOverflow(scratch1);
2474 __ SmiUntag(scratch1, right);
2478 __ Mult(left, scratch1);
2483 __ sra(scratch1, scratch1, 31);
2484 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2487 __ Ret(ne, v0, Operand(zero_reg));
2490 __ Addu(scratch2, right, left);
2494 __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2497 __ mov(v0, zero_reg);
2505 __ SmiUntag(scratch2, right);
2506 __ SmiUntag(scratch1, left);
2507 __ Div(scratch1, scratch2);
2510 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2516 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2518 __ Branch(&done, ne, scratch1, Operand(zero_reg));
2519 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2522 __ Addu(scratch2, scratch1, Operand(0x40000000));
2523 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2524 __ SmiTag(v0, scratch1);
2530 __ SmiUntag(scratch2, right);
2531 __ SmiUntag(scratch1, left);
2532 __ Div(scratch1, scratch2);
2536 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2541 __ Branch(&done, ne, scratch2, Operand(zero_reg));
2542 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2545 __ Addu(scratch1, scratch2, Operand(0x40000000));
2546 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2547 __ SmiTag(v0, scratch2);
2553 __ or_(v0, left, right);
2555 case Token::BIT_AND:
2557 __ and_(v0, left, right);
2559 case Token::BIT_XOR:
2561 __ xor_(v0, left, right);
2565 __ GetLeastBitsFromSmi(scratch1, right, 5);
2566 __ srav(scratch1, left, scratch1);
2574 __ SmiUntag(scratch1, left);
2575 __ GetLeastBitsFromSmi(scratch2, right, 5);
2576 __ srlv(v0, scratch1, scratch2);
2579 __ And(scratch1, v0, Operand(0xc0000000));
2580 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2587 __ SmiUntag(scratch1, left);
2588 __ GetLeastBitsFromSmi(scratch2, right, 5);
2589 __ sllv(scratch1, scratch1, scratch2);
2591 __ Addu(scratch2, scratch1, Operand(0x40000000));
2592 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2593 __ SmiTag(v0, scratch1);
2599 __ bind(&not_smi_result);
2603 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2606 Label* gc_required) {
2608 Register right = a0;
2609 Register scratch1 = t3;
2610 Register scratch2 = t5;
2611 Register scratch3 = t0;
2613 ASSERT(smi_operands || (not_numbers != NULL));
2616 __ AssertSmi(right);
2619 Register heap_number_map = t2;
2620 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2637 Register result = s0;
2638 GenerateHeapResultAllocation(
2639 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2658 CpuFeatures::Scope scope(FPU);
2688 if (FLAG_debug_code) {
2689 __ stop("Unreachable code.");
2695 case Token::BIT_XOR:
2696 case Token::BIT_AND:
2701 __ SmiUntag(a3, left);
2702 __ SmiUntag(a2, right);
2724 Label result_not_a_smi;
2727 __ Or(a2, a3, Operand(a2));
2729 case Token::BIT_XOR:
2730 __ Xor(a2, a3, Operand(a2));
2732 case Token::BIT_AND:
2733 __ And(a2, a3, Operand(a2));
2737 __ GetLeastBitsFromInt32(a2, a2, 5);
2738 __ srav(a2, a3, a2);
2742 __ GetLeastBitsFromInt32(a2, a2, 5);
2743 __ srlv(a2, a3, a2);
2749 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2751 __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2756 __ GetLeastBitsFromInt32(a2, a2, 5);
2757 __ sllv(a2, a3, a2);
2763 __ Addu(a3, a2, Operand(0x40000000));
2764 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2769 __ bind(&result_not_a_smi);
2770 Register result = t1;
2772 __ AllocateHeapNumber(
2773 result, scratch1, scratch2, heap_number_map, gc_required);
2775 GenerateHeapResultAllocation(
2776 masm, result, heap_number_map, scratch1, scratch2, gc_required);
2789 CpuFeatures::Scope scope(FPU);
2791 if (op_ == Token::SHR) {
2804 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2805 __ TailCallStub(&stub);
2819 void BinaryOpStub::GenerateSmiCode(
2820 MacroAssembler* masm,
2823 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2827 Register right = a0;
2828 Register scratch1 = t3;
2831 __ Or(scratch1, left, Operand(right));
2833 __ JumpIfNotSmi(scratch1, &not_smis);
2836 GenerateSmiSmiOperation(masm);
2840 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2841 GenerateFPOperation(masm, true, use_runtime, gc_required);
2847 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2848 Label not_smis, call_runtime;
2853 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2857 GenerateSmiCode(masm,
2860 ALLOW_HEAPNUMBER_RESULTS);
2865 GenerateTypeTransition(masm);
2867 __ bind(&call_runtime);
2868 GenerateCallRuntime(masm);
2872 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2876 GenerateAddStrings(masm);
2877 GenerateTypeTransition(masm);
2881 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2890 Register right = a0;
2893 __ JumpIfSmi(left, &call_runtime);
2894 __ GetObjectType(left, a2, a2);
2898 __ JumpIfSmi(right, &call_runtime);
2899 __ GetObjectType(right, a2, a2);
2903 GenerateRegisterArgsPush(masm);
2904 __ TailCallStub(&string_add_stub);
2906 __ bind(&call_runtime);
2907 GenerateTypeTransition(masm);
2911 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2915 Register right = a0;
2916 Register scratch1 = t3;
2917 Register scratch2 = t5;
2918 FPURegister double_scratch = f0;
2919 FPURegister single_scratch = f6;
2921 Register heap_number_result = no_reg;
2922 Register heap_number_map = t2;
2923 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2933 __ Or(scratch1, left, right);
2934 __ JumpIfNotSmi(scratch1, &skip);
2935 GenerateSmiSmiOperation(masm);
2977 CpuFeatures::Scope scope(FPU);
2978 Label return_heap_number;
3002 Register except_flag = scratch2;
3011 __ Branch(&transition, ne, except_flag, Operand(zero_reg));
3015 __ mfc1(scratch1, single_scratch);
3016 __ Addu(scratch2, scratch1, Operand(0x40000000));
3018 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
3021 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
3022 __ mfc1(scratch2, f11);
3024 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
3028 __ SmiTag(v0, scratch1);
3034 __ bind(&return_heap_number);
3040 heap_number_result = s0;
3041 GenerateHeapResultAllocation(masm,
3047 __ mov(v0, heap_number_result);
3060 Label pop_and_call_runtime;
3063 heap_number_result = s0;
3064 GenerateHeapResultAllocation(masm,
3069 &pop_and_call_runtime);
3076 masm, op_, heap_number_result, scratch1);
3077 if (FLAG_debug_code) {
3078 __ stop("Unreachable code.");
3081 __ bind(&pop_and_call_runtime);
3083 __ Branch(&call_runtime);
3090 case Token::BIT_XOR:
3091 case Token::BIT_AND:
3095 Label return_heap_number;
3096 Register scratch3 = t1;
3123 __ Or(a2, a3, Operand(a2));
3125 case Token::BIT_XOR:
3126 __ Xor(a2, a3, Operand(a2));
3128 case Token::BIT_AND:
3129 __ And(a2, a3, Operand(a2));
3132 __ And(a2, a2, Operand(0x1f));
3133 __ srav(a2, a3, a2);
3136 __ And(a2, a2, Operand(0x1f));
3137 __ srlv(a2, a3, a2);
3145 __ Branch((result_type_ <= BinaryOpIC::INT32)
3147 : &return_heap_number,
3152 __ Branch((result_type_ <= BinaryOpIC::INT32)
3161 __ And(a2, a2, Operand(0x1f));
3162 __ sllv(a2, a3, a2);
3169 __ Addu(scratch1, a2, Operand(0x40000000));
3171 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3176 __ bind(&return_heap_number);
3177 heap_number_result = t1;
3178 GenerateHeapResultAllocation(masm,
3186 CpuFeatures::Scope scope(FPU);
3188 if (op_ != Token::SHR) {
3190 __ mtc1(a2, double_scratch);
3191 __ cvt_d_w(double_scratch, double_scratch);
3194 __ mtc1(a2, double_scratch);
3195 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3199 __ mov(v0, heap_number_result);
3206 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3207 __ TailCallStub(&stub);
3220 if (transition.is_linked() ||
3221 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3222 __ bind(&transition);
3223 GenerateTypeTransition(masm);
3226 __ bind(&call_runtime);
3227 GenerateCallRuntime(masm);
3231 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3237 GenerateAddStrings(masm);
3242 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3243 __ Branch(&check, ne, a1, Operand(t0));
3247 __ LoadRoot(a1, Heap::kNanValueRootIndex);
3251 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3252 __ Branch(&done, ne, a0, Operand(t0));
3256 __ LoadRoot(a0, Heap::kNanValueRootIndex);
3260 GenerateHeapNumberStub(masm);
3264 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3266 GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3268 __ bind(&call_runtime);
3269 GenerateCallRuntime(masm);
3273 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3274 Label call_runtime, call_string_add_or_runtime;
3276 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3278 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3280 __ bind(&call_string_add_or_runtime);
3282 GenerateAddStrings(masm);
3285 __ bind(&call_runtime);
3286 GenerateCallRuntime(masm);
3290 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3292 Label left_not_string, call_runtime;
3295 Register right = a0;
3298 __ JumpIfSmi(left, &left_not_string);
3299 __ GetObjectType(left, a2, a2);
3303 GenerateRegisterArgsPush(masm);
3304 __ TailCallStub(&string_add_left_stub);
3307 __ bind(&left_not_string);
3308 __ JumpIfSmi(right, &call_runtime);
3309 __ GetObjectType(right, a2, a2);
3313 GenerateRegisterArgsPush(masm);
3314 __ TailCallStub(&string_add_right_stub);
3317 __ bind(&call_runtime);
3321 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3322 GenerateRegisterArgsPush(masm);
3342 case Token::BIT_AND:
3345 case Token::BIT_XOR:
3363 void BinaryOpStub::GenerateHeapResultAllocation(
3364 MacroAssembler* masm,
3366 Register heap_number_map,
3369 Label* gc_required) {
3373 ASSERT(!result.is(a0) && !result.is(a1));
3376 Label skip_allocation, allocated;
3377 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3380 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3382 __ AllocateHeapNumber(
3383 result, scratch1, scratch2, heap_number_map, gc_required);
3384 __ Branch(&allocated);
3385 __ bind(&skip_allocation);
3387 __ mov(result, overwritable_operand);
3388 __ bind(&allocated);
3391 __ AllocateHeapNumber(
3392 result, scratch1, scratch2, heap_number_map, gc_required);
3397 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3409 Label input_not_smi;
3412 Label invalid_cache;
3413 const Register scratch0 = t5;
3414 const Register scratch1 = t3;
3415 const Register cache_entry = a0;
3416 const bool tagged = (argument_type_ == TAGGED);
3419 CpuFeatures::Scope scope(FPU);
3424 __ JumpIfNotSmi(a0, &input_not_smi);
3431 __ Move(a2, a3, f4);
3434 __ bind(&input_not_smi);
3438 Heap::kHeapNumberMapRootIndex,
3447 __ Move(a2, a3, f4);
3460 __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3465 __ li(cache_entry, Operand(
3466 ExternalReference::transcendental_cache_array_address(
3469 __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3470 Isolate::Current()->transcendental_cache()->caches_[0])));
3473 __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3477 { TranscendentalCache::SubCache::Element test_elem[2];
3478 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3479 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3480 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3481 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3482 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3483 CHECK_EQ(12, elem2_start - elem_start);
3484 CHECK_EQ(0, elem_in0 - elem_start);
3492 __ Addu(a1, a1, t0);
3494 __ Addu(cache_entry, cache_entry, t0);
3500 __ Branch(&calculate, ne, a2, Operand(t0));
3501 __ Branch(&calculate, ne, a3, Operand(t1));
3503 Counters* counters = masm->isolate()->counters();
3504 __ IncrementCounter(
3505 counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3517 __ bind(&calculate);
3518 Counters* counters = masm->isolate()->counters();
3519 __ IncrementCounter(
3520 counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3522 __ bind(&invalid_cache);
3523 __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3529 CpuFeatures::Scope scope(FPU);
3538 __ Push(a3, a2, cache_entry);
3539 GenerateCallCFunction(masm, scratch0);
3540 __ GetCFunctionDoubleResult(f4);
3544 __ Pop(a3, a2, cache_entry);
3545 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3546 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3554 __ mov(v0, cache_entry);
3556 __ bind(&invalid_cache);
3559 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3560 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3565 __ CallRuntime(RuntimeFunction(), 1);
3570 __ bind(&skip_cache);
3573 GenerateCallCFunction(masm, scratch0);
3574 __ GetCFunctionDoubleResult(f4);
3575 __ bind(&no_update);
3584 __ li(scratch0, Operand(4 * kPointerSize));
3586 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3593 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3596 __ PrepareCallCFunction(2, scratch);
3598 __ Move(a0, a1, f4);
3602 AllowExternalCallThatCantCauseGC scope(masm);
3603 Isolate* isolate = masm->isolate();
3607 ExternalReference::math_sin_double_function(isolate),
3612 ExternalReference::math_cos_double_function(isolate),
3616 __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3621 ExternalReference::math_log_double_function(isolate),
3641 return Runtime::kAbort;
3647 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3652 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
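// MathPowStub::Generate: computes base^exponent on the FPU. Exponents of
// 0.5 and -0.5 are special-cased, integer exponents use a square-and-
// multiply loop, and everything else goes through the C function
// power_double_double.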
3657 CpuFeatures::Scope fpu_scope(FPU);
3658 const Register base = a1;
3659 const Register exponent = a2;
3660 const Register heapnumbermap = t1;
3661 const Register heapnumber = v0;
3666 const FPURegister single_scratch = f8;
3667 const Register scratch = t5;
3668 const Register scratch2 = t3;
3670 Label call_runtime, done, int_exponent;
3672 Label base_is_smi, unpack_exponent;
3679 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3681 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3683 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3686 __ jmp(&unpack_exponent);
3688 __ bind(&base_is_smi);
3689 __ mtc1(scratch, single_scratch);
3690 __ cvt_d_w(double_base, single_scratch);
3691 __ bind(&unpack_exponent);
3693 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3696 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3697 __ ldc1(double_exponent,
3699 } else if (exponent_type_ == TAGGED) {
3701 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3703 __ ldc1(double_exponent,
3707 if (exponent_type_ != INTEGER) {
3708 Label int_exponent_convert;
3717 __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3723 Label not_plus_half;
3726 __ Move(double_scratch, 0.5);
3738 __ neg_d(double_result, double_scratch);
3742 __ sqrt_d(double_result, double_scratch);
3745 __ bind(&not_plus_half);
3746 __ Move(double_scratch, -0.5);
3762 __ Move(double_result, 1);
3763 __ sqrt_d(double_scratch, double_scratch);
3764 __ div_d(double_result, double_result, double_scratch);
3770 AllowExternalCallThatCantCauseGC scope(masm);
3771 __ PrepareCallCFunction(0, 2, scratch);
3772 __ SetCallCDoubleArguments(double_base, double_exponent);
3774 ExternalReference::power_double_double_function(masm->isolate()),
3778 __ GetCFunctionDoubleResult(double_result);
3781 __ bind(&int_exponent_convert);
3782 __ mfc1(scratch, single_scratch);
3786 __ bind(&int_exponent);
3789 if (exponent_type_ == INTEGER) {
3790 __ mov(scratch, exponent);
3793 __ mov(exponent, scratch);
3796 __ mov_d(double_scratch, double_base);
3797 __ Move(double_result, 1.0);
3800 Label positive_exponent;
3801 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3802 __ Subu(scratch, zero_reg, scratch);
3803 __ bind(&positive_exponent);
3805 Label while_true, no_carry, loop_end;
3806 __ bind(&while_true);
3808 __ And(scratch2, scratch, 1);
3810 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3811 __ mul_d(double_result, double_result, double_scratch);
3814 __ sra(scratch, scratch, 1);
3816 __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3817 __ mul_d(double_scratch, double_scratch, double_scratch);
3819 __ Branch(&while_true);
3823 __ Branch(&done, ge, exponent, Operand(zero_reg));
3824 __ Move(double_scratch, 1.0);
3825 __ div_d(double_result, double_scratch, double_result);
3832 __ mtc1(exponent, single_scratch);
3833 __ cvt_d_w(double_exponent, single_scratch);
3836 Counters* counters = masm->isolate()->counters();
3839 __ bind(&call_runtime);
3840 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3845 __ AllocateHeapNumber(
3846 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3847 __ sdc1(double_result,
3849 ASSERT(heapnumber.is(v0));
3850 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3855 AllowExternalCallThatCantCauseGC scope(masm);
3856 __ PrepareCallCFunction(0, 2, scratch);
3857 __ SetCallCDoubleArguments(double_base, double_exponent);
3859 ExternalReference::power_double_double_function(masm->isolate()),
3863 __ GetCFunctionDoubleResult(double_result);
3866 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3872 bool CEntryStub::NeedsImmovableCode() {
3878 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3883 void CodeStub::GenerateStubsAheadOfTime() {
3891 void CodeStub::GenerateFPStubs() {
3893 Handle<Code> code = save_doubles.GetCode();
3894 code->set_is_pregenerated(true);
3896 stub.GetCode()->set_is_pregenerated(true);
3897 code->GetIsolate()->set_fp_stubs_generated(true);
3903 Handle<Code> code = stub.GetCode();
3904 code->set_is_pregenerated(true);
3908 void CEntryStub::GenerateCore(MacroAssembler* masm,
3909 Label* throw_normal_exception,
3910 Label* throw_termination_exception,
3911 Label* throw_out_of_memory_exception,
3913 bool always_allocate) {
3919 Isolate* isolate = masm->isolate();
3924 __ PrepareCallCFunction(1, 0, a1);
3925 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3928 ExternalReference scope_depth =
3929 ExternalReference::heap_always_allocate_scope_depth(isolate);
3930 if (always_allocate) {
3931 __ li(a0, Operand(scope_depth));
3933 __ Addu(a1, a1, Operand(1));
3945 __ AssertStackIsAligned();
3947 __ li(a2, Operand(ExternalReference::isolate_address()));
3959 masm->bal(&find_ra);
3961 masm->bind(&find_ra);
3966 const int kNumInstructionsToJump = 5;
3967 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3979 masm->InstructionsGeneratedSince(&find_ra));
3982 if (always_allocate) {
3984 __ li(a2, Operand(scope_depth));
3986 __ Subu(a3, a3, Operand(1));
3991 Label failure_returned;
3993 __ addiu(a2, v0, 1);
4004 __ LeaveExitFrame(save_doubles_, s0, true);
4008 __ bind(&failure_returned);
4011 __ Branch(&retry, eq, t0, Operand(zero_reg));
4016 throw_out_of_memory_exception,
4019 Operand(reinterpret_cast<int32_t>(out_of_memory)));
4024 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
4025 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4032 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
4033 __ Branch(throw_termination_exception, eq, v0, Operand(t0));
4036 __ jmp(throw_normal_exception);
4066 FrameScope scope(masm, StackFrame::MANUAL);
4067 __ EnterExitFrame(save_doubles_);
4073 Label throw_normal_exception;
4074 Label throw_termination_exception;
4075 Label throw_out_of_memory_exception;
4079 &throw_normal_exception,
4080 &throw_termination_exception,
4081 &throw_out_of_memory_exception,
4087 &throw_normal_exception,
4088 &throw_termination_exception,
4089 &throw_out_of_memory_exception,
4095 __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4097 &throw_normal_exception,
4098 &throw_termination_exception,
4099 &throw_out_of_memory_exception,
4103 __ bind(&throw_out_of_memory_exception);
4105 Isolate* isolate = masm->isolate();
4106 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4109 __ li(a2, Operand(external_caught));
4114 __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4115 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4120 __ bind(&throw_termination_exception);
4121 __ ThrowUncatchable(v0);
4123 __ bind(&throw_normal_exception);
4129 Label invoke, handler_entry, exit;
4130 Isolate* isolate = masm->isolate();
4146 CpuFeatures::Scope scope(FPU);
4160 __ InitializeRootRegister();
4164 __ li(t3, Operand(-1));
4165 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4168 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4171 __ Push(t3, t2, t1, t0);
4192 Label non_outermost_js;
4193 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4194 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4196 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4198 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4202 __ bind(&non_outermost_js);
4203 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4210 __ bind(&handler_entry);
4211 handler_offset_ = handler_entry.pos();
4216 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4226 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4233 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4234 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4257 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4259 __ li(t0, Operand(construct_entry));
4261 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4262 __ li(t0, Operand(entry));
4275 Label non_outermost_js_2;
4277 __ Branch(&non_outermost_js_2,
4280 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4281 __ li(t1, Operand(ExternalReference(js_entry_sp)));
4283 __ bind(&non_outermost_js_2);
4287 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4295 CpuFeatures::Scope scope(FPU);
4317 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4319 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4322 const Register object = a0;
4324 const Register function = a1;
4325 const Register prototype = t0;
4326 const Register inline_site = t5;
4327 const Register scratch = a2;
4331 Label slow, loop, is_instance, is_not_instance, not_js_object;
4333 if (!HasArgsInRegisters()) {
4339 __ JumpIfSmi(object, &not_js_object);
4340 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4344 if (!HasCallSiteInlineCheck()) {
4346 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4347 __ Branch(&miss, ne, function, Operand(at));
4348 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4349 __ Branch(&miss, ne, map, Operand(at));
4350 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4351 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4357 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4360 __ JumpIfSmi(prototype, &slow);
4361 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4365 if (!HasCallSiteInlineCheck()) {
4366 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4367 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4369 ASSERT(HasArgsInRegisters());
4374 __ LoadFromSafepointRegisterSlot(scratch, t0);
4375 __ Subu(inline_site, ra, scratch);
4377 __ GetRelocatedValue(inline_site, scratch, v1);
4386 Register scratch2 = map;
4390 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4392 __ Branch(&is_instance, eq, scratch, Operand(prototype));
4393 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4398 __ bind(&is_instance);
4400 if (!HasCallSiteInlineCheck()) {
4401 __ mov(v0, zero_reg);
4402 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4405 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4406 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4408 __ PatchRelocatedValue(inline_site, scratch, v0);
4410 if (!ReturnTrueFalseObject()) {
4412 __ mov(v0, zero_reg);
4415 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4417 __ bind(&is_not_instance);
4418 if (!HasCallSiteInlineCheck()) {
4420 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4423 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4424 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4426 __ PatchRelocatedValue(inline_site, scratch, v0);
4428 if (!ReturnTrueFalseObject()) {
4433 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4435 Label object_not_null, object_not_null_or_smi;
4436 __ bind(&not_js_object);
4439 __ JumpIfSmi(function, &slow);
4440 __ GetObjectType(function, scratch2, scratch);
4444 __ Branch(&object_not_null,
4447 Operand(masm->isolate()->factory()->null_value()));
4449 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4451 __ bind(&object_not_null);
4453 __ JumpIfNotSmi(object, &object_not_null_or_smi);
4455 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4457 __ bind(&object_not_null_or_smi);
4459 __ IsObjectJSStringType(object, scratch, &slow);
4461 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4465 if (!ReturnTrueFalseObject()) {
4466 if (HasArgsInRegisters()) {
4477 __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4478 __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4479 __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4480 __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
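// ArgumentsAccessStub::GenerateReadElement: reads arguments[index] either
// from the current frame or, when the caller went through an arguments
// adaptor, from the adaptor frame; out-of-range indices go to the runtime.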
4491 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4494 const int kDisplacement =
4499 __ JumpIfNotSmi(a1, &slow);
4513 __ Branch(&slow, hs, a1, Operand(a0));
4516 __ subu(a3, a0, a1);
4518 __ Addu(a3, fp, Operand(t3));
4530 __ subu(a3, a0, a1);
4532 __ Addu(a3, a2, Operand(t3));
4540 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4544 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4561 __ Addu(a3, a3, Operand(t3));
4566 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4570 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4584 Label adaptor_frame, try_allocate;
4587 __ Branch(&adaptor_frame,
4594 __ b(&try_allocate);
4598 __ bind(&adaptor_frame);
4601 __ Addu(a3, a3, Operand(t6));
4609 __ Branch(&skip_min, lt, a1, Operand(a2));
4613 __ bind(&try_allocate);
4617 const int kParameterMapHeaderSize =
4620 Label param_map_size;
4623 __ mov(t5, zero_reg);
4625 __ addiu(t5, t5, kParameterMapHeaderSize);
4626 __ bind(&param_map_size);
4630 __ Addu(t5, t5, Operand(t6));
4637 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4642 const int kNormalOffset =
4644 const int kAliasedOffset =
4649 Label skip2_ne, skip2_eq;
4650 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4654 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4671 const int kCalleeOffset = JSObject::kHeaderSize +
4677 const int kLengthOffset = JSObject::kHeaderSize +
4692 Label skip_parameter_map;
4702 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4708 __ Addu(t2, t0, Operand(t6));
4709 __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4720 Label parameters_loop, parameters_test;
4723 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4724 __ Subu(t5, t5, Operand(a1));
4725 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4727 __ Addu(a3, t0, Operand(t6));
4728 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4736 __ jmp(&parameters_test);
4738 __ bind(&parameters_loop);
4742 __ Addu(t6, t0, t1);
4745 __ Addu(t6, a3, t1);
4748 __ bind(&parameters_test);
4751 __ bind(&skip_parameter_map);
4756 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4760 Label arguments_loop, arguments_test;
4764 __ Subu(t0, t0, Operand(t6));
4765 __ jmp(&arguments_test);
4767 __ bind(&arguments_loop);
4768 __ Subu(t0, t0, Operand(kPointerSize));
4771 __ Addu(t1, a3, Operand(t6));
4775 __ bind(&arguments_test);
4776 __ Branch(&arguments_loop, lt, t5, Operand(a2));
4785 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4789 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4794 Label adaptor_frame, try_allocate, runtime;
4797 __ Branch(&adaptor_frame,
4804 __ Branch(&try_allocate);
4807 __ bind(&adaptor_frame);
4811 __ Addu(a3, a2, Operand(at));
4818 Label add_arguments_object;
4819 __ bind(&try_allocate);
4820 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4824 __ bind(&add_arguments_object);
4828 __ AllocateInNewSpace(a1,
4843 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4852 __ Branch(&done, eq, a1, Operand(zero_reg));
4861 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4874 __ Addu(a2, a2, Operand(-kPointerSize));
4878 __ Addu(t0, t0, Operand(kPointerSize));
4879 __ Subu(a1, a1, Operand(1));
4880 __ Branch(&loop, ne, a1, Operand(zero_reg));
4888 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
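// RegExpExecStub::Generate: validates the arguments, unpacks the subject to
// a flat sequential string, calls the generated regexp code via
// DirectCEntryStub, and fills in the last-match-info array; any failure
// falls back to Runtime::kRegExpExec.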
4892 void RegExpExecStub::Generate(MacroAssembler* masm) {
4896 #ifdef V8_INTERPRETED_REGEXP
4897 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4898 #else // V8_INTERPRETED_REGEXP
4911 Isolate* isolate = masm->isolate();
4913 Label runtime, invoke_regexp;
4921 Register subject = s0;
4922 Register regexp_data = s1;
4923 Register last_match_info_elements = s2;
4926 ExternalReference address_of_regexp_stack_memory_address =
4927 ExternalReference::address_of_regexp_stack_memory_address(
4929 ExternalReference address_of_regexp_stack_memory_size =
4930 ExternalReference::address_of_regexp_stack_memory_size(isolate);
4931 __ li(a0, Operand(address_of_regexp_stack_memory_size));
4933 __ Branch(&runtime, eq, a0, Operand(zero_reg));
4938 __ JumpIfSmi(a0, &runtime);
4939 __ GetObjectType(a0, a1, a1);
4944 if (FLAG_debug_code) {
4947 "Unexpected type for RegExp data, FixedArray expected",
4950 __ GetObjectType(regexp_data, a0, a0);
4952 "Unexpected type for RegExp data, FixedArray expected",
4970 __ Addu(a2, a2, Operand(2));
4979 __ JumpIfSmi(subject, &runtime);
4980 __ GetObjectType(subject, a0, a0);
4983 __ Branch(&runtime,
ne, a0, Operand(zero_reg));
4995 __ JumpIfNotSmi(a0, &runtime);
4996 __ Branch(&runtime,
ls, a3, Operand(a0));
5003 __ JumpIfSmi(a0, &runtime);
5004 __ GetObjectType(a0, a1, a1);
5007 __ lw(last_match_info_elements,
5010 __ Branch(&runtime, ne, a0, Operand(
5011 isolate->factory()->fixed_array_map()));
5018 __ Branch(&runtime,
gt, a2, Operand(at));
5021 __ mov(t0, zero_reg);
5036 __ Branch(&seq_string,
eq, a1, Operand(zero_reg));
5048 Label cons_string, external_string, check_encoding;
5059 __ Branch(&runtime,
ne, at, Operand(zero_reg));
5066 __ jmp(&check_encoding);
5068 __ bind(&cons_string);
5070 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
5071 __ Branch(&runtime,
ne, a0, Operand(a1));
5074 __ bind(&check_encoding);
5079 __ Branch(&external_string,
ne, at, Operand(zero_reg));
5081 __ bind(&seq_string);
5093 __ Movz(t9, t1, a0);
5098 __ JumpIfSmi(t9, &runtime);
5115 __ IncrementCounter(isolate->counters()->regexp_entry_native(),
5119 const int kRegExpExecuteArguments = 9;
5120 const int kParameterRegisters = 4;
5121 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5139 __ li(a0, Operand(ExternalReference::isolate_address()));
5143 __ li(a0, Operand(1));
5147 __ li(a0, Operand(address_of_regexp_stack_memory_address));
5149 __ li(a2, Operand(address_of_regexp_stack_memory_size));
5151 __ addu(a0, a0, a2);
5156 __ mov(a0, zero_reg);
5161 ExternalReference::address_of_static_offsets_vector(isolate)));
5167 __ Xor(a3, a3, Operand(1));
5172 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
5177 __ sllv(t1, t0, a3);
5178 __ addu(t0, t2, t1);
5179 __ sllv(t1, a1, a3);
5180 __ addu(a2, t0, t1);
5184 __ sllv(t1, t2, a3);
5185 __ addu(a3, t0, t1);
5190 __ mov(a0, subject);
5194 DirectCEntryStub stub;
5195 stub.GenerateCall(masm, t9);
5207 __ Branch(&success,
eq, v0, Operand(1));
5218 __ li(a1, Operand(isolate->factory()->the_hole_value()));
5219 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5222 __ Branch(&runtime,
eq, v0, Operand(a1));
5227 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5228 Label termination_exception;
5229 __ Branch(&termination_exception,
eq, v0, Operand(a0));
5233 __ bind(&termination_exception);
5234 __ ThrowUncatchable(v0);
5238 __ li(v0, Operand(isolate->factory()->null_value()));
5248 __ Addu(a1, a1, Operand(2));
5260 __ mov(a2, subject);
5261 __ RecordWriteField(last_match_info_elements,
5270 __ RecordWriteField(last_match_info_elements,
5278 ExternalReference address_of_static_offsets_vector =
5279 ExternalReference::address_of_static_offsets_vector(isolate);
5280 __ li(a2, Operand(address_of_static_offsets_vector));
5284 Label next_capture, done;
5288 last_match_info_elements,
5290 __ bind(&next_capture);
5291 __ Subu(a1, a1, Operand(1));
5292 __ Branch(&done,
lt, a1, Operand(zero_reg));
5295 __ addiu(a2, a2, kPointerSize);
5300 __ addiu(a0, a0, kPointerSize);
5310 __ bind(&external_string);
5313 if (FLAG_debug_code) {
5318 "external string expected, but not found",
5329 __ jmp(&seq_string);
5333 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5334 #endif // V8_INTERPRETED_REGEXP
5338 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5339 const int kMaxInlineLength = 100;
5345 __ JumpIfNotSmi(a1, &slowcase);
5356 __ Addu(a2, t1, Operand(objects_size));
5357 __ AllocateInNewSpace(
5374 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5395 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5401 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
5410 __ addu(t1, t1, a3);
5412 __ Branch(&done,
ge, a3, Operand(t1));
5415 __ addiu(a3, a3, kPointerSize);
5421 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5425 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5434 masm->isolate()->heap()->undefined_value());
5436 masm->isolate()->heap()->the_hole_value());
5443 __ Branch(&done,
eq, a3, Operand(a1));
5444 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5445 __ Branch(&done,
eq, a3, Operand(at));
5449 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5460 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
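// For reference, a C++ sketch of the call-target caching policy the branches
// above implement (a minimal sketch, not code from this stub; the sentinel
// parameters stand in for the undefined and the-hole values loaded from the
// root list, cf. MegamorphicSentinel / UninitializedSentinel):
static void RecordCallTargetSketch(void** cell, void* function,
                                   void* uninitialized_sentinel,  // the hole
                                   void* megamorphic_sentinel) {  // undefined
  void* state = *cell;
  // A monomorphic hit on this function, or an already megamorphic site:
  // leave the cell untouched.
  if (state == function || state == megamorphic_sentinel) return;
  if (state == uninitialized_sentinel) {
    *cell = function;              // first observed target: go monomorphic
  } else {
    *cell = megamorphic_sentinel;  // a different target was cached: go megamorphic
  }
}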
5470 Label slow, non_function;
5475 if (ReceiverMightBeImplicit()) {
5481 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5482 __ Branch(&call,
ne, t0, Operand(at));
5493 __ JumpIfSmi(a1, &non_function);
5495 __ GetObjectType(a1, a3, a3);
5498 if (RecordCallTarget()) {
5499 GenerateRecordCallTarget(masm);
5504 ParameterCount actual(argc_);
5506 if (ReceiverMightBeImplicit()) {
5507 Label call_as_function;
5508 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5509 __ Branch(&call_as_function,
eq, t0, Operand(at));
5510 __ InvokeFunction(a1,
5515 __ bind(&call_as_function);
5517 __ InvokeFunction(a1,
5525 if (RecordCallTarget()) {
5530 masm->isolate()->heap()->undefined_value());
5531 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5539 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5542 Handle<Code> adaptor =
5543 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5544 __ Jump(adaptor, RelocInfo::CODE_TARGET);
5549 __ bind(&non_function);
5551 __ li(a0, Operand(argc_));
5552 __ mov(a2, zero_reg);
5553 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5555 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5556 RelocInfo::CODE_TARGET);
5564 Label slow, non_function_call;
5567 __ JumpIfSmi(a1, &non_function_call);
5569 __ GetObjectType(a1, a3, a3);
5572 if (RecordCallTarget()) {
5573 GenerateRecordCallTarget(masm);
5588 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5591 __ bind(&non_function_call);
5592 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5597 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5598 RelocInfo::CODE_TARGET);
5604 void CompareStub::PrintName(StringStream* stream) {
5606 (lhs_.is(a1) && rhs_.is(a0)));
5607 const char* cc_name;
5609 case lt: cc_name = "LT"; break;
5610 case gt: cc_name = "GT"; break;
5611 case le: cc_name = "LE"; break;
5612 case ge: cc_name = "GE"; break;
5613 case eq: cc_name = "EQ"; break;
5614 case ne: cc_name = "NE"; break;
5615 default: cc_name = "UnknownCondition"; break;
5617 bool is_equality = cc_ == eq || cc_ == ne;
5618 stream->Add("CompareStub_%s", cc_name);
5619 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5620 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5621 if (strict_ && is_equality) stream->Add("_STRICT");
5622 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5623 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5624 if (!include_smi_compare_) stream->Add("_NO_SMI");
5628 int CompareStub::MinorKey() {
5630 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5632 (lhs_.is(a1) && rhs_.is(a0)));
5633 return ConditionField::encode(static_cast<unsigned>(cc_))
5634 | RegisterField::encode(lhs_.is(a0))
5635 | StrictField::encode(strict_)
5636 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5637 | IncludeSmiCompareField::encode(include_smi_compare_);
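// A minimal sketch of the BitField-style packing MinorKey() relies on. Only
// the 14-bit condition field is implied by the ASSERT above; the other shifts
// and widths below are illustrative assumptions, not the stub's real layout.
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static unsigned encode(T value) {
    return (static_cast<unsigned>(value) & ((1u << kSize) - 1)) << kShift;
  }
  static T decode(unsigned packed) {
    return static_cast<T>((packed >> kShift) & ((1u << kSize) - 1));
  }
};
typedef BitFieldSketch<unsigned, 0, 14> ConditionFieldSketch;
typedef BitFieldSketch<bool, 14, 1> RegisterFieldSketch;
typedef BitFieldSketch<bool, 15, 1> StrictFieldSketch;
typedef BitFieldSketch<bool, 16, 1> NeverNanNanFieldSketch;
typedef BitFieldSketch<bool, 17, 1> IncludeSmiCompareFieldSketch;
// MinorKey() then simply ORs the disjoint encodings together, e.g.
//   ConditionFieldSketch::encode(cc) | StrictFieldSketch::encode(strict) | ...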
5645 Label got_char_code;
5646 Label sliced_string;
5653 __ JumpIfSmi(object_, receiver_not_string_);
5660 __ Branch(receiver_not_string_,
ne, t0, Operand(zero_reg));
5663 __ JumpIfNotSmi(index_, &index_not_smi_);
5665 __ bind(&got_smi_index_);
5669 __ Branch(index_out_of_range_,
ls, t0, Operand(index_));
5685 MacroAssembler* masm,
5686 const RuntimeCallHelper& call_helper) {
5687 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5690 __ bind(&index_not_smi_);
5694 Heap::kHeapNumberMapRootIndex,
5697 call_helper.BeforeCall(masm);
5699 __ Push(object_, index_);
5701 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5705 __ CallRuntime(Runtime::kNumberToSmi, 1);
5711 __ Move(index_, v0);
5716 call_helper.AfterCall(masm);
5718 __ JumpIfNotSmi(index_, index_out_of_range_);
5720 __ Branch(&got_smi_index_);
5725 __ bind(&call_runtime_);
5726 call_helper.BeforeCall(masm);
5728 __ Push(object_, index_);
5729 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5731 __ Move(result_, v0);
5733 call_helper.AfterCall(masm);
5736 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5756 __ Branch(&slow_case_,
ne, t0, Operand(zero_reg));
5758 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5762 __ Addu(result_, result_, t0);
5764 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5765 __ Branch(&slow_case_,
eq, result_, Operand(t0));
5771 MacroAssembler* masm,
5772 const RuntimeCallHelper& call_helper) {
5773 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5775 __ bind(&slow_case_);
5776 call_helper.BeforeCall(masm);
5778 __ CallRuntime(Runtime::kCharFromCode, 1);
5779 __ Move(result_, v0);
5781 call_helper.AfterCall(masm);
5784 __ Abort("Unexpected fallthrough from CharFromCode slow case");
5798 MacroAssembler* masm,
5799 const RuntimeCallHelper& call_helper) {
5800 char_code_at_generator_.GenerateSlow(masm, call_helper);
5801 char_from_code_generator_.GenerateSlow(masm, call_helper);
5816 __ addu(count, count, count);
5818 __ Branch(&done,
eq, count, Operand(zero_reg));
5819 __ addu(count, dest, count);
5823 __ addiu(src, src, 1);
5825 __ addiu(dest, dest, 1);
5826 __ Branch(&loop,
lt, dest, Operand(count));
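// The simple copy loop above as a C++ sketch: count is in characters and is
// doubled up front for two-byte strings, so the loop body moves single bytes
// until the destination reaches the computed limit (an illustrative sketch,
// not the stub's code).
static void CopyCharsSimpleSketch(unsigned char* dest, const unsigned char* src,
                                  int char_count, bool ascii) {
  int byte_count = ascii ? char_count : char_count * 2;  // addu(count, count, count)
  if (byte_count == 0) return;                           // Branch(&done, eq, ...)
  unsigned char* limit = dest + byte_count;              // addu(count, dest, count)
  while (dest < limit) {                                 // Branch(&loop, lt, dest, ...)
    *dest++ = *src++;
  }
}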
5832 enum CopyCharactersFlags {
5834 DEST_ALWAYS_ALIGNED = 2
5848 bool ascii = (flags & COPY_ASCII) != 0;
5849 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5851 if (dest_always_aligned && FLAG_debug_code) {
5856 "Destination of copy not aligned.",
5861 const int kReadAlignment = 4;
5862 const int kReadAlignmentMask = kReadAlignment - 1;
5872 __ addu(count, count, count);
5874 __ Branch(&done,
eq, count, Operand(zero_reg));
5878 __ Subu(scratch1, count, Operand(8));
5879 __ Addu(count, dest, Operand(count));
5880 Register limit = count;
5881 __ Branch(&byte_loop,
lt, scratch1, Operand(zero_reg));
5883 if (!dest_always_aligned) {
5885 __ And(scratch4, dest, Operand(kReadAlignmentMask));
5887 __ Branch(&dest_aligned,
eq, scratch4, Operand(zero_reg));
5889 __ bind(&aligned_loop);
5891 __ addiu(src, src, 1);
5893 __ addiu(dest, dest, 1);
5894 __ addiu(scratch4, scratch4, 1);
5895 __ Branch(&aligned_loop,
le, scratch4, Operand(kReadAlignmentMask));
5896 __ bind(&dest_aligned);
5901 __ And(scratch4, src, Operand(kReadAlignmentMask));
5902 __ Branch(&simple_loop,
eq, scratch4, Operand(zero_reg));
5911 __ Addu(src, src, Operand(kReadAlignment));
5914 __ Addu(dest, dest, Operand(kReadAlignment));
5915 __ Subu(scratch2, limit, dest);
5916 __ Branch(&loop,
ge, scratch2, Operand(kReadAlignment));
5919 __ Branch(&byte_loop);
5924 __ bind(&simple_loop);
5929 __ Addu(src, src, Operand(kReadAlignment));
5931 __ Addu(dest, dest, Operand(kReadAlignment));
5932 __ Subu(scratch2, limit, dest);
5933 __ Branch(&loop,
ge, scratch2, Operand(kReadAlignment));
5937 __ bind(&byte_loop);
5939 __ Branch(&done,
ge, dest, Operand(limit));
5941 __ addiu(src, src, 1);
5943 __ addiu(dest, dest, 1);
5944 __ Branch(&byte_loop);
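// A rough C++ sketch of the long-copy strategy above: nothing to do for an
// empty range, a plain byte loop for fewer than eight bytes, otherwise align
// the destination and move kReadAlignment-sized chunks while a full chunk
// remains, then finish with a byte tail. The stub's unaligned-source word
// loads are not reproduced here; this is an assumption-laden sketch, not the
// stub's exact code.
static void CopyCharsLongSketch(unsigned char* dest, const unsigned char* src,
                                int byte_count) {
  const int kReadAlignmentSketch = 4;  // kReadAlignment above
  if (byte_count == 0) return;
  unsigned char* limit = dest + byte_count;
  if (byte_count >= 8) {               // Subu(scratch1, count, Operand(8))
    // Align the destination to a word boundary one byte at a time.
    while ((reinterpret_cast<unsigned long>(dest) & (kReadAlignmentSketch - 1)) != 0) {
      *dest++ = *src++;
    }
    // Copy one chunk per iteration while a full chunk fits before the limit.
    while (limit - dest >= kReadAlignmentSketch) {
      for (int i = 0; i < kReadAlignmentSketch; i++) dest[i] = src[i];
      dest += kReadAlignmentSketch;
      src += kReadAlignmentSketch;
    }
  }
  // Byte tail.
  while (dest < limit) *dest++ = *src++;
}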
5960 Register scratch = scratch3;
5964 Label not_array_index;
5965 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5966 __ Branch(&not_array_index,
5969 Operand(static_cast<int>('9' - '0')));
5970 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5977 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5978 __ Or(c1, c1, scratch1);
5981 not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5983 __ bind(&not_array_index);
5985 Register hash = scratch1;
5991 Register chars = c1;
5993 __ Or(chars, chars, scratch);
6000 Register symbol_table = c2;
6003 Register undefined = scratch4;
6004 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
6007 Register mask = scratch2;
6009 __ sra(mask, mask, 1);
6010 __ Addu(mask, mask, -1);
6013 Register first_symbol_table_element = symbol_table;
6014 __ Addu(first_symbol_table_element, symbol_table,
6027 const int kProbes = 4;
6028 Label found_in_symbol_table;
6029 Label next_probe[kProbes];
6030 Register candidate = scratch5;
6031 for (int i = 0; i < kProbes; i++) {
6034 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
6036 __ mov(candidate, hash);
6039 __ And(candidate, candidate, Operand(mask));
6044 __ Addu(scratch, scratch, first_symbol_table_element);
6049 __ GetObjectType(candidate, scratch, scratch);
6052 __ Branch(not_found,
eq, undefined, Operand(candidate));
6054 if (FLAG_debug_code) {
6055 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
6056 __ Assert(eq, "oddball in symbol table is not undefined or the hole",
6057 scratch, Operand(candidate));
6059 __ jmp(&next_probe[i]);
6061 __ bind(&is_string);
6066 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
6075 __ Branch(&found_in_symbol_table,
eq, chars, Operand(scratch));
6076 __ bind(&next_probe[i]);
6083 Register result = candidate;
6084 __ bind(&found_in_symbol_table);
6091 Register character) {
6093 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
6096 __ addu(hash, hash, character);
6097 __ sll(at, hash, 10);
6098 __ addu(hash, hash, at);
6100 __ srl(at, hash, 6);
6101 __ xor_(hash, hash, at);
6107 Register character) {
6109 __ addu(hash, hash, character);
6111 __ sll(at, hash, 10);
6112 __ addu(hash, hash, at);
6114 __ srl(at, hash, 6);
6115 __ xor_(hash, hash, at);
6122 __ sll(at, hash, 3);
6123 __ addu(hash, hash, at);
6125 __ srl(at, hash, 11);
6126 __ xor_(hash, hash, at);
6128 __ sll(at, hash, 15);
6129 __ addu(hash, hash, at);
6132 __ and_(hash, hash, at);
6136 __ Movz(hash, at, hash);
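// For reference, a C++ sketch of the string hash computed by the three
// helpers above (GenerateHashInit / GenerateHashAddCharacter /
// GenerateHashGetHash). The shift amounts come from the instructions above;
// the hash-bit mask and the non-zero fallback value are assumptions standing
// in for the constants the generated code loads (cf. kHashBitMask, kZeroHash).
static unsigned StringHashSketch(const unsigned char* chars, int length,
                                 unsigned seed) {
  const unsigned kHashBitMaskSketch = (1u << 30) - 1;  // assumed mask width
  const unsigned kZeroHashSketch = 27;                 // assumed fallback value
  unsigned hash = seed;                  // LoadRoot(hash, kHashSeedRootIndex)
  for (int i = 0; i < length; i++) {
    hash += chars[i];                    // addu(hash, hash, character)
    hash += hash << 10;                  // sll(at, hash, 10); addu
    hash ^= hash >> 6;                   // srl(at, hash, 6); xor_
  }
  hash += hash << 3;                     // sll(at, hash, 3); addu
  hash ^= hash >> 11;                    // srl(at, hash, 11); xor_
  hash += hash << 15;                    // sll(at, hash, 15); addu
  hash &= kHashBitMaskSketch;            // and_(hash, hash, at)
  return hash == 0 ? kZeroHashSketch : hash;  // Movz(hash, at, hash)
}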
6140 void SubStringStub::Generate(MacroAssembler* masm) {
6167 __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
6168 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6171 __ Branch(&runtime,
lt, a3, Operand(zero_reg));
6173 __ Branch(&runtime,
gt, a3, Operand(a2));
6174 __ Subu(a2, a2, a3);
6178 __ JumpIfSmi(v0, &runtime);
6183 __ Branch(&runtime,
ne, t0, Operand(zero_reg));
6192 __ Branch(&return_v0,
eq, a2, Operand(t0));
6194 __ Branch(&runtime,
hi, a2, Operand(t0));
6203 Label underlying_unpacked, sliced_string, seq_or_external_string;
6211 __ Branch(&sliced_string,
ne, t0, Operand(zero_reg));
6214 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6215 __ Branch(&runtime,
ne, t1, Operand(t0));
6220 __ jmp(&underlying_unpacked);
6222 __ bind(&sliced_string);
6227 __ Addu(a3, a3, t0);
6231 __ jmp(&underlying_unpacked);
6233 __ bind(&seq_or_external_string);
6237 __ bind(&underlying_unpacked);
6239 if (FLAG_string_slices) {
6252 Label two_byte_slice, set_slice_header;
6256 __ Branch(&two_byte_slice,
eq, t0, Operand(zero_reg));
6257 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6258 __ jmp(&set_slice_header);
6259 __ bind(&two_byte_slice);
6260 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6261 __ bind(&set_slice_header);
6267 __ bind(&copy_routine);
6274 Label two_byte_sequential, sequential_string, allocate_result;
6278 __ Branch(&sequential_string,
eq, t0, Operand(zero_reg));
6284 __ Branch(&runtime,
ne, t0, Operand(zero_reg));
6287 __ jmp(&allocate_result);
6289 __ bind(&sequential_string);
6294 __ bind(&allocate_result);
6298 __ Branch(&two_byte_sequential,
eq, t0, Operand(zero_reg));
6301 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6304 __ Addu(t1, t1, a3);
6315 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6319 __ bind(&two_byte_sequential);
6320 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6325 __ Addu(t1, t1, t0);
6335 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6337 __ bind(&return_v0);
6338 Counters* counters = masm->isolate()->counters();
6339 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6344 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6353 Register scratch3) {
6354 Register length = scratch1;
6357 Label strings_not_equal, check_zero_length;
6360 __ Branch(&check_zero_length,
eq, length, Operand(scratch2));
6361 __ bind(&strings_not_equal);
6366 Label compare_chars;
6367 __ bind(&check_zero_length);
6369 __ Branch(&compare_chars,
ne, length, Operand(zero_reg));
6374 __ bind(&compare_chars);
6376 GenerateAsciiCharsCompareLoop(masm,
6377 left, right, length, scratch2, scratch3, v0,
6378 &strings_not_equal);
6392 Register scratch4) {
6393 Label result_not_equal, compare_lengths;
6397 __ Subu(scratch3, scratch1, Operand(scratch2));
6398 Register length_delta = scratch3;
6399 __ slt(scratch4, scratch2, scratch1);
6400 __ Movn(scratch1, scratch2, scratch4);
6401 Register min_length = scratch1;
6403 __ Branch(&compare_lengths,
eq, min_length, Operand(zero_reg));
6406 GenerateAsciiCharsCompareLoop(masm,
6407 left, right, min_length, scratch2, scratch4, v0,
6411 __ bind(&compare_lengths);
6414 __ mov(scratch2, length_delta);
6415 __ mov(scratch4, zero_reg);
6416 __ mov(v0, zero_reg);
6418 __ bind(&result_not_equal);
6422 __ Branch(&ret,
eq, scratch2, Operand(scratch4));
6424 __ Branch(&ret,
gt, scratch2, Operand(scratch4));
6431 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6432 MacroAssembler* masm,
6439 Label* chars_not_equal) {
6443 __ SmiUntag(length);
6444 __ Addu(scratch1, length,
6446 __ Addu(left, left, Operand(scratch1));
6447 __ Addu(right, right, Operand(scratch1));
6448 __ Subu(length, zero_reg, length);
6449 Register index = length;
6455 __ Addu(scratch3, left, index);
6457 __ Addu(scratch3, right, index);
6459 __ Branch(chars_not_equal,
ne, scratch1, Operand(scratch2));
6460 __ Addu(index, index, 1);
6461 __ Branch(&loop,
ne, index, Operand(zero_reg));
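// Roughly what the loop above does in C++: both pointers are positioned just
// past the character data and walked with a negative index that counts up
// toward zero, so the exit test is a simple compare against zero (a sketch;
// the stub jumps to chars_not_equal instead of returning a bool).
static bool FlatAsciiRangesEqualSketch(const unsigned char* left_end,
                                       const unsigned char* right_end,
                                       int length) {
  for (int index = -length; index != 0; index++) {
    if (left_end[index] != right_end[index]) return false;
  }
  return true;
}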
6465 void StringCompareStub::Generate(MacroAssembler* masm) {
6468 Counters* counters = masm->isolate()->counters();
6477 __ Branch(&not_same, ne, a0, Operand(a1));
6481 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6487 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6490 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6491 __ Addu(sp, sp, Operand(2 * kPointerSize));
6495 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6499 void StringAddStub::Generate(MacroAssembler* masm) {
6500 Label call_runtime, call_builtin;
6503 Counters* counters = masm->isolate()->counters();
6515 __ JumpIfEitherSmi(a0, a1, &call_runtime);
6523 __ Or(t4, t0, Operand(t1));
6525 __ Branch(&call_runtime,
ne, t4, Operand(zero_reg));
6531 GenerateConvertArgument(
6532 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6533 builtin_id = Builtins::STRING_ADD_RIGHT;
6535 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6536 GenerateConvertArgument(
6537 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6538 builtin_id = Builtins::STRING_ADD_LEFT;
6548 Label strings_not_empty;
6557 __ Movz(v0, a1, a2);
6558 __ slt(t4, zero_reg, a2);
6559 __ slt(t5, zero_reg, a3);
6560 __ and_(t4, t4, t5);
6561 __ Branch(&strings_not_empty,
ne, t4, Operand(zero_reg));
6563 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6566 __ bind(&strings_not_empty);
6581 Label string_add_flat_result, longer_than_two;
6584 __ Addu(t2, a2, Operand(a3));
6587 __ Branch(&longer_than_two,
ne, t2, Operand(2));
6596 __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6605 Label make_two_character_string;
6607 masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6608 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6611 __ bind(&make_two_character_string);
6617 __ li(t2, Operand(2));
6618 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6620 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6623 __ bind(&longer_than_two);
6640 Label non_ascii, allocated, ascii_data;
6643 __ And(t4, t0, Operand(t1));
6645 __ Branch(&non_ascii,
eq, t4, Operand(zero_reg));
6648 __ bind(&ascii_data);
6649 __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6650 __ bind(&allocated);
6654 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6657 __ bind(&non_ascii);
6664 __ and_(at, at, t1);
6665 __ Branch(&ascii_data,
ne, at, Operand(zero_reg));
6667 __ xor_(t0, t0, t1);
6673 __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6674 __ Branch(&allocated);
6687 Label first_prepared, second_prepared;
6688 __ bind(&string_add_flat_result);
6696 __ Xor(t3, t0, Operand(t1));
6698 __ Branch(&call_runtime,
ne, t3, Operand(zero_reg));
6704 Label skip_first_add;
6705 __ Branch(&skip_first_add,
ne, t4, Operand(zero_reg));
6708 __ bind(&skip_first_add);
6712 __ Branch(&call_runtime,
ne, t4, Operand(zero_reg));
6714 __ bind(&first_prepared);
6719 Label skip_second_add;
6720 __ Branch(&skip_second_add,
ne, t4, Operand(zero_reg));
6723 __ bind(&skip_second_add);
6727 __ Branch(&call_runtime,
ne, t4, Operand(zero_reg));
6729 __ bind(&second_prepared);
6731 Label non_ascii_string_add_flat_result;
6740 __ Branch(&non_ascii_string_add_flat_result,
eq, t4, Operand(zero_reg));
6742 __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6754 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6757 __ bind(&non_ascii_string_add_flat_result);
6758 __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6770 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6774 __ bind(&call_runtime);
6775 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6777 if (call_builtin.is_linked()) {
6778 __ bind(&call_builtin);
6784 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6793 Label not_string, done;
6794 __ JumpIfSmi(arg, &not_string);
6795 __ GetObjectType(arg, scratch1, scratch1);
6800 __ bind(&not_string);
6810 __ mov(arg, scratch1);
6815 __ bind(&not_cached);
6816 __ JumpIfSmi(arg, slow);
6817 __ GetObjectType(arg, scratch1, scratch2);
6821 __ And(scratch2, scratch2, scratch4);
6822 __ Branch(slow,
ne, scratch2, Operand(scratch4));
6830 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6834 __ JumpIfNotSmi(a2, &miss);
6836 if (GetCondition() == eq) {
6838 __ Subu(v0, a0, a1);
6843 __ Subu(v0, a1, a0);
6852 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6856 Label unordered, maybe_undefined1, maybe_undefined2;
6858 __ And(a2, a1, Operand(a0));
6859 __ JumpIfSmi(a2, &generic_stub);
6861 __ GetObjectType(a0, a2, a2);
6863 __ GetObjectType(a1, a2, a2);
6869 CpuFeatures::Scope scope(FPU);
6878 Label fpu_eq, fpu_lt;
6880 __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6894 __ li(v0, Operand(LESS));
6898 __ bind(&unordered);
6901 __ bind(&generic_stub);
6902 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6904 __ bind(&maybe_undefined1);
6906 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6907 __ Branch(&miss,
ne, a0, Operand(at));
6908 __ GetObjectType(a1, a2, a2);
6913 __ bind(&maybe_undefined2);
6915 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6916 __ Branch(&unordered,
eq, a1, Operand(at));
6924 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6930 Register right = a0;
6935 __ JumpIfEitherSmi(left, right, &miss);
6943 __ And(tmp1, tmp1, Operand(tmp2));
6945 __ Branch(&miss,
eq, tmp1, Operand(zero_reg));
6953 __ Ret(ne, left, Operand(right));
6962 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6970 Register right = a0;
6978 __ JumpIfEitherSmi(left, right, &miss);
6987 __ Or(tmp3, tmp1, tmp2);
6989 __ Branch(&miss,
ne, tmp5, Operand(zero_reg));
6992 Label left_ne_right;
6995 __ Branch(&left_ne_right,
ne, left, Operand(right));
6997 __ mov(v0, zero_reg);
6998 __ bind(&left_ne_right);
7007 __ And(tmp3, tmp1, Operand(tmp2));
7010 __ Branch(&is_symbol,
eq, tmp5, Operand(zero_reg));
7016 __ bind(&is_symbol);
7021 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
7022 tmp1, tmp2, tmp3, tmp4, &runtime);
7027 masm, left, right, tmp1, tmp2, tmp3);
7030 masm, left, right, tmp1, tmp2, tmp3, tmp4);
7035 __ Push(left, right);
7037 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
7039 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
7047 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
7050 __ And(a2, a1, Operand(a0));
7051 __ JumpIfSmi(a2, &miss);
7053 __ GetObjectType(a0, a2, a2);
7055 __ GetObjectType(a1, a2, a2);
7060 __ subu(v0, a0, a1);
7067 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
7070 __ JumpIfSmi(a2, &miss);
7073 __ Branch(&miss,
ne, a2, Operand(known_map_));
7074 __ Branch(&miss,
ne, a3, Operand(known_map_));
7077 __ subu(v0, a0, a1);
7083 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7086 ExternalReference miss =
7087 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7093 __ addiu(sp, sp, -kPointerSize);
7116 __ Assert(ne, "Received invalid return address.", t0,
7117 Operand(reinterpret_cast<uint32_t>(kZapValue)));
7124 ExternalReference function) {
7125 __ li(t9, Operand(function));
7132 __ Move(t9, target);
7133 __ AssertStackIsAligned();
7143 masm->bal(&find_ra);
7145 masm->bind(&find_ra);
7147 const int kNumInstructionsToJump = 6;
7148 masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7153 Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7154 RelocInfo::CODE_TARGET),
7159 ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7167 Register properties,
7168 Handle<String> name,
7169 Register scratch0) {
7175 for (int i = 0; i < kInlinedProbes; i++) {
7178 Register index = scratch0;
7181 __ Subu(index, index, Operand(1));
7182 __ And(index, index, Operand(
7183 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7187 __ sll(at, index, 1);
7188 __ Addu(index, index, at);
7190 Register entity_name = scratch0;
7193 Register tmp = properties;
7194 __ sll(scratch0, index, 1);
7195 __ Addu(tmp, properties, scratch0);
7198 ASSERT(!tmp.is(entity_name));
7199 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7200 __ Branch(done,
eq, entity_name, Operand(tmp));
7202 if (i != kInlinedProbes - 1) {
7204 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7207 __ Branch(miss,
eq, entity_name, Operand(Handle<String>(name)));
7210 __ Branch(&the_hole,
eq, entity_name, Operand(tmp));
7217 __ Branch(miss,
eq, scratch0, Operand(zero_reg));
7227 const int spill_mask =
7228 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7229 a2.bit() | a1.bit() | a0.bit() | v0.bit());
7231 __ MultiPush(spill_mask);
7233 __ li(a1, Operand(Handle<String>(name)));
7237 __ MultiPop(spill_mask);
7239 __ Branch(done,
eq, at, Operand(zero_reg));
7240 __ Branch(miss,
ne, at, Operand(zero_reg));
7254 Register scratch2) {
7255 ASSERT(!elements.is(scratch1));
7256 ASSERT(!elements.is(scratch2));
7257 ASSERT(!name.is(scratch1));
7258 ASSERT(!name.is(scratch2));
7260 __ AssertString(name);
7265 __ Subu(scratch1, scratch1, Operand(1));
7270 for (int i = 0; i < kInlinedProbes; i++) {
7277 ASSERT(StringDictionary::GetProbeOffset(i) <
7279 __ Addu(scratch2, scratch2, Operand(
7283 __ And(scratch2, scratch1, scratch2);
7289 __ sll(at, scratch2, 1);
7290 __ Addu(scratch2, scratch2, at);
7293 __ sll(at, scratch2, 2);
7294 __ Addu(scratch2, elements, at);
7296 __ Branch(done,
eq, name, Operand(at));
7299 const int spill_mask =
7300 (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7301 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7302 ~(scratch1.bit() | scratch2.bit());
7304 __ MultiPush(spill_mask);
7306 ASSERT(!elements.is(a1));
7308 __ Move(a0, elements);
7310 __ Move(a0, elements);
7315 __ mov(scratch2, a2);
7317 __ MultiPop(spill_mask);
7319 __ Branch(done,
ne, at, Operand(zero_reg));
7320 __ Branch(miss,
eq, at, Operand(zero_reg));
7336 Register result = v0;
7337 Register dictionary = a0;
7339 Register index = a2;
7342 Register undefined = t1;
7343 Register entry_key = t2;
7345 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7349 __ Subu(mask, mask, Operand(1));
7353 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7355 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7362 ASSERT(StringDictionary::GetProbeOffset(i) <
7364 __ Addu(index, hash, Operand(
7367 __ mov(index, hash);
7370 __ And(index, mask, index);
7376 __ sll(index, index, 1);
7377 __ Addu(index, index, at);
7381 __ sll(index, index, 2);
7382 __ Addu(index, index, dictionary);
7386 __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7389 __ Branch(&in_dictionary,
eq, entry_key, Operand(key));
7397 __ Branch(&maybe_in_dictionary,
eq, result, Operand(zero_reg));
7401 __ bind(&maybe_in_dictionary);
7407 __ mov(result, zero_reg);
7410 __ bind(&in_dictionary);
7414 __ bind(&not_in_dictionary);
7416 __ mov(result, zero_reg);
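// A sketch of the probe sequence the loop above walks: an open-addressed
// table whose capacity is a power of two, probed with an increasing offset
// until an undefined slot (miss) or a matching key (hit) is found. The probe
// offset formula, the entry stride, and the probe cap are assumptions here;
// the real stub gives up after kTotalProbes and defers to the runtime.
static int FindEntrySketch(void* const* elements, unsigned capacity,
                           const void* key, unsigned hash,
                           const void* undefined_sentinel) {
  const unsigned kEntrySizeSketch = 3;  // key, value, details (assumed stride)
  unsigned mask = capacity - 1;         // Subu(mask, mask, Operand(1))
  for (unsigned i = 0; i < capacity; i++) {
    unsigned probe = (i + i * i) >> 1;               // assumed quadratic step
    unsigned index = (hash + probe) & mask;          // And(index, mask, index)
    const void* entry_key = elements[index * kEntrySizeSketch];
    if (entry_key == undefined_sentinel) return -1;  // free slot: not present
    if (entry_key == key) return static_cast<int>(index);
  }
  return -1;  // table exhausted (the stub bails out much earlier)
}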
7420 struct AheadOfTimeWriteBarrierStubList {
7421 Register object, value, address;
7425 #define REG(Name) { kRegister_ ## Name ## _Code }
7427 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7466 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7467 !entry->object.is(no_reg);
7469 if (object_.is(entry->object) &&
7470 value_.is(entry->value) &&
7471 address_.is(entry->address) &&
7472 remembered_set_action_ == entry->action &&
7488 stub1.GetCode()->set_is_pregenerated(true);
7493 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7494 !entry->object.is(no_reg);
7501 stub.GetCode()->set_is_pregenerated(true);
7506 bool CodeStub::CanUseFPRegisters() {
7515 void RecordWriteStub::Generate(MacroAssembler* masm) {
7516 Label skip_to_incremental_noncompacting;
7517 Label skip_to_incremental_compacting;
7525 __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7527 __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7531 __ RememberedSetHelper(object_,
7539 __ bind(&skip_to_incremental_noncompacting);
7542 __ bind(&skip_to_incremental_compacting);
7553 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7557 Label dont_need_remembered_set;
7559 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7560 __ JumpIfNotInNewSpace(regs_.scratch0(),
7562 &dont_need_remembered_set);
7564 __ CheckPageFlag(regs_.object(),
7568 &dont_need_remembered_set);
7572 CheckNeedsToInformIncrementalMarker(
7573 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7574 InformIncrementalMarker(masm, mode);
7575 regs_.Restore(masm);
7576 __ RememberedSetHelper(object_,
7582 __ bind(&dont_need_remembered_set);
7585 CheckNeedsToInformIncrementalMarker(
7586 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7587 InformIncrementalMarker(masm, mode);
7588 regs_.Restore(masm);
7593 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7594 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7595 int argument_count = 3;
7596 __ PrepareCallCFunction(argument_count, regs_.scratch0());
7598 a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7599 ASSERT(!address.is(regs_.object()));
7601 __ Move(address, regs_.address());
7602 __ Move(a0, regs_.object());
7604 __ Move(a1, address);
7609 __ li(a2, Operand(ExternalReference::isolate_address()));
7611 AllowExternalCallThatCantCauseGC scope(masm);
7614 ExternalReference::incremental_evacuation_record_write_function(
7620 ExternalReference::incremental_marking_record_write_function(
7624 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7628 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7629 MacroAssembler* masm,
7630 OnNoNeedToInformIncrementalMarker on_no_need,
7633 Label need_incremental;
7634 Label need_incremental_pop_scratch;
7637 __ lw(regs_.scratch1(),
7640 __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
7641 __ sw(regs_.scratch1(),
7644 __ Branch(&need_incremental,
lt, regs_.scratch1(), Operand(zero_reg));
7648 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7650 regs_.Restore(masm);
7651 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7652 __ RememberedSetHelper(object_,
7664 __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7667 Label ensure_not_white;
7669 __ CheckPageFlag(regs_.scratch0(),
7675 __ CheckPageFlag(regs_.object(),
7681 __ bind(&ensure_not_white);
7686 __ Push(regs_.object(), regs_.address());
7687 __ EnsureNotWhite(regs_.scratch0(),
7691 &need_incremental_pop_scratch);
7692 __ Pop(regs_.object(), regs_.address());
7694 regs_.Restore(masm);
7695 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7696 __ RememberedSetHelper(object_,
7705 __ bind(&need_incremental_pop_scratch);
7706 __ Pop(regs_.object(), regs_.address());
7708 __ bind(&need_incremental);
7714 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7724 Label double_elements;
7726 Label slow_elements;
7727 Label fast_elements;
7729 __ CheckFastElements(a2, t1, &double_elements);
7731 __ JumpIfSmi(a0, &smi_element);
7732 __ CheckFastSmiElements(a2, t1, &fast_elements);
7736 __ bind(&slow_elements);
7738 __ Push(a1, a3, a0);
7742 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7745 __ bind(&fast_elements);
7748 __ Addu(t2, t1, t2);
7759 __ bind(&smi_element);
7762 __ Addu(t2, t1, t2);
7768 __ bind(&double_elements);
7770 __ StoreNumberToDoubleElements(a0, a3, a1,
7780 if (entry_hook_ != NULL) {
7789 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
7792 const int32_t kReturnAddressDistanceFromFunctionStart =
7796 __ Push(ra, t1, a1);
7797 const int32_t kNumSavedRegs = 3;
7800 __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
7804 __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
7807 int frame_alignment = masm->ActivationFrameAlignment();
7808 if (frame_alignment > kPointerSize) {
7811 __ And(sp, sp, Operand(-frame_alignment));
7814 #if defined(V8_HOST_ARCH_MIPS)
7815 __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
7821 reinterpret_cast<intptr_t>(EntryHookTrampoline));
7822 ApiFunction dispatcher(trampoline_address);
7823 __ li(at, Operand(ExternalReference(&dispatcher,
7824 ExternalReference::BUILTIN_CALL,
7830 if (frame_alignment > kPointerSize) {
7843 #endif // V8_TARGET_ARCH_MIPS