namespace i = v8::internal;
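// EntryCode/ExitCode set up and tear down the fixed register state (the Smi
// constant register and the root register) that the generated test code uses.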
static void EntryCode(MacroAssembler* masm) {
  __ InitializeSmiConstantRegister();
  __ InitializeRootRegister();

static void ExitCode(MacroAssembler* masm) {
  __ movq(rdx, Immediate(-1));
int64_t test_numbers[] = {
    0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
int test_number_count = 15;
for (int i = 0; i < test_number_count; i++) {
  int64_t number = test_numbers[i];
  if (static_cast<int>(number) == number) {
    Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
    CHECK_EQ(smi_from_int, smi_from_intptr);
  int64_t smi_value = smi_from_intptr->value();
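// TestMoveSmi emits code with the test id in rax (reported on failure) and the
// raw bit pattern of |value| in rdx, so the macro assembler's Smi move can be
// compared against it.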
static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
  __ movl(rax, Immediate(id));
  __ Set(rdx, reinterpret_cast<intptr_t>(value));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
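// Each TEST below follows the same harness shape: assemble the checks into an
// executable buffer, run them via FUNCTION_CAST<F0>, and read the result from
// rax, which the generated code sets to 0 on success and to the id of the
// failing check otherwise.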
__ movl(rax, Immediate(id + 1));
__ movl(rax, Immediate(id + 2));
__ movl(rax, Immediate(id + 3));
__ movl(rax, Immediate(id + 4));
__ movl(rax, Immediate(id + 9));
__ movl(rax, Immediate(id + 10));
__ movl(rax, Immediate(id + 11));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
__ movq(rax, Immediate(1));
__ movl(rcx, Immediate(0));
__ movq(rax, Immediate(2));
__ movl(rcx, Immediate(1024));
__ movq(rax, Immediate(3));
__ movl(rcx, Immediate(-1));
__ movq(rax, Immediate(4));
__ movq(rax, Immediate(5));
__ movq(rax, Immediate(6));
__ movl(rcx, Immediate(0));
__ movq(rax, Immediate(7));
__ movl(rcx, Immediate(1024));
__ movq(rax, Immediate(8));
__ movl(rcx, Immediate(-1));
__ movq(rax, Immediate(9));
__ movq(rax, Immediate(10));
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
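// TestI64PlusConstantToSmi: Integer64PlusConstantToSmi(dst, src, y) must yield
// the Smi for x + y, both with a separate destination (rdx, rcx) and in place
// (rcx, rcx).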
int64_t result = x + y;
__ movl(rax, Immediate(id));
__ Integer64PlusConstantToSmi(rdx, rcx, y);
__ Integer64PlusConstantToSmi(rcx, rcx, y);
TEST(Integer64PlusConstantToSmi) {
  HandleScope handles(isolate);
  MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
  MacroAssembler* masm = &assembler;
  masm->GetCode(&desc);
  int result = FUNCTION_CAST<F0>(buffer)();
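// TEST(SmiCheck) runs smis and non-smis through CheckSmi, CheckNonNegativeSmi,
// CheckIsMinSmi, CheckBothSmi and CheckInteger32ValidSmiValue; each call
// returns a Condition that the emitted code then branches on.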
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
__ movl(rax, Immediate(1));
__ movl(rcx, Immediate(0));
cond = masm->CheckSmi(rcx);
cond = masm->CheckSmi(rcx);
__ movl(rcx, Immediate(-1));
cond = masm->CheckSmi(rcx);
cond = masm->CheckSmi(rcx);
cond = masm->CheckSmi(rcx);
cond = masm->CheckSmi(rcx);
cond = masm->CheckSmi(rcx);
cond = masm->CheckSmi(rcx);
__ movl(rcx, Immediate(0));
cond = masm->CheckNonNegativeSmi(rcx);
cond = masm->CheckNonNegativeSmi(rcx);
__ movq(rcx, Immediate(-1));
cond = masm->CheckNonNegativeSmi(rcx);
cond = masm->CheckNonNegativeSmi(rcx);
cond = masm->CheckNonNegativeSmi(rcx);
cond = masm->CheckNonNegativeSmi(rcx);
cond = masm->CheckNonNegativeSmi(rcx);
cond = masm->CheckIsMinSmi(rcx);
__ movq(rcx, Immediate(0));
cond = masm->CheckIsMinSmi(rcx);
cond = masm->CheckIsMinSmi(rcx);
cond = masm->CheckIsMinSmi(rcx);
cond = masm->CheckBothSmi(rcx, rdx);
cond = masm->CheckBothSmi(rcx, rdx);
cond = masm->CheckBothSmi(rcx, rdx);
cond = masm->CheckBothSmi(rcx, rdx);
cond = masm->CheckBothSmi(rcx, rcx);
cond = masm->CheckBothSmi(rdx, rdx);
__ movq(rcx, Immediate(0));
cond = masm->CheckInteger32ValidSmiValue(rax);
__ movq(rcx, Immediate(-1));
cond = masm->CheckInteger32ValidSmiValue(rax);
cond = masm->CheckInteger32ValidSmiValue(rax);
cond = masm->CheckInteger32ValidSmiValue(rax);
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
  __ movl(rax, Immediate(id + 8));
  Label smi_ok, smi_ok2;
  __ movl(rax, Immediate(id));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
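// SmiAddTest: rcx and rdx hold the Smi operands, r8 the expected Smi sum
// (first + second), against which the SmiAdd variants are compared.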
static void SmiAddTest(MacroAssembler* masm,
  __ movl(rcx, Immediate(first));
  __ movl(rdx, Immediate(second));
  __ movl(r8, Immediate(first + second));
  __ Integer32ToSmi(r8, r8);
  __ movl(rax, Immediate(id));
  __ movl(rcx, Immediate(first));
  __ movl(rcx, Immediate(first));
  __ movl(rcx, Immediate(first));
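// SmiAddOverflowTest: additions that leave Smi range must take the bailout
// label (overflow_ok) instead of producing a result.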
static void SmiAddOverflowTest(MacroAssembler* masm,
  __ movl(rax, Immediate(id));
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
SmiAddTest(masm, &exit, 0x10, 1, 2);
SmiAddTest(masm, &exit, 0x20, 1, -2);
SmiAddTest(masm, &exit, 0x30, -1, 2);
SmiAddTest(masm, &exit, 0x40, -1, -2);
SmiAddTest(masm, &exit, 0x50, 0x1000, 0x2000);
SmiAddOverflowTest(masm, &exit, 0x90, -1);
SmiAddOverflowTest(masm, &exit, 0xA0, 1);
SmiAddOverflowTest(masm, &exit, 0xB0, 1024);
SmiAddOverflowTest(masm, &exit, 0xD0, -2);
SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
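// SmiSubTest and SmiSubOverflowTest mirror the addition tests above for SmiSub.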
static void SmiSubTest(MacroAssembler* masm,
  __ movl(rax, Immediate(id));

static void SmiSubOverflowTest(MacroAssembler* masm,
  __ movl(rax, Immediate(id));
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
  __ bind(&overflow_ok);
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
SmiSubTest(masm, &exit, 0x10, 1, 2);
SmiSubTest(masm, &exit, 0x20, 1, -2);
SmiSubTest(masm, &exit, 0x30, -1, 2);
SmiSubTest(masm, &exit, 0x40, -1, -2);
SmiSubTest(masm, &exit, 0x50, 0x1000, 0x2000);
SmiSubOverflowTest(masm, &exit, 0xA0, 1);
SmiSubOverflowTest(masm, &exit, 0xB0, 1024);
SmiSubOverflowTest(masm, &exit, 0xD0, -2);
SmiSubOverflowTest(masm, &exit, 0xE0, -42000);
SmiSubOverflowTest(masm, &exit, 0x100, 0);
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
  int64_t result = static_cast<int64_t>(x) * static_cast<int64_t>(y);
  bool negative_zero = (result == 0) && (x < 0 || y < 0);
  __ movl(rax, Immediate(id));
  __ movl(rax, Immediate(id + 8));
  Label overflow_ok, overflow_ok2;
  __ bind(&overflow_ok);
  __ bind(&overflow_ok2);
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
TestSmiMul(masm, &exit, 0x50, 0x10000, 0x10000);
TestSmiMul(masm, &exit, 0x60, 0x10000, 0xffff);
TestSmiMul(masm, &exit, 0x70, 0x10000, 0xffff);
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
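// TestSmiDiv classifies each x / y pair (division by zero, overflow, fraction,
// negative zero) and only expects SmiDiv to succeed when none of those apply;
// the id + 8 marker flags the bailout path.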
void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
  bool division_by_zero = (y == 0);
  bool negative_zero = (x == 0 && y < 0);
#if V8_TARGET_ARCH_X64
  bool fraction = !division_by_zero && !overflow && (x % y != 0);
  if (!fraction && !overflow && !negative_zero && !division_by_zero) {
    __ movq(r15, Immediate(id));
    __ movq(r15, Immediate(id + 8));
    Label fail_ok, fail_ok2;
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
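// TestSmiMod applies the same classification to SmiMod; a zero result with a
// negative dividend counts as negative zero.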
void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
  bool division_by_zero = (y == 0);
  bool fraction = !division_by_zero && !division_overflow && ((x % y) != 0);
  bool negative_zero = (!fraction && x < 0);
  if (!division_overflow && !negative_zero && !division_by_zero) {
    __ movq(r15, Immediate(id));
    __ movq(r15, Immediate(id + 8));
    Label fail_ok, fail_ok2;
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
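// TestSmiIndex: for scale factors 0 through 7, SmiToIndex and
// SmiToNegativeIndex must produce a register/scale pair that, once shifted,
// equals x << i (respectively -x << i).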
__ movl(rax, Immediate(id));
for (int i = 0; i < 8; i++) {
  SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
  __ shl(index.reg, Immediate(index.scale));
  __ Set(r8, static_cast<intptr_t>(x) << i);
  __ cmpq(index.reg, r8);
  index = masm->SmiToIndex(rcx, rcx, i);
  __ shl(rcx, Immediate(index.scale));
  __ Set(r8, static_cast<intptr_t>(x) << i);
  index = masm->SmiToNegativeIndex(rdx, rcx, i);
  __ shl(index.reg, Immediate(index.scale));
  __ Set(r8, static_cast<intptr_t>(-x) << i);
  __ cmpq(index.reg, r8);
  index = masm->SmiToNegativeIndex(rcx, rcx, i);
  __ shl(rcx, Immediate(index.scale));
  __ Set(r8, static_cast<intptr_t>(-x) << i);
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
__ movl(rax, Immediate(id));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
  __ movl(rax, Immediate(id));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
  __ movl(rax, Immediate(id));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
TestSmiOr(masm, &exit, 0xB0, 0x05555555, 0x01234567);
TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
  __ movl(rax, Immediate(id));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
TestSmiXor(masm, &exit, 0xB0, 0x5555555, 0x01234567);
TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
  __ movl(rax, Immediate(id));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
const int kNumShifts = 5;
__ movl(rax, Immediate(id));
for (int i = 0; i < kNumShifts; i++) {
  int shift = shifts[i];
  int result = x << shift;
  __ SmiShiftLeftConstant(r9, rcx, shift);
  __ SmiShiftLeftConstant(rcx, rcx, shift);
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
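// Unlike SmiShiftLeftConstant above, the logical right-shift helpers can push
// the result out of Smi range, so both the constant and the two-register
// variants take a bailout label (exit / fail_ok).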
const int kNumShifts = 5;
__ movl(rax, Immediate(id));
for (int i = 0; i < kNumShifts; i++) {
  int shift = shifts[i];
  intptr_t result = static_cast<unsigned int>(x) >> shift;
  __ SmiShiftLogicalRightConstant(r9, rcx, shift, exit);
  __ SmiShiftLogicalRightConstant(r9, rcx, shift, &fail_ok);
  __ SmiShiftLogicalRight(r9, rcx, r8, &fail_ok3);
  __ addq(rax, Immediate(3));
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
const int kNumShifts = 5;
__ movl(rax, Immediate(id));
for (int i = 0; i < kNumShifts; i++) {
  int shift = shifts[i];
  int result = (x < 0) ? ~((~x) >> shift) : (x >> shift);
  __ SmiShiftArithmeticRightConstant(rcx, rcx, shift);
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
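// TestPositiveSmiPowerUp: PositiveSmiTimesPowerOfTwoToInteger64 must equal
// x << power for each power in {0, 1, 2, 3, 8, 16, 24, 31}, with and without a
// separate destination register.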
int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
int power_count = 8;
__ movl(rax, Immediate(id));
for (int i = 0; i < power_count; i++) {
  int power = powers[i];
  intptr_t result = static_cast<intptr_t>(x) << power;
  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rcx, power);
  __ PositiveSmiTimesPowerOfTwoToInteger64(rcx, rcx, power);
TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
  HandleScope handles(isolate);
  MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
  MacroAssembler* masm = &assembler;
  masm->GetCode(&desc);
  int result = FUNCTION_CAST<F0>(buffer)();
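// TEST(OperandOffset): pushes the marker values 0x100..0x109 onto the stack and
// fills data[] with i * 0x01010101, then reads the values back through
// Operand(base, offset) forms based on rsp, rbp, rbx and r8, comparing each
// load against the expected marker or data word.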
for (uint32_t i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
HandleScope handles(isolate);
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
__ pushq(Immediate(0x100));
__ pushq(Immediate(0x101));
__ pushq(Immediate(0x102));
__ pushq(Immediate(0x103));
__ pushq(Immediate(0x104));
__ pushq(Immediate(0x105));
__ pushq(Immediate(0x106));
__ pushq(Immediate(0x107));
__ pushq(Immediate(0x108));
__ pushq(Immediate(0x109));
__ movl(rcx, Immediate(2));
__ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1));
Operand sp0 = Operand(rsp, 0);
__ cmpl(rdx, Immediate(0x109));
__ cmpl(rdx, Immediate(0x107));
__ cmpl(rdx, Immediate(0x107));
__ cmpl(rdx, Immediate(0x105));
__ cmpl(rdx, Immediate(0x109));
__ movl(rdx, sp2c2);
__ cmpl(rdx, Immediate(0x105));
__ cmpl(rdx, Immediate(0x103));
__ cmpl(rdx, Immediate(0x107));
Operand bp0 = Operand(rbp, 0);
__ cmpl(rdx, Immediate(0x100));
__ cmpl(rdx, Immediate(0x102));
__ cmpl(rdx, Immediate(0x102));
__ cmpl(rdx, Immediate(0x100));
__ cmpl(rdx, Immediate(0x104));
__ movl(rdx, bp2c4);
__ cmpl(rdx, Immediate(0x102));
__ cmpl(rdx, Immediate(0x100));
__ cmpl(rdx, Immediate(0x104));
Operand bx0 = Operand(rbx, 0);
__ cmpl(rdx, Immediate(0x105));
__ cmpl(rdx, Immediate(0x100));
__ cmpl(rdx, Immediate(0x109));
__ cmpl(rdx, Immediate(0x103));
__ cmpl(rdx, Immediate(0x101));
__ cmpl(rdx, Immediate(0x105));
__ movl(rdx, bx2c2);
__ cmpl(rdx, Immediate(0x105));
__ cmpl(rdx, Immediate(0x103));
__ cmpl(rdx, Immediate(0x107));
Operand r80 = Operand(r8, 0);
__ cmpl(rdx, Immediate(0x80808080));
__ cmpl(rdx, Immediate(0x78787878));
__ cmpl(rdx, Immediate(0x88888888));
__ cmpl(rdx, Immediate(0x40404040));
__ cmpl(rdx, Immediate(0xC0C0C0C0));
__ cmpl(rdx, Immediate(0x88888888));
__ cmpl(rdx, Immediate(0x80808080));
__ cmpl(rdx, Immediate(0x90909090));
__ cmpl(rdx, Immediate(0x48484848));
__ cmpl(rdx, Immediate(0xC8C8C8C8));
__ cmpl(rdx, Immediate(0xC0C0C0C0));
__ cmpl(rdx, Immediate(0xB8B8B8B8));
__ cmpl(rdx, Immediate(0xC8C8C8C8));
__ cmpl(rdx, Immediate(0x80808080));
__ cmpl(rdx, Immediate(0xE0E0E0E0));
__ cmpl(rdx, Immediate(0x84848484));
__ cmpl(rdx, Immediate(0xFCFCFCFC));
__ movl(rdx, Operand(r80, 2));
__ cmpl(rdx, Immediate(0x81818080));
__ movl(rdx, Operand(r80, -2));
__ cmpl(rdx, Immediate(0x80807F7F));
__ movl(rdx, Operand(r80, 126));
__ cmpl(rdx, Immediate(0xA0A09F9F));
__ movl(rdx, Operand(r80, -126));
__ cmpl(rdx, Immediate(0x61616060));
__ movl(rdx, Operand(r80, 254));
__ cmpl(rdx, Immediate(0xC0C0BFBF));
__ movl(rdx, Operand(r80, -254));
__ cmpl(rdx, Immediate(0x41414040));
__ movl(rax, Immediate(0));
masm->GetCode(&desc);
int result = FUNCTION_CAST<F0>(buffer)();
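// TEST(LoadAndStoreWithRepresentation): stores a value with a given
// Representation and loads it back, checking the expected truncation or
// extension (e.g. -1 stored as an unsigned byte reads back as 255, as an
// unsigned half-word as 65535).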
TEST(LoadAndStoreWithRepresentation) {
  HandleScope handles(isolate);
  MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
  MacroAssembler* masm = &assembler;
  __ movq(rax, Immediate(1));
  __ movq(rcx, Immediate(-1));
  __ movl(rdx, Immediate(255));
  __ movq(rax, Immediate(2));
  __ movq(rax, Immediate(3));
  __ movq(rcx, Immediate(-1));
  __ movl(rdx, Immediate(-1));
  __ movq(rax, Immediate(4));
  __ movl(rcx, Immediate(0x44332211));
  __ movl(rdx, Immediate(0x44332211));
  __ movq(rax, Immediate(5));
  __ movq(rax, Immediate(6));
  __ movq(rax, Immediate(7));
  __ movq(rcx, Immediate(-1));
  __ movl(rdx, Immediate(255));
  __ movq(rcx, Immediate(-1));
  __ movq(rax, Immediate(8));
  __ movq(rcx, Immediate(-1));
  __ movl(rdx, Immediate(65535));
  __ movq(rcx, Immediate(-1));
  __ movq(rax, Immediate(9));
  __ movq(rcx, Immediate(-1));
  __ movl(rdx, Immediate(65535));
  masm->GetCode(&desc);
  int result = FUNCTION_CAST<F0>(buffer)();