using namespace v8::internal;
static void InitializeVM() {
  FLAG_disable_native_files = true;
CHECK(code->IsCode());
::printf("f() = %d\n", res);
__ addiu(a1, a1, -1);
__ Branch(&L, ne, v1, Operand(0));
CHECK(code->IsCode());
::printf("f() = %d\n", res);
__ ori(t0, zero_reg, 0);
__ ori(t0, t0, 0x0f0f);
__ ori(t0, t0, 0xf0f0);
__ addiu(t2, t1, -0x10);
__ li(t0, 0x00000004);
__ li(t1, 0x00001234);
__ li(t2, 0x12345678);
__ li(t3, 0x7fffffff);
__ li(t4, 0xfffffffc);
__ li(t5, 0xffffedcc);
__ li(t6, 0xedcba988);
__ li(t7, 0x80000000);
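// Each computed result is compared against its precomputed expectation;
// any mismatch branches to the shared error label.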
__ Branch(&error, ne, v0, Operand(0x0f234560));
__ Branch(&error, ne, v0, Operand(0x00001234));
__ Branch(&error, ne, v1, Operand(0x80000003));
__ Branch(&error, ne, v1, Operand(0x7ffffffc));
__ Branch(&error, ne, v0, Operand(0xedcba983));
__ Branch(&error, ne, v0, Operand(0x1));
__ Branch(&error, ne, v0, Operand(0x0));
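// addiu with negative immediates, worked out: 0x7421 - 0x1 - 0x20 = 0x7400.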
__ addiu(v0, zero_reg, 0x7421);
__ addiu(v0, v0, -0x1);
__ addiu(v0, v0, -0x20);
__ Branch(&error, ne, v0, Operand(0x00007400));
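// addiu never traps on overflow: 0x7fffffff + 1 simply wraps to 0x80000000.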
__ addiu(v1, t3, 0x1);
__ Branch(&error, ne, v1, Operand(0x80000000));
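// slti vs. sltiu with t1 = 0x1234: the signed chain compares 1 against
// the sign-extended immediate -0x8000 and yields 0, while the unsigned
// chain keeps 1 < 0x8000 true and yields 1.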
__ slti(v0, t1, 0x00002000);
__ slti(v0, v0, 0xffff8000);
__ Branch(&error, ne, v0, Operand(0x0));
__ sltiu(v0, t1, 0x00002000);
__ sltiu(v0, v0, 0x00008000);
__ Branch(&error, ne, v0, Operand(0x1));
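// Logical immediates, step by step: 0x1234 & 0xf0f0 = 0x1030,
// 0x1030 | 0x8a00 = 0x9a30, 0x9a30 ^ 0x83cc = 0x19fc.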
__ andi(v0, t1, 0xf0f0);
__ ori(v0, v0, 0x8a00);
__ xori(v0, v0, 0x83cc);
__ Branch(&error, ne, v0, Operand(0x000019fc));
__ Branch(&error, ne, v1, Operand(0x81230000));
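// In the full test the 51 checked here is the sum of three clz results
// on the constants loaded above: clz(0x4) = 29, clz(0x1234) = 19,
// clz(0x12345678) = 3.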
__ Branch(&error, ne, v0, Operand(51));
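// Bit-field ops: Ins(a0, t1, 12, 8) plants the low 8 bits of t1 (0x34)
// at bit 12 of a0, turning 0x7fffffff into 0x7ff34fff; Ext(a1, a0, 8, 12)
// then extracts 12 bits starting at bit 8, giving 0x34f.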
__ Ins(a0, t1, 12, 8);
__ Branch(&error, ne, a0, Operand(0x7ff34fff));
__ Ext(a1, a0, 8, 12);
__ Branch(&error, ne, a1, Operand(0x34f));
__ Branch(&error, ne, a0, Operand(t6));
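// All checks passed: load a distinctive "magic" value (a chunk of pi)
// as the return value so the caller can tell success from the error path.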
__ li(v0, 0x31415926);
CHECK(code->IsCode());
::printf("f() = %d\n", res);
CpuFeatures::Scope scope(FPU);
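// The FPU tests run behind a CpuFeatures::IsSupported(FPU) guard; the
// Scope object is what permits the assembler to emit FPU instructions.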
CHECK(code->IsCode());
CHECK_EQ(10.97451593465515908537, t.g);
CpuFeatures::Scope scope(FPU);
CHECK(code->IsCode());
CpuFeatures::Scope scope(FPU);
CHECK(code->IsCode());
__ ori(t5, t5, 0x3333);
CHECK(code->IsCode());
Label neither_is_nan, less_than, outa_here;
CpuFeatures::Scope scope(FPU);
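// bc1f/bc1t branch on the FPU condition flag set by the preceding
// floating-point compare: bc1f takes the branch when the flag is false
// (here: neither operand was NaN), bc1t when it is true. The second
// argument of bc1t(&less_than, 2) selects condition-code bit 2.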
__ bc1f(&neither_is_nan);
__ Branch(&outa_here);
__ bind(&neither_is_nan);
__ bc1t(&less_than, 2);
__ Branch(&outa_here);
__ Addu(t0, zero_reg, Operand(1));
CHECK(code->IsCode());
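// Rotate-right checks: rotating 0x12345678 right by 4 carries the low
// nibble to the top (0x81234567), and each further step of 4 bits moves
// the expected pattern along by one hex digit.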
__ Ror(t1, t0, 0x0004);
__ Ror(t2, t0, 0x0008);
__ Ror(t3, t0, 0x000c);
__ Ror(t4, t0, 0x0010);
__ Ror(t5, t0, 0x0014);
__ Ror(t6, t0, 0x0018);
__ Ror(t7, t0, 0x001c);
CHECK(code->IsCode());
t.input = 0x12345678;
CHECK_EQ(0x81234567, t.result_rotr_4);
CHECK_EQ(0x78123456, t.result_rotr_8);
CHECK_EQ(0x67812345, t.result_rotr_12);
CHECK_EQ(0x56781234, t.result_rotr_16);
CHECK_EQ(0x45678123, t.result_rotr_20);
CHECK_EQ(0x34567812, t.result_rotr_24);
CHECK_EQ(0x23456781, t.result_rotr_28);
CHECK_EQ(0x81234567, t.result_rotrv_4);
CHECK_EQ(0x78123456, t.result_rotrv_8);
CHECK_EQ(0x67812345, t.result_rotrv_12);
CHECK_EQ(0x56781234, t.result_rotrv_16);
CHECK_EQ(0x45678123, t.result_rotrv_20);
CHECK_EQ(0x34567812, t.result_rotrv_24);
CHECK_EQ(0x23456781, t.result_rotrv_28);
Label exit, exit2, exit3;
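// Branch() with a condition and an immediate Operand is a macro
// instruction: the assembler materializes the constant and emits a
// compare-and-branch sequence, since MIPS has no branch-if-ge-immediate.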
__ Branch(&exit, ge, a0, Operand(0x00000000));
__ Branch(&exit2, ge, a0, Operand(0x00001FFF));
__ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
CHECK(code->IsCode());
CpuFeatures::Scope scope(FPU);
CHECK(code->IsCode());
t.b_long_hi = 0x000000ff;
t.b_long_lo = 0x00ff00ff;
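// The 64-bit operand is supplied to the test as separate hi/lo 32-bit
// words, presumably matching how it is assembled into an FPU register pair.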
CHECK(code->IsCode());
t.reg_init = 0xaabbccdd;
t.mem_init = 0x11223344;
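// Distinct byte patterns in the register and in memory make it easy to
// verify exactly which bytes a partial-word access (lwl/lwr/swl/swr
// style) touched.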
__ addu(t1, t0, t3);
__ subu(t4, t0, t3);
__ addu(t0, t0, t0);
CHECK(code->IsCode());
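// The next fragment round-trips values between uint32_t and double,
// apparently via the unsigned-word <-> double conversion helpers:
// convert in, truncate back out, and compare both directions.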
double cvt_small_out;
uint32_t trunc_big_out;
uint32_t trunc_small_out;
uint32_t cvt_big_in;
uint32_t cvt_small_in;
CpuFeatures::Scope scope(FPU);
CHECK(code->IsCode());
t.cvt_big_in = 0xFFFFFFFF;
t.cvt_small_in = 333;
CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
CHECK_EQ(static_cast<int>(t.trunc_small_out),
         static_cast<int>(t.cvt_small_in));
#define ROUND_STRUCT_ELEMENT(x) \
  int32_t x##_up_out; \
  int32_t x##_down_out; \
  int32_t neg_##x##_up_out; \
  int32_t neg_##x##_down_out; \
  uint32_t x##_err1_out; \
  uint32_t x##_err2_out; \
  uint32_t x##_err3_out; \
  uint32_t x##_err4_out; \
  int32_t x##_invalid_result;
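// For example, ROUND_STRUCT_ELEMENT(round) expands to the fields
// round_up_out, round_down_out, neg_round_up_out, neg_round_down_out,
// round_err1_out .. round_err4_out, and round_invalid_result, so every
// rounding instruction under test gets its own result slots in T.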
double round_down_in;
double neg_round_up_in;
double neg_round_down_in;
#undef ROUND_STRUCT_ELEMENT
CpuFeatures::Scope scope(FPU);
#define RUN_ROUND_TEST(x) \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
  __ x##_w_d(f0, f0); \
  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
  __ x##_w_d(f0, f0); \
  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
  __ x##_w_d(f0, f0); \
  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
  __ x##_w_d(f0, f0); \
  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
  __ ctc1(zero_reg, FCSR); \
  __ x##_w_d(f0, f0); \
  __ cfc1(a2, FCSR); \
  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
  __ ctc1(zero_reg, FCSR); \
  __ x##_w_d(f0, f0); \
  __ cfc1(a2, FCSR); \
  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
  __ ctc1(zero_reg, FCSR); \
  __ x##_w_d(f0, f0); \
  __ cfc1(a2, FCSR); \
  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
  \
  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
  __ ctc1(zero_reg, FCSR); \
  __ x##_w_d(f0, f0); \
  __ cfc1(a2, FCSR); \
  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
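// RUN_ROUND_TEST(trunc), for instance, emits trunc_w_d (via x##_w_d)
// against each input: the plain inputs just store their rounded
// results, while the err* inputs first clear FCSR with ctc1, convert,
// then read the accumulated exception flags back with cfc1 so they can
// be checked per input.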
CHECK(code->IsCode());
t.round_up_in = 123.51;
t.round_down_in = 123.49;
t.neg_round_up_in = -123.5;
t.neg_round_down_in = -123.49;
t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
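// err3_in is 0xFFFFFFFF + 1 = 2^32 = 4294967296.0, outside the range of
// a 32-bit word, so converting it must raise the invalid-operation flag.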
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
#define CHECK_ROUND_RESULT(type) \
  CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
  CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
  CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
  CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
  CHECK_EQ(kFPUInvalidResult, t.type##_invalid_result);
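// Reading the expectations: err1 loses fractional bits (inexact flag),
// err2 converts with no flags raised, and err3/err4 are out of range
// (invalid-operation flag, with the destination left holding the
// kFPUInvalidResult pattern).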
__ beq(v0, v1, &target);
__ bne(v0, v1, &target);
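// Both raw branches reference the same not-yet-bound label, exercising
// the assembler's chaining of multiple uses of a single label (each raw
// branch also needs its delay slot filled in the full test).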