using namespace v8::internal;
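// The excerpts that follow all share the same scaffolding: assemble a small
// function, wrap it in a Code object, and call it through one of the F1..F4
// typedefs declared at the end of this listing. A minimal sketch of that
// pattern, assuming the cctest helpers (CcTest::InitializeVM,
// CcTest::i_isolate, CALL_GENERATED_CODE) and an illustrative test name:
TEST(AssemblerHarnessSketch) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  __ add(r0, r0, Operand(r1));  // return p0 + p1
  __ mov(pc, Operand(lr));      // return to the caller

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc, Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());

  F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(7, res);
}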
CHECK(code->IsCode());
::printf("f() = %d\n", res);
__ mov(r0, Operand::Zero());
__ teq(r1, Operand::Zero());
CHECK(code->IsCode());
::printf("f() = %d\n", res);
__ teq(r1, Operand::Zero());
__ RecordComment("dead code, just testing relocations");
__ RecordComment("dead code, just testing immediate operands");
CHECK(code->IsCode());
::printf("f() = %d\n", res);
CHECK(code->IsCode());
::printf("f() = %d\n", res);
CpuFeatureScope scope(&assm, VFP3);
__ vmov(d4, 1.000000059604644775390625);
__ vcvt_f64_s32(d4, 1);
CHECK(code->IsCode());
CHECK_EQ(1.000000059604644775390625, t.d);
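// A note on the constant above: 1.000000059604644775390625 is exactly
// 1 + 2^-24. A 64-bit double holds it exactly, but a 32-bit float rounds it
// back to 1.0, so the check catches an accidental single-precision round
// trip. Standalone illustration (not part of the test):
//   const double d = 1.000000059604644775390625;
//   assert(d == 1.0 + std::ldexp(1.0, -24));                    // exact as double
//   assert(static_cast<double>(static_cast<float>(d)) == 1.0);  // lost as float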
CpuFeatureScope scope(&assm, ARMv7);
CHECK(code->IsCode());
int res = reinterpret_cast<int>(
::printf("f() = %d\n", res);
CpuFeatureScope scope(&assm, ARMv7);
CHECK(code->IsCode());
int res = reinterpret_cast<int>(
::printf("f() = %d\n", res);
static void TestRoundingMode(VCVTTypes types,
                             VFPRoundingMode mode,
                             double value,
                             int expected,
                             bool expected_exception = false) {
CpuFeatureScope scope(&assm, VFP3);
Label wrong_exception;
__ b(&wrong_exception, expected_exception ? eq : ne);
__ bind(&wrong_exception);
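// The conditional branch above presumably follows a read of the FPSCR
// cumulative exception bits; a sketch of that shape (register choice and
// exact mask are assumptions, kVFPExceptionMask is listed among the
// referenced constants below):
//   __ vmrs(r2);                             // r2 = FPSCR
//   __ tst(r2, Operand(kVFPExceptionMask));  // did the vcvt raise anything?
//   __ b(&wrong_exception, expected_exception ? eq : ne);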
CHECK(code->IsCode());
int res = reinterpret_cast<int>(
::printf("res = %d\n", res);
TestRoundingMode(s32_f64, RN, 123.7, 124);
TestRoundingMode(s32_f64, RN, -123.7, -124);
TestRoundingMode(s32_f64, RN, 123456.2, 123456);
TestRoundingMode(s32_f64, RN, -123456.2, -123456);
TestRoundingMode(s32_f64, RM, 123.7, 123);
TestRoundingMode(s32_f64, RM, -123.7, -124);
TestRoundingMode(s32_f64, RM, 123456.2, 123456);
TestRoundingMode(s32_f64, RM, -123456.2, -123457);
TestRoundingMode(s32_f64, RZ, 123.7, 123);
TestRoundingMode(s32_f64, RZ, -123.7, -123);
TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
TestRoundingMode(u32_f64, RM, -0.5, 0, true);
TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
static const uint32_t kMaxUInt = 0xffffffffu;
TestRoundingMode(u32_f64, RZ, 123.7, 123);
TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
                 static_cast<uint32_t>(kMaxInt) + 1);
TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);
TestRoundingMode(u32_f64, RM, 123.7, 123);
TestRoundingMode(u32_f64, RM, 123456.2, 123456);
TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
                 static_cast<uint32_t>(kMaxInt) + 1);
TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);
TestRoundingMode(u32_f64, RN, 123.7, 124);
TestRoundingMode(u32_f64, RN, 123456.2, 123456);
TestRoundingMode(u32_f64, RN, (kMaxInt + 1.0),
                 static_cast<uint32_t>(kMaxInt) + 1);
TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
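// Reading the groups above: RN rounds to nearest, RM rounds toward minus
// infinity (floor), RZ rounds toward zero (truncation). For example, -123.7
// becomes -124 under RN and RM but -123 under RZ, and -123456.2 becomes
// -123457 only under RM. A host-side analogue using <cfenv> (a sketch, not
// part of the test):
//   std::fesetround(FE_TONEAREST);   // RN: nearbyint(-123.7) == -124
//   std::fesetround(FE_DOWNWARD);    // RM: nearbyint(-123.7) == -124
//   std::fesetround(FE_TOWARDZERO);  // RZ: nearbyint(-123.7) == -123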
CHECK(code->IsCode());
CHECK(code->IsCode());
CHECK(code->IsCode());
__ mov(r2, Operand::Zero());
__ mov(r2, Operand::Zero());
CHECK(code->IsCode());
CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
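// The expected value spells out that the shift under test is arithmetic:
// 0xabcd0000 is negative as an int32_t, so shifting right by one keeps the
// sign bit and yields 0xd5e68000 rather than 0x55e68000. (On the C++ side,
// right-shifting a negative value is implementation-defined before C++20,
// but the toolchains V8 targets perform an arithmetic shift.)
//   static_cast<int32_t>(0xabcd0000) >> 1  ==  static_cast<int32_t>(0xd5e68000)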
CpuFeatureScope scope(&assm, VFP3);
__ vmov(d20, 14.7610017472335499);
CHECK(code->IsCode());
CHECK_EQ(14.7610017472335499, t.a);
CHECK_EQ(3.84200491244266251, t.b);
CHECK_EQ(73.8818412254460241, t.c);
CHECK_EQ(14.7610017472335499, t.i);
CHECK_EQ(73.8818412254460241, t.k);
__ b(ne, &fpscr_done);
__ bind(&fpscr_done);
CHECK(code->IsCode());
const uint32_t kArmNanUpper32 = 0x7ff80000;
const uint32_t kArmNanLower32 = 0x00000000;
const uint64_t kArmNanInt64 =
    (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.add_result) & 0xffffffffu);
CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.sub_result) & 0xffffffffu);
CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.mul_result) & 0xffffffffu);
CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
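// The masks above look for the ARM default NaN: exponent all ones, top
// fraction bit set, everything else zero; the sign bit is ignored, which is
// why the upper word is masked with 0x7fffffff. A standalone sanity check
// (assumes the platform's quiet NaN matches the ARM default NaN, which holds
// on common targets):
//   uint64_t bits;
//   double nan = std::numeric_limits<double>::quiet_NaN();
//   std::memcpy(&bits, &nan, sizeof(bits));
//   assert((bits & 0x7fffffffffffffffULL) == kArmNanInt64);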
CpuFeatureScope scope(&assm, NEON);
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
__ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
__ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r4));
__ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));
__ vld1(Neon8, NeonListOperand(d1), NeonMemOperand(r4));
__ vst1(Neon8, NeonListOperand(d2, 2), NeonMemOperand(r4));
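// What the operand shapes above encode, in ARM assembly mnemonics
// (illustrative mapping only):
//   vld1.8 {d0, d1, d2, d3}, [r4]   <- NeonListOperand(d0, 4): four regs from d0
//   vld1.8 {d0}, [r4]               <- NeonListOperand(d0):    a single register
//   vst1.8 {d2, d3}, [r4]           <- NeonListOperand(d2, 2): two regs from d2
// Neon8 selects an 8-bit element size and NeonMemOperand(r4) addresses memory
// through r4.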
CHECK(code->IsCode());
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x21222324;
t.src3 = 0x31323334;
t.src4 = 0x41424344;
t.src5 = 0x51525354;
t.src6 = 0x61626364;
t.src7 = 0x71727374;
t.srcA0 = 0x41424344;
t.srcA1 = 0x81828384;
CHECK(code->IsCode());
t.src0 = 0x01020304;
t.src1 = 0x11121314;
t.src2 = 0x11121300;
for (size_t i = 0; i < 1 << 23; ++i) {
#define TEST_SDIV(expected_, dividend_, divisor_) \
  t.dividend = dividend_; \
  t.divisor = divisor_; \
  dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); \
  CHECK_EQ(expected_, t.result);
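// Hypothetical invocations of the macro above (the actual cases are omitted
// from this excerpt); ARM sdiv truncates the quotient toward zero and defines
// division by zero to produce 0:
//   TEST_SDIV(3, 10, 3);
//   TEST_SDIV(-3, -10, 3);
//   TEST_SDIV(0, 10, 0);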
CpuFeatureScope scope(&assm, SUDIV);
CHECK(code->IsCode());
Label start, target_away, target_faraway;
__ mov(r3, Operand::Zero());
__ mov_label_offset(r4, &start);
__ mov_label_offset(r1, &target_faraway);
__ mov_label_offset(r1, &target_away);
__ mov(r2, Operand::Zero());
__ cmp(r2, Operand::Zero());
__ b(eq, &target_away);
for (int i = 0; i < (1 << 10); i++) {
__ bind(&target_away);
for (int i = 0; i < (1 << 21); i++) {
__ bind(&target_faraway);
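// Why two filler loops: with 4-byte ARM instructions, 1 << 10 filler
// instructions place target_away roughly 4 KB past the mov_label_offset
// sites (an offset that still fits in 16 bits), while 1 << 21 place
// target_faraway roughly 8 MB away (it does not), so both a short-range and
// a long-range encoding of the label offset get exercised (presumably a
// single-instruction form versus a two-instruction form). Rough arithmetic,
// assuming one instruction per iteration:
//   (1 << 10) * 4 ==    4096 bytes  <= 0xffff
//   (1 << 21) * 4 == 8388608 bytes  >  0xffff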
assm.GetCode(&desc);
CHECK(code->IsCode());
F1 f = FUNCTION_CAST<F1>(code->entry());
::printf("f() = %d\n", res);
Object *(* F3)(void *p0, int p1, int p2, int p3, int p4)
#define CHECK_EQ(expected, value)
const LowDwVfpRegister d0
static bool IsSupported(CpuFeature f)
Object *(* F2)(int x, int y, int p2, int p3, int p4)
#define ASSERT(condition)
const LowDwVfpRegister d3
static Code * cast(Object *obj)
const VmovIndex VmovIndexHi
const uint32_t kVFPDefaultNaNModeControlBit
const uint64_t kHoleNanInt64
const LowDwVfpRegister d7
const LowDwVfpRegister d4
#define OFFSET_OF(type, field)
void GetCode(CodeDesc *desc)
const LowDwVfpRegister d6
const LowDwVfpRegister d5
Object *(* F1)(int x, int p1, int p2, int p3, int p4)
static i::Isolate * i_isolate()
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4)
const uint32_t kVFPExceptionMask
#define T(name, string, precedence)
MUST_USE_RESULT MaybeObject * CreateCode(const CodeDesc &desc, Code::Flags flags, Handle< Object > self_reference, bool immovable=false, bool crankshafted=false, int prologue_offset=Code::kPrologueOffsetNotSet)
const LowDwVfpRegister d2
static void InitializeVM()
const VmovIndex VmovIndexLo
static Flags ComputeFlags(Kind kind, InlineCacheState ic_state=UNINITIALIZED, ExtraICState extra_ic_state=kNoExtraICState, StubType type=NORMAL, InlineCacheHolderFlag holder=OWN_MAP)
Handle< Code > NewCode(const CodeDesc &desc, Code::Flags flags, Handle< Object > self_reference, bool immovable=false, bool crankshafted=false, int prologue_offset=Code::kPrologueOffsetNotSet)
Object *(* F4)(void *p0, void *p1, int p2, int p3, int p4)
#define TEST_SDIV(expected_, dividend_, divisor_)
const uint32_t kVFPRoundingModeMask
const LowDwVfpRegister d1