using namespace v8::internal;

typedef int (*F1)(int64_t x);
typedef int (*F2)(int64_t x, int64_t y);
typedef int (*F3)(double x);
typedef int64_t (*F4)(int64_t* x, int64_t* y);
typedef int64_t (*F5)(int64_t x);
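// Illustrative sketch only -- not part of the original file.  It shows the
// pattern the tests below follow with these typedefs: allocate an executable
// buffer, emit code through the Assembler (the file's `#define __ assm.`
// macro is assumed), and call the buffer via FUNCTION_CAST.  OS::Allocate,
// CcTest::i_isolate() and the arg2 register alias are assumptions based on
// the surrounding test code.
static int RunReturnSecondArgSketch() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(
      OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
  CHECK(buffer);
  Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));

  __ movq(rax, arg2);  // generated code simply returns its second argument
  __ ret(0);

  CodeDesc desc;
  assm.GetCode(&desc);
  return FUNCTION_CAST<F2>(buffer)(3, 2);  // expected to yield 2
}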
TEST(AssemblerX64ReturnOperation) {
  // ...
  int result = FUNCTION_CAST<F2>(buffer)(3, 2);
TEST(AssemblerX64StackOperations) {
  // ...
  int result = FUNCTION_CAST<F2>(buffer)(3, 2);
TEST(AssemblerX64ArithmeticOperations) {
  // ...
  int result = FUNCTION_CAST<F2>(buffer)(3, 2);
TEST(AssemblerX64ImulOperation) {
  // ...
  int result = FUNCTION_CAST<F2>(buffer)(3, 2);
  // ...
  result = FUNCTION_CAST<F2>(buffer)(0x100000000L, 0x100000000L);
  // ...
  result = FUNCTION_CAST<F2>(buffer)(-0x100000000L, 0x100000000L);
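  // The three calls above exercise a 64-bit multiply whose high word is
  // presumably what the generated code returns: 3 * 2 fits in 64 bits (high
  // word 0), 2^32 * 2^32 = 2^64 (high word 1), and -2^32 * 2^32 = -2^64
  // (high word -1); the elided CHECK_EQ lines would verify those values.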
TEST(AssemblerX64XchglOperations) {
  // ...
  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
TEST(AssemblerX64OrlOperations) {
  // ...
  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
TEST(AssemblerX64RollOperations) {
  // ...
  // roll is the 32-bit rotate-left form: it rotates only the low 32 bits of
  // rax, and writing the 32-bit register clears the upper half.
  __ roll(rax, Immediate(1));
  // ...
  int64_t result = FUNCTION_CAST<F5>(buffer)(src);
TEST(AssemblerX64SublOperations) {
  // ...
  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
TEST(AssemblerX64TestlOperations) {
  // ...
  __ movq(rax, Immediate(1));
  // ...
  __ j(zero, &done, Label::kNear);
  __ movq(rax, Immediate(0));
  // ...
  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
  CHECK_EQ(static_cast<int64_t>(1), result);
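  // testl sets ZF iff the bitwise AND of its (elided) operands is zero, so
  // the j(zero, &done, ...) above skips the movq(rax, Immediate(0)) path and
  // the generated function is expected to return the 1 loaded earlier --
  // which is what the CHECK_EQ verifies.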
TEST(AssemblerX64XorlOperations) {
  // ...
  int64_t result = FUNCTION_CAST<F4>(buffer)(&left, &right);
TEST(AssemblerX64MemoryOperands) {
  // ...
  const int kStackElementSize = 8;
  // ...
  int result = FUNCTION_CAST<F2>(buffer)(3, 2);
TEST(AssemblerX64ControlFlow) {
  // ...
  int result = FUNCTION_CAST<F2>(buffer)(3, 2);
TEST(AssemblerX64LoopImmediates) {
  // ...
  __ movq(rax, Immediate(-3));
  // ...
  __ bind(&Loop1_body);
  __ addq(rax, Immediate(7));
  __ bind(&Loop1_test);
  __ cmpq(rax, Immediate(20));
  // ...
  __ cmpq(rax, Immediate(25));
  // ...
  __ movq(rax, Immediate(0x11FEED00));
  // ...
  __ bind(&Loop2_body);
  __ addq(rax, Immediate(-0x1100));
  __ bind(&Loop2_test);
  __ cmpq(rax, Immediate(0x11FE8000));
  // ...
  __ cmpq(rax, Immediate(0x11FE7600));
  // ...
  __ movq(rax, Immediate(1));
  // ...
  __ movq(rax, Immediate(0));
  // ...
  int result = FUNCTION_CAST<F0>(buffer)();
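  // Consistent with the compares above (the jump conditions are elided): the
  // first loop starts at -3 and adds 7 per iteration (-3, 4, 11, 18, 25), so
  // it is expected to stop at 25; the second starts at 0x11FEED00 and
  // subtracts 0x1100 per iteration, stopping at 0x11FEED00 - 7 * 0x1100 =
  // 0x11FE7600.  The trailing movq(rax, 1) / movq(rax, 0) pair presumably
  // encodes the success / failure value returned through F0.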
TEST(OperandRegisterDependency) {
  int offsets[4] = {0, 1, 0xfed, 0xbeefcad};
  for (int i = 0; i < 4; i++) {
    int offset = offsets[i];
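    // The offsets 0, 1, 0xfed and 0xbeefcad presumably exercise the different
    // x64 displacement encodings (no displacement, 8-bit and 32-bit) for the
    // Operands built from them before their register dependencies are checked.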
TEST(AssemblerX64LabelChaining) {
  // ...

// (the remaining lines of this excerpt appear to come from a later test in
// the same file)
  Assembler assm(isolate, buffer, sizeof(buffer));
  // ...
  __ movq(rax, Immediate(1));
  __ movq(rbx, Immediate(2));
  __ movq(rcx, Immediate(3));
  __ movq(rdx, Immediate(4));
  __ movq(rdi, Immediate(5));
  __ movq(rsi, Immediate(6));
  for (int i = 0; i < 16; i++) {
    // ...
  }
  // ...
  __ cmpq(rax, Immediate(1));
  // ...
  __ cmpq(rbx, Immediate(2));
  // ...
  __ cmpq(rcx, Immediate(3));
  // ...
  __ cmpq(rdx, Immediate(4));
  // ...
  __ cmpq(rdi, Immediate(5));
  // ...
  __ cmpq(rsi, Immediate(6));
  // ...
  __ movq(rax, Immediate(42));
  // ...
  __ movq(rax, Immediate(13));
  // ...
  CHECK(code->IsCode());
  // ...
  F0 f = FUNCTION_CAST<F0>(code->entry());
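  // The generated function loads known values into six registers, emits 16
  // filler sequences in the loop (presumably variable-length NOPs), then
  // re-checks every register: rax ends up as 42 if all values survived and
  // 13 otherwise, so the caller obtained through FUNCTION_CAST<F0> can tell
  // success from failure.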
#define ELEMENT_COUNT 4
// ...
  CHECK(args[0]->IsArray());
  Assembler assm(isolate, buffer, sizeof(buffer));
  // ...
  for (int i = 0; i < ELEMENT_COUNT; i++) {
    __ movl(rax, Immediate(vec->Get(i)->Int32Value()));
    __ shl(rax, Immediate(0x20));
    __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
    // ...
  }
  // ...
  __ addq(rsp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
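  // Each loop iteration packs two consecutive 32-bit array elements into one
  // 64-bit value (element i in the upper half, element i + 1 in the lower
  // half); the packed words are presumably pushed, and the
  // ELEMENT_COUNT * sizeof(int32_t) bytes released by the addq(rsp, ...)
  // above are that 16-byte (128-bit) vector image on the stack.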
  // ...
  CHECK(code->IsCode());
  // ...
  F0 f = FUNCTION_CAST<F0>(code->entry());
TEST(StackAlignmentForSSE2) {
  // ...
  global_template->Set(v8_str("do_sse2"),
                       // ...
  // ...
      "function foo(vec) {"
      " return do_sse2(vec);"
  // ...
  int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
  // ...
  for (int i = 0; i < ELEMENT_COUNT; i++) {
    v8_vec->Set(i, v8_num(vec[i]));
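  // The JavaScript wrapper defined above calls the registered do_sse2
  // callback with this four-element vector; presumably the callback runs the
  // SSE2 snippet from the preceding excerpt, so the test checks that the
  // generated code works when entered from JavaScript, i.e. with whatever
  // stack alignment that call path produces.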
  Assembler assm(isolate, buffer, sizeof(buffer));
  { CpuFeatureScope fscope2(&assm, SSE4_1);
    // ...
  }
  // ...
  CHECK(code->IsCode());
typedef int (*F6)(float x, float y);
// ...
  CHECK(code->IsCode());
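  // Hypothetical usage of F6 (not shown in this excerpt), following the same
  // FUNCTION_CAST pattern as the integer tests above:
  //   int result = FUNCTION_CAST<F6>(code->entry())(1.0f, 2.0f);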