test-assembler-arm.cc (V8 3.25.30, node 0.11.13)
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "disassembler.h"
#include "factory.h"
#include "arm/simulator-arm.h"
#include "arm/assembler-arm-inl.h"
#include "cctest.h"

using namespace v8::internal;


// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);

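// In the tests below, "__ foo(...)" expands to "assm.foo(...)" via the macro
// defined next, so each test body reads like ARM assembly.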
#define __ assm.

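// Smoke test: assemble "add r0, r0, r1; return" and call it with (3, 4),
// expecting 7.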
TEST(0) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  __ add(r0, r0, Operand(r1));
  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(7, res);
}

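// Test branches, teq and sub: sum the integers from 1 to x in a loop
// (x = 100 gives 5050).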
TEST(1) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(r1, Operand(r0));
  __ mov(r0, Operand::Zero());
  __ b(&C);

  __ bind(&L);
  __ add(r0, r0, Operand(r1));
  __ sub(r1, r1, Operand(1));

  __ bind(&C);
  __ teq(r1, Operand::Zero());
  __ b(ne, &L);
  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(5050, res);
}

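// Test mul: compute x! iteratively (10! = 3628800). The trailing, never
// executed moves only exercise relocation entries and immediate encodings.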
TEST(2) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(r1, Operand(r0));
  __ mov(r0, Operand(1));
  __ b(&C);

  __ bind(&L);
  __ mul(r0, r1, r0);
  __ sub(r1, r1, Operand(1));

  __ bind(&C);
  __ teq(r1, Operand::Zero());
  __ b(ne, &L);
  __ mov(pc, Operand(lr));

  // Some relocated stuff here, not executed.
  __ RecordComment("dead code, just testing relocations");
  __ mov(r0, Operand(isolate->factory()->true_value()));
  __ RecordComment("dead code, just testing immediate operands");
  __ mov(r0, Operand(-1));
  __ mov(r0, Operand(0xFF000000));
  __ mov(r0, Operand(0xF0F0F0F0));
  __ mov(r0, Operand(0xFFF0FFFF));

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(3628800, res);
}

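// Test word, byte and halfword loads and stores (ldr/str, ldrsb/strb,
// ldrsh/strh) against the fields of a packed struct.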
TEST(3) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    int i;
    char c;
    int16_t s;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));
  __ mov(r4, Operand(r0));
  __ ldr(r0, MemOperand(r4, OFFSET_OF(T, i)));
  __ mov(r2, Operand(r0, ASR, 1));
  __ str(r2, MemOperand(r4, OFFSET_OF(T, i)));
  __ ldrsb(r2, MemOperand(r4, OFFSET_OF(T, c)));
  __ add(r0, r2, Operand(r0));
  __ mov(r2, Operand(r2, LSL, 2));
  __ strb(r2, MemOperand(r4, OFFSET_OF(T, c)));
  __ ldrsh(r2, MemOperand(r4, OFFSET_OF(T, s)));
  __ add(r0, r2, Operand(r0));
  __ mov(r2, Operand(r2, ASR, 3));
  __ strh(r2, MemOperand(r4, OFFSET_OF(T, s)));
  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
  t.i = 100000;
  t.c = 10;
  t.s = 1000;
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(101010, res);
  CHECK_EQ(100000/2, t.i);
  CHECK_EQ(10*4, t.c);
  CHECK_EQ(1000/8, t.s);
}

TEST(4) {
  // Test the VFP floating point instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
    int i;
    double j;
    double m;
    double n;
    float x;
    float y;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles and floats.
  Assembler assm(isolate, NULL, 0);
  Label L, C;


  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatureScope scope(&assm, VFP3);

    __ mov(ip, Operand(sp));
    __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
    __ sub(fp, ip, Operand(4));

    __ mov(r4, Operand(r0));
    __ vldr(d6, r4, OFFSET_OF(T, a));
    __ vldr(d7, r4, OFFSET_OF(T, b));
    __ vadd(d5, d6, d7);
    __ vstr(d5, r4, OFFSET_OF(T, c));

    __ vmla(d5, d6, d7);
    __ vmls(d5, d5, d6);

    __ vmov(r2, r3, d5);
    __ vmov(d4, r2, r3);
    __ vstr(d4, r4, OFFSET_OF(T, b));

    // Load t.x and t.y, switch values, and store back to the struct.
    __ vldr(s0, r4, OFFSET_OF(T, x));
    __ vldr(s31, r4, OFFSET_OF(T, y));
    __ vmov(s16, s0);
    __ vmov(s0, s31);
    __ vmov(s31, s16);
    __ vstr(s0, r4, OFFSET_OF(T, x));
    __ vstr(s31, r4, OFFSET_OF(T, y));

    // Move a literal into a register that can be encoded in the instruction.
    __ vmov(d4, 1.0);
    __ vstr(d4, r4, OFFSET_OF(T, e));

    // Move a literal into a register that requires 64 bits to encode.
    // 0x3ff0000010000000 = 1.000000059604644775390625
    __ vmov(d4, 1.000000059604644775390625);
    __ vstr(d4, r4, OFFSET_OF(T, d));

    // Convert from floating point to integer.
    __ vmov(d4, 2.0);
    __ vcvt_s32_f64(s31, d4);
    __ vstr(s31, r4, OFFSET_OF(T, i));

    // Convert from integer to floating point.
    __ mov(lr, Operand(42));
    __ vmov(s31, lr);
    __ vcvt_f64_s32(d4, s31);
    __ vstr(d4, r4, OFFSET_OF(T, f));

    // Convert from fixed point to floating point.
    __ mov(lr, Operand(1234));
    __ vmov(s8, lr);
    __ vcvt_f64_s32(d4, 1);
    __ vstr(d4, r4, OFFSET_OF(T, j));

    // Test vabs.
    __ vldr(d1, r4, OFFSET_OF(T, g));
    __ vabs(d0, d1);
    __ vstr(d0, r4, OFFSET_OF(T, g));
    __ vldr(d2, r4, OFFSET_OF(T, h));
    __ vabs(d0, d2);
    __ vstr(d0, r4, OFFSET_OF(T, h));

    // Test vneg.
    __ vldr(d1, r4, OFFSET_OF(T, m));
    __ vneg(d0, d1);
    __ vstr(d0, r4, OFFSET_OF(T, m));
    __ vldr(d1, r4, OFFSET_OF(T, n));
    __ vneg(d0, d1);
    __ vstr(d0, r4, OFFSET_OF(T, n));

    __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
    t.a = 1.5;
    t.b = 2.75;
    t.c = 17.17;
    t.d = 0.0;
    t.e = 0.0;
    t.f = 0.0;
    t.g = -2718.2818;
    t.h = 31415926.5;
    t.i = 0;
    t.j = 0;
    t.m = -2718.2818;
    t.n = 123.456;
    t.x = 4.5;
    t.y = 9.0;
    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
    USE(dummy);
    CHECK_EQ(4.5, t.y);
    CHECK_EQ(9.0, t.x);
    CHECK_EQ(-123.456, t.n);
    CHECK_EQ(2718.2818, t.m);
    CHECK_EQ(2, t.i);
    CHECK_EQ(2718.2818, t.g);
    CHECK_EQ(31415926.5, t.h);
    CHECK_EQ(617.0, t.j);
    CHECK_EQ(42.0, t.f);
    CHECK_EQ(1.0, t.e);
    CHECK_EQ(1.000000059604644775390625, t.d);
    CHECK_EQ(4.25, t.c);
    CHECK_EQ(-4.1875, t.b);
    CHECK_EQ(1.5, t.a);
  }
}

TEST(5) {
  // Test the ARMv7 bitfield instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(&assm, ARMv7);
    // On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
    __ ubfx(r0, r0, 1, 12);  // 0b00..010101010101 = 0x555
    __ sbfx(r0, r0, 0, 5);   // 0b11..111111110101 = -11
    __ bfc(r0, 1, 3);        // 0b11..111111110001 = -15
    __ mov(r1, Operand(7));
    __ bfi(r0, r1, 3, 3);    // 0b11..111111111001 = -7
    __ mov(pc, Operand(lr));

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
    int res = reinterpret_cast<int>(
        CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
    ::printf("f() = %d\n", res);
    CHECK_EQ(-7, res);
  }
}

TEST(6) {
  // Test saturating instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(&assm, ARMv7);
    __ usat(r1, 8, Operand(r0));           // Sat 0xFFFF to 0-255 = 0xFF.
    __ usat(r2, 12, Operand(r0, ASR, 9));  // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
    __ usat(r3, 1, Operand(r0, LSL, 16));  // Sat (0xFFFF<<16) to 0-1 = 0x0.
    __ add(r0, r1, Operand(r2));
    __ add(r0, r0, Operand(r3));
    __ mov(pc, Operand(lr));

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
    int res = reinterpret_cast<int>(
        CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
    ::printf("f() = %d\n", res);
    CHECK_EQ(382, res);  // 0xFF + 0x7F + 0x0.
  }
}

enum VCVTTypes {
  s32_f64,
  u32_f64
};

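// Helper for TEST(7): assemble a stub that installs the given FPSCR rounding
// mode, converts 'value' with vcvt using that mode, and compares the VFP
// exception state against 'expected_exception'; on a mismatch the stub
// returns the sentinel value 11223344 instead of the conversion result.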
static void TestRoundingMode(VCVTTypes types,
                             VFPRoundingMode mode,
                             double value,
                             int expected,
                             bool expected_exception = false) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatureScope scope(&assm, VFP3);

    Label wrong_exception;

    __ vmrs(r1);
    // Set custom FPSCR.
    __ bic(r2, r1, Operand(kVFPRoundingModeMask | kVFPExceptionMask));
    __ orr(r2, r2, Operand(mode));
    __ vmsr(r2);

    // Load value, convert, and move back result to r0 if everything went well.
    __ vmov(d1, value);
    switch (types) {
      case s32_f64:
        __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
        break;

      case u32_f64:
        __ vcvt_u32_f64(s0, d1, kFPSCRRounding);
        break;

      default:
        UNREACHABLE();
        break;
    }
    // Check for VFP exceptions.
    __ vmrs(r2);
    __ tst(r2, Operand(kVFPExceptionMask));
    // Check that we behaved as expected.
    __ b(&wrong_exception,
         expected_exception ? eq : ne);
    // There was no exception. Retrieve the result and return.
    __ vmov(r0, s0);
    __ mov(pc, Operand(lr));

    // The exception behaviour is not what we expected.
    // Load a special value and return.
    __ bind(&wrong_exception);
    __ mov(r0, Operand(11223344));
    __ mov(pc, Operand(lr));

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
    int res = reinterpret_cast<int>(
        CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
    ::printf("res = %d\n", res);
    CHECK_EQ(expected, res);
  }
}

TEST(7) {
  CcTest::InitializeVM();
  // Test VFP rounding modes.

  // s32_f64 (double to signed integer).

  TestRoundingMode(s32_f64, RN, 0, 0);
  TestRoundingMode(s32_f64, RN, 0.5, 0);
  TestRoundingMode(s32_f64, RN, -0.5, 0);
  TestRoundingMode(s32_f64, RN, 1.5, 2);
  TestRoundingMode(s32_f64, RN, -1.5, -2);
  TestRoundingMode(s32_f64, RN, 123.7, 124);
  TestRoundingMode(s32_f64, RN, -123.7, -124);
  TestRoundingMode(s32_f64, RN, 123456.2, 123456);
  TestRoundingMode(s32_f64, RN, -123456.2, -123456);
  TestRoundingMode(s32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(s32_f64, RN, (kMaxInt + 0.49), kMaxInt);
  TestRoundingMode(s32_f64, RN, (kMaxInt + 1.0), kMaxInt, true);
  TestRoundingMode(s32_f64, RN, (kMaxInt + 0.5), kMaxInt, true);
  TestRoundingMode(s32_f64, RN, static_cast<double>(kMinInt), kMinInt);
  TestRoundingMode(s32_f64, RN, (kMinInt - 0.5), kMinInt);
  TestRoundingMode(s32_f64, RN, (kMinInt - 1.0), kMinInt, true);
  TestRoundingMode(s32_f64, RN, (kMinInt - 0.51), kMinInt, true);

  TestRoundingMode(s32_f64, RM, 0, 0);
  TestRoundingMode(s32_f64, RM, 0.5, 0);
  TestRoundingMode(s32_f64, RM, -0.5, -1);
  TestRoundingMode(s32_f64, RM, 123.7, 123);
  TestRoundingMode(s32_f64, RM, -123.7, -124);
  TestRoundingMode(s32_f64, RM, 123456.2, 123456);
  TestRoundingMode(s32_f64, RM, -123456.2, -123457);
  TestRoundingMode(s32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(s32_f64, RM, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(s32_f64, RM, (kMaxInt + 1.0), kMaxInt, true);
  TestRoundingMode(s32_f64, RM, static_cast<double>(kMinInt), kMinInt);
  TestRoundingMode(s32_f64, RM, (kMinInt - 0.5), kMinInt, true);
  TestRoundingMode(s32_f64, RM, (kMinInt + 0.5), kMinInt);

  TestRoundingMode(s32_f64, RZ, 0, 0);
  TestRoundingMode(s32_f64, RZ, 0.5, 0);
  TestRoundingMode(s32_f64, RZ, -0.5, 0);
  TestRoundingMode(s32_f64, RZ, 123.7, 123);
  TestRoundingMode(s32_f64, RZ, -123.7, -123);
  TestRoundingMode(s32_f64, RZ, 123456.2, 123456);
  TestRoundingMode(s32_f64, RZ, -123456.2, -123456);
  TestRoundingMode(s32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(s32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(s32_f64, RZ, (kMaxInt + 1.0), kMaxInt, true);
  TestRoundingMode(s32_f64, RZ, static_cast<double>(kMinInt), kMinInt);
  TestRoundingMode(s32_f64, RZ, (kMinInt - 0.5), kMinInt);
  TestRoundingMode(s32_f64, RZ, (kMinInt - 1.0), kMinInt, true);


  // u32_f64 (double to unsigned integer).

  // Negative values.
  TestRoundingMode(u32_f64, RN, -0.5, 0);
  TestRoundingMode(u32_f64, RN, -123456.7, 0, true);
  TestRoundingMode(u32_f64, RN, static_cast<double>(kMinInt), 0, true);
  TestRoundingMode(u32_f64, RN, kMinInt - 1.0, 0, true);

  TestRoundingMode(u32_f64, RM, -0.5, 0, true);
  TestRoundingMode(u32_f64, RM, -123456.7, 0, true);
  TestRoundingMode(u32_f64, RM, static_cast<double>(kMinInt), 0, true);
  TestRoundingMode(u32_f64, RM, kMinInt - 1.0, 0, true);

  TestRoundingMode(u32_f64, RZ, -0.5, 0);
  TestRoundingMode(u32_f64, RZ, -123456.7, 0, true);
  TestRoundingMode(u32_f64, RZ, static_cast<double>(kMinInt), 0, true);
  TestRoundingMode(u32_f64, RZ, kMinInt - 1.0, 0, true);

  // Positive values.
  // kMaxInt is the maximum *signed* integer: 0x7fffffff.
  static const uint32_t kMaxUInt = 0xffffffffu;
  TestRoundingMode(u32_f64, RZ, 0, 0);
  TestRoundingMode(u32_f64, RZ, 0.5, 0);
  TestRoundingMode(u32_f64, RZ, 123.7, 123);
  TestRoundingMode(u32_f64, RZ, 123456.2, 123456);
  TestRoundingMode(u32_f64, RZ, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(u32_f64, RZ, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(u32_f64, RZ, (kMaxInt + 1.0),
                   static_cast<uint32_t>(kMaxInt) + 1);
  TestRoundingMode(u32_f64, RZ, (kMaxUInt + 0.5), kMaxUInt);
  TestRoundingMode(u32_f64, RZ, (kMaxUInt + 1.0), kMaxUInt, true);

  TestRoundingMode(u32_f64, RM, 0, 0);
  TestRoundingMode(u32_f64, RM, 0.5, 0);
  TestRoundingMode(u32_f64, RM, 123.7, 123);
  TestRoundingMode(u32_f64, RM, 123456.2, 123456);
  TestRoundingMode(u32_f64, RM, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(u32_f64, RM, (kMaxInt + 0.5), kMaxInt);
  TestRoundingMode(u32_f64, RM, (kMaxInt + 1.0),
                   static_cast<uint32_t>(kMaxInt) + 1);
  TestRoundingMode(u32_f64, RM, (kMaxUInt + 0.5), kMaxUInt);
  TestRoundingMode(u32_f64, RM, (kMaxUInt + 1.0), kMaxUInt, true);

  TestRoundingMode(u32_f64, RN, 0, 0);
  TestRoundingMode(u32_f64, RN, 0.5, 0);
  TestRoundingMode(u32_f64, RN, 1.5, 2);
  TestRoundingMode(u32_f64, RN, 123.7, 124);
  TestRoundingMode(u32_f64, RN, 123456.2, 123456);
  TestRoundingMode(u32_f64, RN, static_cast<double>(kMaxInt), kMaxInt);
  TestRoundingMode(u32_f64, RN, (kMaxInt + 0.49), kMaxInt);
  TestRoundingMode(u32_f64, RN, (kMaxInt + 0.5),
                   static_cast<uint32_t>(kMaxInt) + 1);
  TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.49), kMaxUInt);
  TestRoundingMode(u32_f64, RN, (kMaxUInt + 0.5), kMaxUInt, true);
  TestRoundingMode(u32_f64, RN, (kMaxUInt + 1.0), kMaxUInt, true);
}

TEST(8) {
  // Test VFP multi load/store with ia_w.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
  } D;
  D d;

  typedef struct {
    float a;
    float b;
    float c;
    float d;
    float e;
    float f;
    float g;
    float h;
  } F;
  F f;

  // Create a function that uses vldm/vstm to move some double and
  // single precision values around in memory.
  Assembler assm(isolate, NULL, 0);

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));

  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
  __ vldm(ia_w, r4, d0, d3);
  __ vldm(ia_w, r4, d4, d7);

  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
  __ vstm(ia_w, r4, d6, d7);
  __ vstm(ia_w, r4, d0, d5);

  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
  __ vldm(ia_w, r4, s0, s3);
  __ vldm(ia_w, r4, s4, s7);

  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
  __ vstm(ia_w, r4, s6, s7);
  __ vstm(ia_w, r4, s0, s5);

  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
  d.a = 1.1;
  d.b = 2.2;
  d.c = 3.3;
  d.d = 4.4;
  d.e = 5.5;
  d.f = 6.6;
  d.g = 7.7;
  d.h = 8.8;

  f.a = 1.0;
  f.b = 2.0;
  f.c = 3.0;
  f.d = 4.0;
  f.e = 5.0;
  f.f = 6.0;
  f.g = 7.0;
  f.h = 8.0;

  Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(7.7, d.a);
  CHECK_EQ(8.8, d.b);
  CHECK_EQ(1.1, d.c);
  CHECK_EQ(2.2, d.d);
  CHECK_EQ(3.3, d.e);
  CHECK_EQ(4.4, d.f);
  CHECK_EQ(5.5, d.g);
  CHECK_EQ(6.6, d.h);

  CHECK_EQ(7.0, f.a);
  CHECK_EQ(8.0, f.b);
  CHECK_EQ(1.0, f.c);
  CHECK_EQ(2.0, f.d);
  CHECK_EQ(3.0, f.e);
  CHECK_EQ(4.0, f.f);
  CHECK_EQ(5.0, f.g);
  CHECK_EQ(6.0, f.h);
}

TEST(9) {
  // Test VFP multi load/store with ia.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
  } D;
  D d;

  typedef struct {
    float a;
    float b;
    float c;
    float d;
    float e;
    float f;
    float g;
    float h;
  } F;
  F f;

  // Create a function that uses vldm/vstm to move some double and
  // single precision values around in memory.
  Assembler assm(isolate, NULL, 0);

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));

  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
  __ vldm(ia, r4, d0, d3);
  __ add(r4, r4, Operand(4 * 8));
  __ vldm(ia, r4, d4, d7);

  __ add(r4, r0, Operand(OFFSET_OF(D, a)));
  __ vstm(ia, r4, d6, d7);
  __ add(r4, r4, Operand(2 * 8));
  __ vstm(ia, r4, d0, d5);

  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
  __ vldm(ia, r4, s0, s3);
  __ add(r4, r4, Operand(4 * 4));
  __ vldm(ia, r4, s4, s7);

  __ add(r4, r1, Operand(OFFSET_OF(F, a)));
  __ vstm(ia, r4, s6, s7);
  __ add(r4, r4, Operand(2 * 4));
  __ vstm(ia, r4, s0, s5);

  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
  d.a = 1.1;
  d.b = 2.2;
  d.c = 3.3;
  d.d = 4.4;
  d.e = 5.5;
  d.f = 6.6;
  d.g = 7.7;
  d.h = 8.8;

  f.a = 1.0;
  f.b = 2.0;
  f.c = 3.0;
  f.d = 4.0;
  f.e = 5.0;
  f.f = 6.0;
  f.g = 7.0;
  f.h = 8.0;

  Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(7.7, d.a);
  CHECK_EQ(8.8, d.b);
  CHECK_EQ(1.1, d.c);
  CHECK_EQ(2.2, d.d);
  CHECK_EQ(3.3, d.e);
  CHECK_EQ(4.4, d.f);
  CHECK_EQ(5.5, d.g);
  CHECK_EQ(6.6, d.h);

  CHECK_EQ(7.0, f.a);
  CHECK_EQ(8.0, f.b);
  CHECK_EQ(1.0, f.c);
  CHECK_EQ(2.0, f.d);
  CHECK_EQ(3.0, f.e);
  CHECK_EQ(4.0, f.f);
  CHECK_EQ(5.0, f.g);
  CHECK_EQ(6.0, f.h);
}

TEST(10) {
  // Test VFP multi load/store with db_w.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
  } D;
  D d;

  typedef struct {
    float a;
    float b;
    float c;
    float d;
    float e;
    float f;
    float g;
    float h;
  } F;
  F f;

  // Create a function that uses vldm/vstm to move some double and
  // single precision values around in memory.
  Assembler assm(isolate, NULL, 0);

  __ mov(ip, Operand(sp));
  __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
  __ sub(fp, ip, Operand(4));

  __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
  __ vldm(db_w, r4, d4, d7);
  __ vldm(db_w, r4, d0, d3);

  __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
  __ vstm(db_w, r4, d0, d5);
  __ vstm(db_w, r4, d6, d7);

  __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
  __ vldm(db_w, r4, s4, s7);
  __ vldm(db_w, r4, s0, s3);

  __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
  __ vstm(db_w, r4, s0, s5);
  __ vstm(db_w, r4, s6, s7);

  __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
  d.a = 1.1;
  d.b = 2.2;
  d.c = 3.3;
  d.d = 4.4;
  d.e = 5.5;
  d.f = 6.6;
  d.g = 7.7;
  d.h = 8.8;

  f.a = 1.0;
  f.b = 2.0;
  f.c = 3.0;
  f.d = 4.0;
  f.e = 5.0;
  f.f = 6.0;
  f.g = 7.0;
  f.h = 8.0;

  Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(7.7, d.a);
  CHECK_EQ(8.8, d.b);
  CHECK_EQ(1.1, d.c);
  CHECK_EQ(2.2, d.d);
  CHECK_EQ(3.3, d.e);
  CHECK_EQ(4.4, d.f);
  CHECK_EQ(5.5, d.g);
  CHECK_EQ(6.6, d.h);

  CHECK_EQ(7.0, f.a);
  CHECK_EQ(8.0, f.b);
  CHECK_EQ(1.0, f.c);
  CHECK_EQ(2.0, f.d);
  CHECK_EQ(3.0, f.e);
  CHECK_EQ(4.0, f.f);
  CHECK_EQ(5.0, f.g);
  CHECK_EQ(6.0, f.h);
}

TEST(11) {
  // Test instructions using the carry flag.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    int32_t a;
    int32_t b;
    int32_t c;
    int32_t d;
  } I;
  I i;

  i.a = 0xabcd0001;
  i.b = 0xabcd0000;

  Assembler assm(isolate, NULL, 0);

  // Test HeapObject untagging.
  __ ldr(r1, MemOperand(r0, OFFSET_OF(I, a)));
  __ mov(r1, Operand(r1, ASR, 1), SetCC);
  __ adc(r1, r1, Operand(r1), LeaveCC, cs);
  __ str(r1, MemOperand(r0, OFFSET_OF(I, a)));

  __ ldr(r2, MemOperand(r0, OFFSET_OF(I, b)));
  __ mov(r2, Operand(r2, ASR, 1), SetCC);
  __ adc(r2, r2, Operand(r2), LeaveCC, cs);
  __ str(r2, MemOperand(r0, OFFSET_OF(I, b)));

  // Test corner cases.
  __ mov(r1, Operand(0xffffffff));
  __ mov(r2, Operand::Zero());
  __ mov(r3, Operand(r1, ASR, 1), SetCC);  // Set the carry.
  __ adc(r3, r1, Operand(r2));
  __ str(r3, MemOperand(r0, OFFSET_OF(I, c)));

  __ mov(r1, Operand(0xffffffff));
  __ mov(r2, Operand::Zero());
  __ mov(r3, Operand(r2, ASR, 1), SetCC);  // Unset the carry.
  __ adc(r3, r1, Operand(r2));
  __ str(r3, MemOperand(r0, OFFSET_OF(I, d)));

  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
  Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(0xabcd0001, i.a);
  CHECK_EQ(static_cast<int32_t>(0xabcd0000) >> 1, i.b);
  CHECK_EQ(0x00000000, i.c);
  CHECK_EQ(0xffffffff, i.d);
}

TEST(12) {
  // Test chaining of label usages within instructions (issue 1644).
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  Assembler assm(isolate, NULL, 0);
  Label target;
  __ b(eq, &target);
  __ b(ne, &target);
  __ bind(&target);
  __ nop();
}

TEST(13) {
  // Test VFP instructions using registers d16-d31.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  if (!CpuFeatures::IsSupported(VFP32DREGS)) {
    return;
  }

  typedef struct {
    double a;
    double b;
    double c;
    double x;
    double y;
    double z;
    double i;
    double j;
    double k;
    uint32_t low;
    uint32_t high;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles and floats.
  Assembler assm(isolate, NULL, 0);
  Label L, C;


  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatureScope scope(&assm, VFP3);

    __ stm(db_w, sp, r4.bit() | lr.bit());

    // Load a, b, c into d16, d17, d18.
    __ mov(r4, Operand(r0));
    __ vldr(d16, r4, OFFSET_OF(T, a));
    __ vldr(d17, r4, OFFSET_OF(T, b));
    __ vldr(d18, r4, OFFSET_OF(T, c));

    __ vneg(d25, d16);
    __ vadd(d25, d25, d17);
    __ vsub(d25, d25, d18);
    __ vmul(d25, d25, d25);
    __ vdiv(d25, d25, d18);

    __ vmov(d16, d25);
    __ vsqrt(d17, d25);
    __ vneg(d17, d17);
    __ vabs(d17, d17);
    __ vmla(d18, d16, d17);

    // Store d16, d17, d18 into a, b, c.
    __ mov(r4, Operand(r0));
    __ vstr(d16, r4, OFFSET_OF(T, a));
    __ vstr(d17, r4, OFFSET_OF(T, b));
    __ vstr(d18, r4, OFFSET_OF(T, c));

    // Load x, y, z into d29-d31.
    __ add(r4, r0, Operand(OFFSET_OF(T, x)));
    __ vldm(ia_w, r4, d29, d31);

    // Swap d29 and d30 via r registers.
    __ vmov(r1, r2, d29);
    __ vmov(d29, d30);
    __ vmov(d30, r1, r2);

    // Convert to and from integer.
    __ vcvt_s32_f64(s1, d31);
    __ vcvt_f64_u32(d31, s1);

    // Store d29-d31 into x, y, z.
    __ add(r4, r0, Operand(OFFSET_OF(T, x)));
    __ vstm(ia_w, r4, d29, d31);

    // Move constants into d20, d21, d22 and store into i, j, k.
    __ vmov(d20, 14.7610017472335499);
    __ vmov(d21, 16.0);
    __ mov(r1, Operand(372106121));
    __ mov(r2, Operand(1079146608));
    __ vmov(d22, VmovIndexLo, r1);
    __ vmov(d22, VmovIndexHi, r2);
    __ add(r4, r0, Operand(OFFSET_OF(T, i)));
    __ vstm(ia_w, r4, d20, d22);
    // Move d22 into low and high.
    __ vmov(r4, VmovIndexLo, d22);
    __ str(r4, MemOperand(r0, OFFSET_OF(T, low)));
    __ vmov(r4, VmovIndexHi, d22);
    __ str(r4, MemOperand(r0, OFFSET_OF(T, high)));

    __ ldm(ia_w, sp, r4.bit() | pc.bit());

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
    t.a = 1.5;
    t.b = 2.75;
    t.c = 17.17;
    t.x = 1.5;
    t.y = 2.75;
    t.z = 17.17;
    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
    USE(dummy);
    CHECK_EQ(14.7610017472335499, t.a);
    CHECK_EQ(3.84200491244266251, t.b);
    CHECK_EQ(73.8818412254460241, t.c);
    CHECK_EQ(2.75, t.x);
    CHECK_EQ(1.5, t.y);
    CHECK_EQ(17.0, t.z);
    CHECK_EQ(14.7610017472335499, t.i);
    CHECK_EQ(16.0, t.j);
    CHECK_EQ(73.8818412254460241, t.k);
    CHECK_EQ(372106121, t.low);
    CHECK_EQ(1079146608, t.high);
  }
}

TEST(14) {
  // Test the VFP canonicalized NaN mode.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double left;
    double right;
    double add_result;
    double sub_result;
    double mul_result;
    double div_result;
  } T;
  T t;

  // Create a function that performs the four basic floating-point operations.
  Assembler assm(isolate, NULL, 0);

  // Ensure FPSCR state (as JSEntryStub does).
  Label fpscr_done;
  __ vmrs(r1);
  __ tst(r1, Operand(kVFPDefaultNaNModeControlBit));
  __ b(ne, &fpscr_done);
  __ orr(r1, r1, Operand(kVFPDefaultNaNModeControlBit));
  __ vmsr(r1);
  __ bind(&fpscr_done);

  __ vldr(d0, r0, OFFSET_OF(T, left));
  __ vldr(d1, r0, OFFSET_OF(T, right));
  __ vadd(d2, d0, d1);
  __ vstr(d2, r0, OFFSET_OF(T, add_result));
  __ vsub(d2, d0, d1);
  __ vstr(d2, r0, OFFSET_OF(T, sub_result));
  __ vmul(d2, d0, d1);
  __ vstr(d2, r0, OFFSET_OF(T, mul_result));
  __ vdiv(d2, d0, d1);
  __ vstr(d2, r0, OFFSET_OF(T, div_result));

  __ mov(pc, Operand(lr));

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
  t.left = BitCast<double>(kHoleNanInt64);
  t.right = 1;
  t.add_result = 0;
  t.sub_result = 0;
  t.mul_result = 0;
  t.div_result = 0;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);
  const uint32_t kArmNanUpper32 = 0x7ff80000;
  const uint32_t kArmNanLower32 = 0x00000000;
#ifdef DEBUG
  const uint64_t kArmNanInt64 =
      (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
  ASSERT(kArmNanInt64 != kHoleNanInt64);
#endif
  // With VFP2 the sign of the canonicalized NaN is undefined, so
  // we remove the sign bit for the upper-word checks.
  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.add_result) & 0xffffffffu);
  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.sub_result) & 0xffffffffu);
  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.mul_result) & 0xffffffffu);
  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
}

TEST(15) {
  // Test the NEON instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    uint32_t src0;
    uint32_t src1;
    uint32_t src2;
    uint32_t src3;
    uint32_t src4;
    uint32_t src5;
    uint32_t src6;
    uint32_t src7;
    uint32_t dst0;
    uint32_t dst1;
    uint32_t dst2;
    uint32_t dst3;
    uint32_t dst4;
    uint32_t dst5;
    uint32_t dst6;
    uint32_t dst7;
    uint32_t srcA0;
    uint32_t srcA1;
    uint32_t dstA0;
    uint32_t dstA1;
    uint32_t dstA2;
    uint32_t dstA3;
    uint32_t dstA4;
    uint32_t dstA5;
    uint32_t dstA6;
    uint32_t dstA7;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the data with NEON instructions.
  Assembler assm(isolate, NULL, 0);


  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&assm, NEON);

    __ stm(db_w, sp, r4.bit() | lr.bit());
    // Move 32 bytes with NEON.
    __ add(r4, r0, Operand(OFFSET_OF(T, src0)));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));
    __ add(r4, r0, Operand(OFFSET_OF(T, dst0)));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r4));

    // Expand 8 bytes into 8 halfwords (16 bits each).
    __ add(r4, r0, Operand(OFFSET_OF(T, srcA0)));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r4));
    __ vmovl(NeonU8, q0, d0);
    __ add(r4, r0, Operand(OFFSET_OF(T, dstA0)));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));

    // The same expansion, but with different source and destination registers.
    __ add(r4, r0, Operand(OFFSET_OF(T, srcA0)));
    __ vld1(Neon8, NeonListOperand(d1), NeonMemOperand(r4));
    __ vmovl(NeonU8, q1, d1);
    __ add(r4, r0, Operand(OFFSET_OF(T, dstA4)));
    __ vst1(Neon8, NeonListOperand(d2, 2), NeonMemOperand(r4));

    __ ldm(ia_w, sp, r4.bit() | pc.bit());

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
    t.src0 = 0x01020304;
    t.src1 = 0x11121314;
    t.src2 = 0x21222324;
    t.src3 = 0x31323334;
    t.src4 = 0x41424344;
    t.src5 = 0x51525354;
    t.src6 = 0x61626364;
    t.src7 = 0x71727374;
    t.dst0 = 0;
    t.dst1 = 0;
    t.dst2 = 0;
    t.dst3 = 0;
    t.dst4 = 0;
    t.dst5 = 0;
    t.dst6 = 0;
    t.dst7 = 0;
    t.srcA0 = 0x41424344;
    t.srcA1 = 0x81828384;
    t.dstA0 = 0;
    t.dstA1 = 0;
    t.dstA2 = 0;
    t.dstA3 = 0;
    t.dstA4 = 0;
    t.dstA5 = 0;
    t.dstA6 = 0;
    t.dstA7 = 0;
    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
    USE(dummy);
    CHECK_EQ(0x01020304, t.dst0);
    CHECK_EQ(0x11121314, t.dst1);
    CHECK_EQ(0x21222324, t.dst2);
    CHECK_EQ(0x31323334, t.dst3);
    CHECK_EQ(0x41424344, t.dst4);
    CHECK_EQ(0x51525354, t.dst5);
    CHECK_EQ(0x61626364, t.dst6);
    CHECK_EQ(0x71727374, t.dst7);
    CHECK_EQ(0x00430044, t.dstA0);
    CHECK_EQ(0x00410042, t.dstA1);
    CHECK_EQ(0x00830084, t.dstA2);
    CHECK_EQ(0x00810082, t.dstA3);
    CHECK_EQ(0x00430044, t.dstA4);
    CHECK_EQ(0x00410042, t.dstA5);
    CHECK_EQ(0x00830084, t.dstA6);
    CHECK_EQ(0x00810082, t.dstA7);
  }
}

TEST(16) {
  // Test the pkh, uxtb, uxtab and uxtb16 instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    uint32_t src0;
    uint32_t src1;
    uint32_t src2;
    uint32_t dst0;
    uint32_t dst1;
    uint32_t dst2;
    uint32_t dst3;
    uint32_t dst4;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the words of the struct.
  Assembler assm(isolate, NULL, 0);

  __ stm(db_w, sp, r4.bit() | lr.bit());

  __ mov(r4, Operand(r0));
  __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src0)));
  __ ldr(r1, MemOperand(r4, OFFSET_OF(T, src1)));

  __ pkhbt(r2, r0, Operand(r1, LSL, 8));
  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst0)));

  __ pkhtb(r2, r0, Operand(r1, ASR, 8));
  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst1)));

  __ uxtb16(r2, Operand(r0, ROR, 8));
  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst2)));

  __ uxtb(r2, Operand(r0, ROR, 8));
  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst3)));

  __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src2)));
  __ uxtab(r2, r0, Operand(r1, ROR, 8));
  __ str(r2, MemOperand(r4, OFFSET_OF(T, dst4)));

  __ ldm(ia_w, sp, r4.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = isolate->heap()->CreateCode(
      desc,
      Code::ComputeFlags(Code::STUB),
      Handle<Code>())->ToObjectChecked();
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
  t.src0 = 0x01020304;
  t.src1 = 0x11121314;
  t.src2 = 0x11121300;
  t.dst0 = 0;
  t.dst1 = 0;
  t.dst2 = 0;
  t.dst3 = 0;
  t.dst4 = 0;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);
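  // Expected values, derived from the inputs above:
  //   pkhbt:  low half of src0, high half of (src1 << 8)    -> 0x12130304
  //   pkhtb:  high half of src0, low half of (src1 >> 8)    -> 0x01021213
  //   uxtb16: bytes 0 and 2 of (src0 ROR 8), zero-extended  -> 0x00010003
  //   uxtb:   byte 0 of (src0 ROR 8), zero-extended         -> 0x00000003
  //   uxtab:  src2 + byte 0 of (src1 ROR 8)                 -> 0x11121313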
  CHECK_EQ(0x12130304, t.dst0);
  CHECK_EQ(0x01021213, t.dst1);
  CHECK_EQ(0x00010003, t.dst2);
  CHECK_EQ(0x00000003, t.dst3);
  CHECK_EQ(0x11121313, t.dst4);
}

TEST(17) {
  // Test generating labels at high addresses.
  // Should not assert.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // Generate a code segment that will be longer than 2^24 bytes.
  Assembler assm(isolate, NULL, 0);
  for (size_t i = 0; i < (1 << 23); ++i) {  // 2^23 nops (4 bytes each).
    __ nop();
  }

  Label target;
  __ b(eq, &target);
  __ bind(&target);
  __ nop();
}

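// Helper for TEST(18): run the generated stub on one dividend/divisor pair
// and check the computed quotient.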
#define TEST_SDIV(expected_, dividend_, divisor_)          \
  t.dividend = dividend_;                                  \
  t.divisor = divisor_;                                    \
  t.result = 0;                                            \
  dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);          \
  CHECK_EQ(expected_, t.result);

TEST(18) {
  // Test the sdiv instruction.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    uint32_t dividend;
    uint32_t divisor;
    uint32_t result;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(&assm, SUDIV);

    __ mov(r3, Operand(r0));

    __ ldr(r0, MemOperand(r3, OFFSET_OF(T, dividend)));
    __ ldr(r1, MemOperand(r3, OFFSET_OF(T, divisor)));

    __ sdiv(r2, r0, r1);
    __ str(r2, MemOperand(r3, OFFSET_OF(T, result)));

    __ bx(lr);

    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());
#ifdef DEBUG
    Code::cast(code)->Print();
#endif
    F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
    Object* dummy;
    TEST_SDIV(1073741824, kMinInt, -2);
    TEST_SDIV(kMinInt, kMinInt, -1);
    TEST_SDIV(5, 10, 2);
    TEST_SDIV(3, 10, 3);
    TEST_SDIV(-5, 10, -2);
    TEST_SDIV(-3, 10, -3);
    TEST_SDIV(-5, -10, 2);
    TEST_SDIV(-3, -10, 3);
    TEST_SDIV(5, -10, -2);
    TEST_SDIV(3, -10, -3);
    USE(dummy);
  }
}


#undef TEST_SDIV

TEST(code_relative_offset) {
  // Test extracting the offset of a label from the beginning of the code
  // in a register.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  // Initialize a code object that will contain the code.
  Handle<Object> code_object(isolate->heap()->undefined_value(), isolate);

  Assembler assm(isolate, NULL, 0);

  Label start, target_away, target_faraway;

  __ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());

  // r3 is used as the address zero; the test will crash when we load it.
  __ mov(r3, Operand::Zero());

  // r5 will be a pointer to the start of the code.
  __ mov(r5, Operand(code_object));
  __ mov_label_offset(r4, &start);

  __ mov_label_offset(r1, &target_faraway);
  __ str(r1, MemOperand(sp, kPointerSize, NegPreIndex));

  __ mov_label_offset(r1, &target_away);

  // Jump straight to 'target_away' the first time and use the relative
  // position the second time. This covers the case when extracting the
  // position of a label which is linked.
  __ mov(r2, Operand::Zero());
  __ bind(&start);
  __ cmp(r2, Operand::Zero());
  __ b(eq, &target_away);
  __ add(pc, r5, r1);
  // Emit invalid instructions to push the label between 2^8 and 2^16
  // instructions away. The test will crash if they are reached.
  for (int i = 0; i < (1 << 10); i++) {
    __ ldr(r3, MemOperand(r3));
  }
  __ bind(&target_away);
  // This will be hit twice: r0 = r0 + 5 + 5.
  __ add(r0, r0, Operand(5));

  __ ldr(r4, MemOperand(sp, kPointerSize, PostIndex), ne);
  __ add(pc, r5, r4, LeaveCC, ne);

  __ mov(r2, Operand(1));
  __ b(&start);
  // Emit invalid instructions to push the label between 2^16 and 2^24
  // instructions away. The test will crash if they are reached.
  for (int i = 0; i < (1 << 21); i++) {
    __ ldr(r3, MemOperand(r3));
  }
  __ bind(&target_faraway);
  // r0 = r0 + 5 + 5 + 11.
  __ add(r0, r0, Operand(11));

  __ ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), code_object);
  CHECK(code->IsCode());
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 21, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(42, res);
}

#undef __