v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
test-assembler-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <cmath>
32 #include <limits>
33 
34 #include "v8.h"
35 
36 #include "macro-assembler.h"
37 #include "arm64/simulator-arm64.h"
38 #include "arm64/decoder-arm64-inl.h"
39 #include "arm64/disasm-arm64.h"
40 #include "arm64/utils-arm64.h"
41 #include "cctest.h"
42 #include "test-utils-arm64.h"
43 
44 using namespace v8::internal;
45 
46 // Test infrastructure.
47 //
48 // Tests are functions which accept no parameters and have no return values.
49 // The testing code should not perform an explicit return once completed. For
50 // example, to test the mov immediate instruction, a very simple test would be:
51 //
52 // TEST(mov_x0_one) {
53 // SETUP();
54 //
55 // START();
56 // __ mov(x0, Operand(1));
57 // END();
58 //
59 // RUN();
60 //
61 // ASSERT_EQUAL_64(1, x0);
62 //
63 // TEARDOWN();
64 // }
65 //
66 // Within a START ... END block all registers but sp can be modified. sp has to
67 // be explicitly saved/restored. The END() macro replaces the function return
68 // so it may appear multiple times in a test if the test has multiple exit
69 // points.
70 //
71 // Once the test has been run all integer and floating point registers as well
72 // as flags are accessible through a RegisterDump instance; see
73 // utils-arm64.cc for more info on RegisterDump.
74 //
75 // We provide some helper asserts to handle common cases:
76 //
77 // ASSERT_EQUAL_32(int32_t, int32_t)
78 // ASSERT_EQUAL_FP32(float, float)
79 // ASSERT_EQUAL_32(int32_t, W register)
80 // ASSERT_EQUAL_FP32(float, S register)
81 // ASSERT_EQUAL_64(int64_t, int64_t)
82 // ASSERT_EQUAL_FP64(double, double)
83 // ASSERT_EQUAL_64(int64_t, X register)
84 // ASSERT_EQUAL_64(X register, X register)
85 // ASSERT_EQUAL_FP64(double, D register)
86 //
87 // e.g. ASSERT_EQUAL_FP64(0.5, d30);
88 //
89 // If more advanced computation is required before the assert, access the
90 // RegisterDump named core directly:
91 //
92 // ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
93 
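// As a slightly fuller sketch (illustrative only, not one of the tests in
// this file), a test that checks the flags and reads the RegisterDump
// directly could be written as:
//
// TEST(subs_zero_result) {
//   INIT_V8();
//   SETUP();
//
//   START();
//   __ Mov(x0, 1);
//   __ Subs(x1, x0, Operand(1)); // 1 - 1 = 0 sets Z; no borrow sets C.
//   END();
//
//   RUN();
//
//   ASSERT_EQUAL_NZCV(ZCFlag);
//   ASSERT_EQUAL_64(0, core.xreg(1));
//
//   TEARDOWN();
// }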
94 
95 #if 0 // TODO(all): enable.
96 static v8::Persistent<v8::Context> env;
97 
98 static void InitializeVM() {
99  if (env.IsEmpty()) {
100  env = v8::Context::New();
101  }
102 }
103 #endif
104 
105 #define __ masm.
106 
107 #define BUF_SIZE 8192
108 #define SETUP() SETUP_SIZE(BUF_SIZE)
109 
110 #define INIT_V8() \
111  CcTest::InitializeVM(); \
112 
113 #ifdef USE_SIMULATOR
114 
115 // Run tests with the simulator.
116 #define SETUP_SIZE(buf_size) \
117  Isolate* isolate = Isolate::Current(); \
118  HandleScope scope(isolate); \
119  ASSERT(isolate != NULL); \
120  byte* buf = new byte[buf_size]; \
121  MacroAssembler masm(isolate, buf, buf_size); \
122  Decoder<DispatchingDecoderVisitor>* decoder = \
123  new Decoder<DispatchingDecoderVisitor>(); \
124  Simulator simulator(decoder); \
125  PrintDisassembler* pdis = NULL; \
126  RegisterDump core;
127 
128 /* if (CcTest::trace_sim()) { \
129  pdis = new PrintDisassembler(stdout); \
130  decoder->PrependVisitor(pdis); \
131  } \
132  */
133 
134 // Reset the assembler and simulator, so that instructions can be generated,
135 // but don't actually emit any code. This can be used by tests that need to
136 // emit instructions at the start of the buffer. Note that START_AFTER_RESET
137 // must be called before any callee-saved register is modified, and before an
138 // END is encountered.
139 //
140 // Most tests should call START, rather than call RESET directly.
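// For instance, a test that needs an instruction at the very start of the
// buffer (as TEST(branch_at_start) below does) follows this pattern:
//
//   RESET();
//   __ B(&target);        // First instruction in the buffer.
//   START_AFTER_RESET();  // Then emit the usual prologue.
//   ...
//   END();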
141 #define RESET() \
142  __ Reset(); \
143  simulator.ResetState();
144 
145 #define START_AFTER_RESET() \
146  __ SetStackPointer(csp); \
147  __ PushCalleeSavedRegisters(); \
148  __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
149 
150 #define START() \
151  RESET(); \
152  START_AFTER_RESET();
153 
154 #define RUN() \
155  simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
156 
157 #define END() \
158  __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
159  core.Dump(&masm); \
160  __ PopCalleeSavedRegisters(); \
161  __ Ret(); \
162  __ GetCode(NULL);
163 
164 #define TEARDOWN() \
165  delete pdis; \
166  delete[] buf;
167 
168 #else // ifdef USE_SIMULATOR.
169 // Run the test on real hardware or models.
170 #define SETUP_SIZE(buf_size) \
171  Isolate* isolate = Isolate::Current(); \
172  HandleScope scope(isolate); \
173  ASSERT(isolate != NULL); \
174  byte* buf = new byte[buf_size]; \
175  MacroAssembler masm(isolate, buf, buf_size); \
176  RegisterDump core; \
177  CPU::SetUp();
178 
179 #define RESET() \
180  __ Reset();
181 
182 #define START_AFTER_RESET() \
183  __ SetStackPointer(csp); \
184  __ PushCalleeSavedRegisters();
185 
186 #define START() \
187  RESET(); \
188  START_AFTER_RESET();
189 
190 #define RUN() \
191  CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
192  { \
193  void (*test_function)(void); \
194  memcpy(&test_function, &buf, sizeof(buf)); \
195  test_function(); \
196  }
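// Note: memcpy is used above, rather than a cast, to turn the code buffer
// pointer into a callable function pointer; converting directly between
// object and function pointers is not portable C++.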
197 
198 #define END() \
199  core.Dump(&masm); \
200  __ PopCalleeSavedRegisters(); \
201  __ Ret(); \
202  __ GetCode(NULL);
203 
204 #define TEARDOWN() \
205  delete[] buf;
206 
207 #endif // ifdef USE_SIMULATOR.
208 
209 #define ASSERT_EQUAL_NZCV(expected) \
210  CHECK(EqualNzcv(expected, core.flags_nzcv()))
211 
212 #define ASSERT_EQUAL_REGISTERS(expected) \
213  CHECK(EqualRegisters(&expected, &core))
214 
215 #define ASSERT_EQUAL_32(expected, result) \
216  CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
217 
218 #define ASSERT_EQUAL_FP32(expected, result) \
219  CHECK(EqualFP32(expected, &core, result))
220 
221 #define ASSERT_EQUAL_64(expected, result) \
222  CHECK(Equal64(expected, &core, result))
223 
224 #define ASSERT_EQUAL_FP64(expected, result) \
225  CHECK(EqualFP64(expected, &core, result))
226 
227 #ifdef DEBUG
228 #define ASSERT_LITERAL_POOL_SIZE(expected) \
229  CHECK((expected) == (__ LiteralPoolSize()))
230 #else
231 #define ASSERT_LITERAL_POOL_SIZE(expected) \
232  ((void) 0)
233 #endif
234 
235 
236 TEST(stack_ops) {
237  INIT_V8();
238  SETUP();
239 
240  START();
241  // Save csp.
242  __ Mov(x29, csp);
243 
244  // Set the csp to a known value.
245  __ Mov(x16, 0x1000);
246  __ Mov(csp, x16);
247  __ Mov(x0, csp);
248 
249  // Add immediate to the csp, and move the result to a normal register.
250  __ Add(csp, csp, Operand(0x50));
251  __ Mov(x1, csp);
252 
253  // Add extended to the csp, and move the result to a normal register.
254  __ Mov(x17, 0xfff);
255  __ Add(csp, csp, Operand(x17, SXTB));
256  __ Mov(x2, csp);
257 
258  // Write csp using a logical instruction, and move the result to a normal register.
259  __ Orr(csp, xzr, Operand(0x1fff));
260  __ Mov(x3, csp);
261 
262  // Write wcsp using a logical instruction.
263  __ Orr(wcsp, wzr, Operand(0xfffffff8L));
264  __ Mov(x4, csp);
265 
266  // Write csp, and read back wcsp.
267  __ Orr(csp, xzr, Operand(0xfffffff8L));
268  __ Mov(w5, wcsp);
269 
270  // Restore csp.
271  __ Mov(csp, x29);
272  END();
273 
274  RUN();
275 
276  ASSERT_EQUAL_64(0x1000, x0);
277  ASSERT_EQUAL_64(0x1050, x1);
278  ASSERT_EQUAL_64(0x104f, x2);
279  ASSERT_EQUAL_64(0x1fff, x3);
280  ASSERT_EQUAL_64(0xfffffff8, x4);
281  ASSERT_EQUAL_64(0xfffffff8, x5);
282 
283  TEARDOWN();
284 }
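// A quick check of the extended-operand arithmetic above: Operand(x17, SXTB)
// takes the low byte of x17 (0xff) and sign-extends it to -1, so the Add
// yields 0x1050 - 1 = 0x104f, the value expected in x2.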
285 
286 
287 TEST(mvn) {
288  INIT_V8();
289  SETUP();
290 
291  START();
292  __ Mvn(w0, 0xfff);
293  __ Mvn(x1, 0xfff);
294  __ Mvn(w2, Operand(w0, LSL, 1));
295  __ Mvn(x3, Operand(x1, LSL, 2));
296  __ Mvn(w4, Operand(w0, LSR, 3));
297  __ Mvn(x5, Operand(x1, LSR, 4));
298  __ Mvn(w6, Operand(w0, ASR, 11));
299  __ Mvn(x7, Operand(x1, ASR, 12));
300  __ Mvn(w8, Operand(w0, ROR, 13));
301  __ Mvn(x9, Operand(x1, ROR, 14));
302  __ Mvn(w10, Operand(w2, UXTB));
303  __ Mvn(x11, Operand(x2, SXTB, 1));
304  __ Mvn(w12, Operand(w2, UXTH, 2));
305  __ Mvn(x13, Operand(x2, SXTH, 3));
306  __ Mvn(x14, Operand(w2, UXTW, 4));
307  __ Mvn(x15, Operand(w2, SXTW, 4));
308  END();
309 
310  RUN();
311 
312  ASSERT_EQUAL_64(0xfffff000, x0);
313  ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
314  ASSERT_EQUAL_64(0x00001fff, x2);
315  ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
316  ASSERT_EQUAL_64(0xe00001ff, x4);
317  ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
318  ASSERT_EQUAL_64(0x00000001, x6);
319  ASSERT_EQUAL_64(0x0, x7);
320  ASSERT_EQUAL_64(0x7ff80000, x8);
321  ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
322  ASSERT_EQUAL_64(0xffffff00, x10);
323  ASSERT_EQUAL_64(0x0000000000000001UL, x11);
324  ASSERT_EQUAL_64(0xffff8003, x12);
325  ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
326  ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
327  ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
328 
329  TEARDOWN();
330 }
331 
332 
333 TEST(mov) {
334  INIT_V8();
335  SETUP();
336 
337  START();
338  __ Mov(x0, 0xffffffffffffffffL);
339  __ Mov(x1, 0xffffffffffffffffL);
340  __ Mov(x2, 0xffffffffffffffffL);
341  __ Mov(x3, 0xffffffffffffffffL);
342 
343  __ Mov(x0, 0x0123456789abcdefL);
344 
345  __ movz(x1, 0xabcdL << 16);
346  __ movk(x2, 0xabcdL << 32);
347  __ movn(x3, 0xabcdL << 48);
348 
349  __ Mov(x4, 0x0123456789abcdefL);
350  __ Mov(x5, x4);
351 
352  __ Mov(w6, -1);
353 
354  // Test that moves back to the same register have the desired effect. This
355  // is a no-op for X registers, and a truncation for W registers.
356  __ Mov(x7, 0x0123456789abcdefL);
357  __ Mov(x7, x7);
358  __ Mov(x8, 0x0123456789abcdefL);
359  __ Mov(w8, w8);
360  __ Mov(x9, 0x0123456789abcdefL);
361  __ Mov(x9, Operand(x9));
362  __ Mov(x10, 0x0123456789abcdefL);
363  __ Mov(w10, Operand(w10));
364 
365  __ Mov(w11, 0xfff);
366  __ Mov(x12, 0xfff);
367  __ Mov(w13, Operand(w11, LSL, 1));
368  __ Mov(x14, Operand(x12, LSL, 2));
369  __ Mov(w15, Operand(w11, LSR, 3));
370  __ Mov(x18, Operand(x12, LSR, 4));
371  __ Mov(w19, Operand(w11, ASR, 11));
372  __ Mov(x20, Operand(x12, ASR, 12));
373  __ Mov(w21, Operand(w11, ROR, 13));
374  __ Mov(x22, Operand(x12, ROR, 14));
375  __ Mov(w23, Operand(w13, UXTB));
376  __ Mov(x24, Operand(x13, SXTB, 1));
377  __ Mov(w25, Operand(w13, UXTH, 2));
378  __ Mov(x26, Operand(x13, SXTH, 3));
379  __ Mov(x27, Operand(w13, UXTW, 4));
380  END();
381 
382  RUN();
383 
384  ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
385  ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
386  ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
387  ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
388  ASSERT_EQUAL_64(x4, x5);
389  ASSERT_EQUAL_32(-1, w6);
390  ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
391  ASSERT_EQUAL_32(0x89abcdefL, w8);
392  ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
393  ASSERT_EQUAL_32(0x89abcdefL, w10);
394  ASSERT_EQUAL_64(0x00000fff, x11);
395  ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
396  ASSERT_EQUAL_64(0x00001ffe, x13);
397  ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
398  ASSERT_EQUAL_64(0x000001ff, x15);
399  ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
400  ASSERT_EQUAL_64(0x00000001, x19);
401  ASSERT_EQUAL_64(0x0, x20);
402  ASSERT_EQUAL_64(0x7ff80000, x21);
403  ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
404  ASSERT_EQUAL_64(0x000000fe, x23);
405  ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
406  ASSERT_EQUAL_64(0x00007ff8, x25);
407  ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
408  ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
409 
410  TEARDOWN();
411 }
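// For reference, the raw move results above follow directly from the
// instruction semantics: movz overwrites the whole register with the shifted
// immediate (x1), movk inserts 16 bits while preserving the 0xff..ff written
// earlier (x2), and movn stores the bitwise inverse, so
// x3 = ~(0xabcd << 48) = 0x5432ffffffffffff.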
412 
413 
414 TEST(mov_imm_w) {
415  INIT_V8();
416  SETUP();
417 
418  START();
419  __ Mov(w0, 0xffffffffL);
420  __ Mov(w1, 0xffff1234L);
421  __ Mov(w2, 0x1234ffffL);
422  __ Mov(w3, 0x00000000L);
423  __ Mov(w4, 0x00001234L);
424  __ Mov(w5, 0x12340000L);
425  __ Mov(w6, 0x12345678L);
426  END();
427 
428  RUN();
429 
430  ASSERT_EQUAL_64(0xffffffffL, x0);
431  ASSERT_EQUAL_64(0xffff1234L, x1);
432  ASSERT_EQUAL_64(0x1234ffffL, x2);
433  ASSERT_EQUAL_64(0x00000000L, x3);
434  ASSERT_EQUAL_64(0x00001234L, x4);
435  ASSERT_EQUAL_64(0x12340000L, x5);
436  ASSERT_EQUAL_64(0x12345678L, x6);
437 
438  TEARDOWN();
439 }
440 
441 
442 TEST(mov_imm_x) {
443  INIT_V8();
444  SETUP();
445 
446  START();
447  __ Mov(x0, 0xffffffffffffffffL);
448  __ Mov(x1, 0xffffffffffff1234L);
449  __ Mov(x2, 0xffffffff12345678L);
450  __ Mov(x3, 0xffff1234ffff5678L);
451  __ Mov(x4, 0x1234ffffffff5678L);
452  __ Mov(x5, 0x1234ffff5678ffffL);
453  __ Mov(x6, 0x12345678ffffffffL);
454  __ Mov(x7, 0x1234ffffffffffffL);
455  __ Mov(x8, 0x123456789abcffffL);
456  __ Mov(x9, 0x12345678ffff9abcL);
457  __ Mov(x10, 0x1234ffff56789abcL);
458  __ Mov(x11, 0xffff123456789abcL);
459  __ Mov(x12, 0x0000000000000000L);
460  __ Mov(x13, 0x0000000000001234L);
461  __ Mov(x14, 0x0000000012345678L);
462  __ Mov(x15, 0x0000123400005678L);
463  __ Mov(x18, 0x1234000000005678L);
464  __ Mov(x19, 0x1234000056780000L);
465  __ Mov(x20, 0x1234567800000000L);
466  __ Mov(x21, 0x1234000000000000L);
467  __ Mov(x22, 0x123456789abc0000L);
468  __ Mov(x23, 0x1234567800009abcL);
469  __ Mov(x24, 0x1234000056789abcL);
470  __ Mov(x25, 0x0000123456789abcL);
471  __ Mov(x26, 0x123456789abcdef0L);
472  __ Mov(x27, 0xffff000000000001L);
473  __ Mov(x28, 0x8000ffff00000000L);
474  END();
475 
476  RUN();
477 
478  ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
479  ASSERT_EQUAL_64(0xffffffff12345678L, x2);
480  ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
481  ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
482  ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
483  ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
484  ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
485  ASSERT_EQUAL_64(0x123456789abcffffL, x8);
486  ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
487  ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
488  ASSERT_EQUAL_64(0xffff123456789abcL, x11);
489  ASSERT_EQUAL_64(0x0000000000000000L, x12);
490  ASSERT_EQUAL_64(0x0000000000001234L, x13);
491  ASSERT_EQUAL_64(0x0000000012345678L, x14);
492  ASSERT_EQUAL_64(0x0000123400005678L, x15);
493  ASSERT_EQUAL_64(0x1234000000005678L, x18);
494  ASSERT_EQUAL_64(0x1234000056780000L, x19);
495  ASSERT_EQUAL_64(0x1234567800000000L, x20);
496  ASSERT_EQUAL_64(0x1234000000000000L, x21);
497  ASSERT_EQUAL_64(0x123456789abc0000L, x22);
498  ASSERT_EQUAL_64(0x1234567800009abcL, x23);
499  ASSERT_EQUAL_64(0x1234000056789abcL, x24);
500  ASSERT_EQUAL_64(0x0000123456789abcL, x25);
501  ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
502  ASSERT_EQUAL_64(0xffff000000000001L, x27);
503  ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
504 
505  TEARDOWN();
506 }
507 
508 
509 TEST(orr) {
510  INIT_V8();
511  SETUP();
512 
513  START();
514  __ Mov(x0, 0xf0f0);
515  __ Mov(x1, 0xf00000ff);
516 
517  __ Orr(x2, x0, Operand(x1));
518  __ Orr(w3, w0, Operand(w1, LSL, 28));
519  __ Orr(x4, x0, Operand(x1, LSL, 32));
520  __ Orr(x5, x0, Operand(x1, LSR, 4));
521  __ Orr(w6, w0, Operand(w1, ASR, 4));
522  __ Orr(x7, x0, Operand(x1, ASR, 4));
523  __ Orr(w8, w0, Operand(w1, ROR, 12));
524  __ Orr(x9, x0, Operand(x1, ROR, 12));
525  __ Orr(w10, w0, Operand(0xf));
526  __ Orr(x11, x0, Operand(0xf0000000f0000000L));
527  END();
528 
529  RUN();
530 
531  ASSERT_EQUAL_64(0xf000f0ff, x2);
532  ASSERT_EQUAL_64(0xf000f0f0, x3);
533  ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
534  ASSERT_EQUAL_64(0x0f00f0ff, x5);
535  ASSERT_EQUAL_64(0xff00f0ff, x6);
536  ASSERT_EQUAL_64(0x0f00f0ff, x7);
537  ASSERT_EQUAL_64(0x0ffff0f0, x8);
538  ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
539  ASSERT_EQUAL_64(0xf0ff, x10);
540  ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
541 
542  TEARDOWN();
543 }
544 
545 
546 TEST(orr_extend) {
547  INIT_V8();
548  SETUP();
549 
550  START();
551  __ Mov(x0, 1);
552  __ Mov(x1, 0x8000000080008080UL);
553  __ Orr(w6, w0, Operand(w1, UXTB));
554  __ Orr(x7, x0, Operand(x1, UXTH, 1));
555  __ Orr(w8, w0, Operand(w1, UXTW, 2));
556  __ Orr(x9, x0, Operand(x1, UXTX, 3));
557  __ Orr(w10, w0, Operand(w1, SXTB));
558  __ Orr(x11, x0, Operand(x1, SXTH, 1));
559  __ Orr(x12, x0, Operand(x1, SXTW, 2));
560  __ Orr(x13, x0, Operand(x1, SXTX, 3));
561  END();
562 
563  RUN();
564 
565  ASSERT_EQUAL_64(0x00000081, x6);
566  ASSERT_EQUAL_64(0x00010101, x7);
567  ASSERT_EQUAL_64(0x00020201, x8);
568  ASSERT_EQUAL_64(0x0000000400040401UL, x9);
569  ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
570  ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
571  ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
572  ASSERT_EQUAL_64(0x0000000400040401UL, x13);
573 
574  TEARDOWN();
575 }
576 
577 
578 TEST(bitwise_wide_imm) {
579  INIT_V8();
580  SETUP();
581 
582  START();
583  __ Mov(x0, 0);
584  __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
585 
586  __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
587  __ Orr(w11, w1, Operand(0x90abcdef));
588  END();
589 
590  RUN();
591 
592  ASSERT_EQUAL_64(0, x0);
593  ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
594  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
595  ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
596 
597  TEARDOWN();
598 }
599 
600 
601 TEST(orn) {
602  INIT_V8();
603  SETUP();
604 
605  START();
606  __ Mov(x0, 0xf0f0);
607  __ Mov(x1, 0xf00000ff);
608 
609  __ Orn(x2, x0, Operand(x1));
610  __ Orn(w3, w0, Operand(w1, LSL, 4));
611  __ Orn(x4, x0, Operand(x1, LSL, 4));
612  __ Orn(x5, x0, Operand(x1, LSR, 1));
613  __ Orn(w6, w0, Operand(w1, ASR, 1));
614  __ Orn(x7, x0, Operand(x1, ASR, 1));
615  __ Orn(w8, w0, Operand(w1, ROR, 16));
616  __ Orn(x9, x0, Operand(x1, ROR, 16));
617  __ Orn(w10, w0, Operand(0xffff));
618  __ Orn(x11, x0, Operand(0xffff0000ffffL));
619  END();
620 
621  RUN();
622 
623  ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
624  ASSERT_EQUAL_64(0xfffff0ff, x3);
625  ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
626  ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
627  ASSERT_EQUAL_64(0x07fffff0, x6);
628  ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
629  ASSERT_EQUAL_64(0xff00ffff, x8);
630  ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
631  ASSERT_EQUAL_64(0xfffff0f0, x10);
632  ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
633 
634  TEARDOWN();
635 }
636 
637 
638 TEST(orn_extend) {
639  INIT_V8();
640  SETUP();
641 
642  START();
643  __ Mov(x0, 1);
644  __ Mov(x1, 0x8000000080008081UL);
645  __ Orn(w6, w0, Operand(w1, UXTB));
646  __ Orn(x7, x0, Operand(x1, UXTH, 1));
647  __ Orn(w8, w0, Operand(w1, UXTW, 2));
648  __ Orn(x9, x0, Operand(x1, UXTX, 3));
649  __ Orn(w10, w0, Operand(w1, SXTB));
650  __ Orn(x11, x0, Operand(x1, SXTH, 1));
651  __ Orn(x12, x0, Operand(x1, SXTW, 2));
652  __ Orn(x13, x0, Operand(x1, SXTX, 3));
653  END();
654 
655  RUN();
656 
657  ASSERT_EQUAL_64(0xffffff7f, x6);
658  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
659  ASSERT_EQUAL_64(0xfffdfdfb, x8);
660  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
661  ASSERT_EQUAL_64(0x0000007f, x10);
662  ASSERT_EQUAL_64(0x0000fefd, x11);
663  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
664  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
665 
666  TEARDOWN();
667 }
668 
669 
670 TEST(and_) {
671  INIT_V8();
672  SETUP();
673 
674  START();
675  __ Mov(x0, 0xfff0);
676  __ Mov(x1, 0xf00000ff);
677 
678  __ And(x2, x0, Operand(x1));
679  __ And(w3, w0, Operand(w1, LSL, 4));
680  __ And(x4, x0, Operand(x1, LSL, 4));
681  __ And(x5, x0, Operand(x1, LSR, 1));
682  __ And(w6, w0, Operand(w1, ASR, 20));
683  __ And(x7, x0, Operand(x1, ASR, 20));
684  __ And(w8, w0, Operand(w1, ROR, 28));
685  __ And(x9, x0, Operand(x1, ROR, 28));
686  __ And(w10, w0, Operand(0xff00));
687  __ And(x11, x0, Operand(0xff));
688  END();
689 
690  RUN();
691 
692  ASSERT_EQUAL_64(0x000000f0, x2);
693  ASSERT_EQUAL_64(0x00000ff0, x3);
694  ASSERT_EQUAL_64(0x00000ff0, x4);
695  ASSERT_EQUAL_64(0x00000070, x5);
696  ASSERT_EQUAL_64(0x0000ff00, x6);
697  ASSERT_EQUAL_64(0x00000f00, x7);
698  ASSERT_EQUAL_64(0x00000ff0, x8);
699  ASSERT_EQUAL_64(0x00000000, x9);
700  ASSERT_EQUAL_64(0x0000ff00, x10);
701  ASSERT_EQUAL_64(0x000000f0, x11);
702 
703  TEARDOWN();
704 }
705 
706 
707 TEST(and_extend) {
708  INIT_V8();
709  SETUP();
710 
711  START();
712  __ Mov(x0, 0xffffffffffffffffUL);
713  __ Mov(x1, 0x8000000080008081UL);
714  __ And(w6, w0, Operand(w1, UXTB));
715  __ And(x7, x0, Operand(x1, UXTH, 1));
716  __ And(w8, w0, Operand(w1, UXTW, 2));
717  __ And(x9, x0, Operand(x1, UXTX, 3));
718  __ And(w10, w0, Operand(w1, SXTB));
719  __ And(x11, x0, Operand(x1, SXTH, 1));
720  __ And(x12, x0, Operand(x1, SXTW, 2));
721  __ And(x13, x0, Operand(x1, SXTX, 3));
722  END();
723 
724  RUN();
725 
726  ASSERT_EQUAL_64(0x00000081, x6);
727  ASSERT_EQUAL_64(0x00010102, x7);
728  ASSERT_EQUAL_64(0x00020204, x8);
729  ASSERT_EQUAL_64(0x0000000400040408UL, x9);
730  ASSERT_EQUAL_64(0xffffff81, x10);
731  ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
732  ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
733  ASSERT_EQUAL_64(0x0000000400040408UL, x13);
734 
735  TEARDOWN();
736 }
737 
738 
739 TEST(ands) {
740  INIT_V8();
741  SETUP();
742 
743  START();
744  __ Mov(x1, 0xf00000ff);
745  __ Ands(w0, w1, Operand(w1));
746  END();
747 
748  RUN();
749 
750  ASSERT_EQUAL_NZCV(NFlag);
751  ASSERT_EQUAL_64(0xf00000ff, x0);
752 
753  START();
754  __ Mov(x0, 0xfff0);
755  __ Mov(x1, 0xf00000ff);
756  __ Ands(w0, w0, Operand(w1, LSR, 4));
757  END();
758 
759  RUN();
760 
761  ASSERT_EQUAL_NZCV(ZFlag);
762  ASSERT_EQUAL_64(0x00000000, x0);
763 
764  START();
765  __ Mov(x0, 0x8000000000000000L);
766  __ Mov(x1, 0x00000001);
767  __ Ands(x0, x0, Operand(x1, ROR, 1));
768  END();
769 
770  RUN();
771 
772  ASSERT_EQUAL_NZCV(NFlag);
773  ASSERT_EQUAL_64(0x8000000000000000L, x0);
774 
775  START();
776  __ Mov(x0, 0xfff0);
777  __ Ands(w0, w0, Operand(0xf));
778  END();
779 
780  RUN();
781 
782  ASSERT_EQUAL_NZCV(ZFlag);
783  ASSERT_EQUAL_64(0x00000000, x0);
784 
785  START();
786  __ Mov(x0, 0xff000000);
787  __ Ands(w0, w0, Operand(0x80000000));
788  END();
789 
790  RUN();
791 
792  ASSERT_EQUAL_NZCV(NFlag);
793  ASSERT_EQUAL_64(0x80000000, x0);
794 
795  TEARDOWN();
796 }
797 
798 
799 TEST(bic) {
800  INIT_V8();
801  SETUP();
802 
803  START();
804  __ Mov(x0, 0xfff0);
805  __ Mov(x1, 0xf00000ff);
806 
807  __ Bic(x2, x0, Operand(x1));
808  __ Bic(w3, w0, Operand(w1, LSL, 4));
809  __ Bic(x4, x0, Operand(x1, LSL, 4));
810  __ Bic(x5, x0, Operand(x1, LSR, 1));
811  __ Bic(w6, w0, Operand(w1, ASR, 20));
812  __ Bic(x7, x0, Operand(x1, ASR, 20));
813  __ Bic(w8, w0, Operand(w1, ROR, 28));
814  __ Bic(x9, x0, Operand(x1, ROR, 24));
815  __ Bic(x10, x0, Operand(0x1f));
816  __ Bic(x11, x0, Operand(0x100));
817 
818  // Test bic into csp when the constant cannot be encoded in the immediate
819  // field.
820  // Use x20 to preserve csp. We check the result via x21 because the
821  // test infrastructure requires that csp be restored to its original value.
822  __ Mov(x20, csp);
823  __ Mov(x0, 0xffffff);
824  __ Bic(csp, x0, Operand(0xabcdef));
825  __ Mov(x21, csp);
826  __ Mov(csp, x20);
827  END();
828 
829  RUN();
830 
831  ASSERT_EQUAL_64(0x0000ff00, x2);
832  ASSERT_EQUAL_64(0x0000f000, x3);
833  ASSERT_EQUAL_64(0x0000f000, x4);
834  ASSERT_EQUAL_64(0x0000ff80, x5);
835  ASSERT_EQUAL_64(0x000000f0, x6);
836  ASSERT_EQUAL_64(0x0000f0f0, x7);
837  ASSERT_EQUAL_64(0x0000f000, x8);
838  ASSERT_EQUAL_64(0x0000ff00, x9);
839  ASSERT_EQUAL_64(0x0000ffe0, x10);
840  ASSERT_EQUAL_64(0x0000fef0, x11);
841 
842  ASSERT_EQUAL_64(0x543210, x21);
843 
844  TEARDOWN();
845 }
846 
847 
848 TEST(bic_extend) {
849  INIT_V8();
850  SETUP();
851 
852  START();
853  __ Mov(x0, 0xffffffffffffffffUL);
854  __ Mov(x1, 0x8000000080008081UL);
855  __ Bic(w6, w0, Operand(w1, UXTB));
856  __ Bic(x7, x0, Operand(x1, UXTH, 1));
857  __ Bic(w8, w0, Operand(w1, UXTW, 2));
858  __ Bic(x9, x0, Operand(x1, UXTX, 3));
859  __ Bic(w10, w0, Operand(w1, SXTB));
860  __ Bic(x11, x0, Operand(x1, SXTH, 1));
861  __ Bic(x12, x0, Operand(x1, SXTW, 2));
862  __ Bic(x13, x0, Operand(x1, SXTX, 3));
863  END();
864 
865  RUN();
866 
867  ASSERT_EQUAL_64(0xffffff7e, x6);
868  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
869  ASSERT_EQUAL_64(0xfffdfdfb, x8);
870  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
871  ASSERT_EQUAL_64(0x0000007e, x10);
872  ASSERT_EQUAL_64(0x0000fefd, x11);
873  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
874  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
875 
876  TEARDOWN();
877 }
878 
879 
880 TEST(bics) {
881  INIT_V8();
882  SETUP();
883 
884  START();
885  __ Mov(x1, 0xffff);
886  __ Bics(w0, w1, Operand(w1));
887  END();
888 
889  RUN();
890 
891  ASSERT_EQUAL_NZCV(ZFlag);
892  ASSERT_EQUAL_64(0x00000000, x0);
893 
894  START();
895  __ Mov(x0, 0xffffffff);
896  __ Bics(w0, w0, Operand(w0, LSR, 1));
897  END();
898 
899  RUN();
900 
901  ASSERT_EQUAL_NZCV(NFlag);
902  ASSERT_EQUAL_64(0x80000000, x0);
903 
904  START();
905  __ Mov(x0, 0x8000000000000000L);
906  __ Mov(x1, 0x00000001);
907  __ Bics(x0, x0, Operand(x1, ROR, 1));
908  END();
909 
910  RUN();
911 
912  ASSERT_EQUAL_NZCV(ZFlag);
913  ASSERT_EQUAL_64(0x00000000, x0);
914 
915  START();
916  __ Mov(x0, 0xffffffffffffffffL);
917  __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
918  END();
919 
920  RUN();
921 
922  ASSERT_EQUAL_NZCV(NFlag);
923  ASSERT_EQUAL_64(0x8000000000000000L, x0);
924 
925  START();
926  __ Mov(w0, 0xffff0000);
927  __ Bics(w0, w0, Operand(0xfffffff0));
928  END();
929 
930  RUN();
931 
932  ASSERT_EQUAL_NZCV(ZFlag);
933  ASSERT_EQUAL_64(0x00000000, x0);
934 
935  TEARDOWN();
936 }
937 
938 
939 TEST(eor) {
940  INIT_V8();
941  SETUP();
942 
943  START();
944  __ Mov(x0, 0xfff0);
945  __ Mov(x1, 0xf00000ff);
946 
947  __ Eor(x2, x0, Operand(x1));
948  __ Eor(w3, w0, Operand(w1, LSL, 4));
949  __ Eor(x4, x0, Operand(x1, LSL, 4));
950  __ Eor(x5, x0, Operand(x1, LSR, 1));
951  __ Eor(w6, w0, Operand(w1, ASR, 20));
952  __ Eor(x7, x0, Operand(x1, ASR, 20));
953  __ Eor(w8, w0, Operand(w1, ROR, 28));
954  __ Eor(x9, x0, Operand(x1, ROR, 28));
955  __ Eor(w10, w0, Operand(0xff00ff00));
956  __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
957  END();
958 
959  RUN();
960 
961  ASSERT_EQUAL_64(0xf000ff0f, x2);
962  ASSERT_EQUAL_64(0x0000f000, x3);
963  ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
964  ASSERT_EQUAL_64(0x7800ff8f, x5);
965  ASSERT_EQUAL_64(0xffff00f0, x6);
966  ASSERT_EQUAL_64(0x0000f0f0, x7);
967  ASSERT_EQUAL_64(0x0000f00f, x8);
968  ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
969  ASSERT_EQUAL_64(0xff0000f0, x10);
970  ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
971 
972  TEARDOWN();
973 }
974 
975 
976 TEST(eor_extend) {
977  INIT_V8();
978  SETUP();
979 
980  START();
981  __ Mov(x0, 0x1111111111111111UL);
982  __ Mov(x1, 0x8000000080008081UL);
983  __ Eor(w6, w0, Operand(w1, UXTB));
984  __ Eor(x7, x0, Operand(x1, UXTH, 1));
985  __ Eor(w8, w0, Operand(w1, UXTW, 2));
986  __ Eor(x9, x0, Operand(x1, UXTX, 3));
987  __ Eor(w10, w0, Operand(w1, SXTB));
988  __ Eor(x11, x0, Operand(x1, SXTH, 1));
989  __ Eor(x12, x0, Operand(x1, SXTW, 2));
990  __ Eor(x13, x0, Operand(x1, SXTX, 3));
991  END();
992 
993  RUN();
994 
995  ASSERT_EQUAL_64(0x11111190, x6);
996  ASSERT_EQUAL_64(0x1111111111101013UL, x7);
997  ASSERT_EQUAL_64(0x11131315, x8);
998  ASSERT_EQUAL_64(0x1111111511151519UL, x9);
999  ASSERT_EQUAL_64(0xeeeeee90, x10);
1000  ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
1001  ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
1002  ASSERT_EQUAL_64(0x1111111511151519UL, x13);
1003 
1004  TEARDOWN();
1005 }
1006 
1007 
1008 TEST(eon) {
1009  INIT_V8();
1010  SETUP();
1011 
1012  START();
1013  __ Mov(x0, 0xfff0);
1014  __ Mov(x1, 0xf00000ff);
1015 
1016  __ Eon(x2, x0, Operand(x1));
1017  __ Eon(w3, w0, Operand(w1, LSL, 4));
1018  __ Eon(x4, x0, Operand(x1, LSL, 4));
1019  __ Eon(x5, x0, Operand(x1, LSR, 1));
1020  __ Eon(w6, w0, Operand(w1, ASR, 20));
1021  __ Eon(x7, x0, Operand(x1, ASR, 20));
1022  __ Eon(w8, w0, Operand(w1, ROR, 28));
1023  __ Eon(x9, x0, Operand(x1, ROR, 28));
1024  __ Eon(w10, w0, Operand(0x03c003c0));
1025  __ Eon(x11, x0, Operand(0x0000100000001000L));
1026  END();
1027 
1028  RUN();
1029 
1030  ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
1031  ASSERT_EQUAL_64(0xffff0fff, x3);
1032  ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
1033  ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
1034  ASSERT_EQUAL_64(0x0000ff0f, x6);
1035  ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
1036  ASSERT_EQUAL_64(0xffff0ff0, x8);
1037  ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
1038  ASSERT_EQUAL_64(0xfc3f03cf, x10);
1039  ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
1040 
1041  TEARDOWN();
1042 }
1043 
1044 
1045 TEST(eon_extend) {
1046  INIT_V8();
1047  SETUP();
1048 
1049  START();
1050  __ Mov(x0, 0x1111111111111111UL);
1051  __ Mov(x1, 0x8000000080008081UL);
1052  __ Eon(w6, w0, Operand(w1, UXTB));
1053  __ Eon(x7, x0, Operand(x1, UXTH, 1));
1054  __ Eon(w8, w0, Operand(w1, UXTW, 2));
1055  __ Eon(x9, x0, Operand(x1, UXTX, 3));
1056  __ Eon(w10, w0, Operand(w1, SXTB));
1057  __ Eon(x11, x0, Operand(x1, SXTH, 1));
1058  __ Eon(x12, x0, Operand(x1, SXTW, 2));
1059  __ Eon(x13, x0, Operand(x1, SXTX, 3));
1060  END();
1061 
1062  RUN();
1063 
1064  ASSERT_EQUAL_64(0xeeeeee6f, x6);
1065  ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
1066  ASSERT_EQUAL_64(0xeeececea, x8);
1067  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
1068  ASSERT_EQUAL_64(0x1111116f, x10);
1069  ASSERT_EQUAL_64(0x111111111111efecUL, x11);
1070  ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
1071  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
1072 
1073  TEARDOWN();
1074 }
1075 
1076 
1077 TEST(mul) {
1078  INIT_V8();
1079  SETUP();
1080 
1081  START();
1082  __ Mov(x16, 0);
1083  __ Mov(x17, 1);
1084  __ Mov(x18, 0xffffffff);
1085  __ Mov(x19, 0xffffffffffffffffUL);
1086 
1087  __ Mul(w0, w16, w16);
1088  __ Mul(w1, w16, w17);
1089  __ Mul(w2, w17, w18);
1090  __ Mul(w3, w18, w19);
1091  __ Mul(x4, x16, x16);
1092  __ Mul(x5, x17, x18);
1093  __ Mul(x6, x18, x19);
1094  __ Mul(x7, x19, x19);
1095  __ Smull(x8, w17, w18);
1096  __ Smull(x9, w18, w18);
1097  __ Smull(x10, w19, w19);
1098  __ Mneg(w11, w16, w16);
1099  __ Mneg(w12, w16, w17);
1100  __ Mneg(w13, w17, w18);
1101  __ Mneg(w14, w18, w19);
1102  __ Mneg(x20, x16, x16);
1103  __ Mneg(x21, x17, x18);
1104  __ Mneg(x22, x18, x19);
1105  __ Mneg(x23, x19, x19);
1106  END();
1107 
1108  RUN();
1109 
1110  ASSERT_EQUAL_64(0, x0);
1111  ASSERT_EQUAL_64(0, x1);
1112  ASSERT_EQUAL_64(0xffffffff, x2);
1113  ASSERT_EQUAL_64(1, x3);
1114  ASSERT_EQUAL_64(0, x4);
1115  ASSERT_EQUAL_64(0xffffffff, x5);
1116  ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
1117  ASSERT_EQUAL_64(1, x7);
1118  ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
1119  ASSERT_EQUAL_64(1, x9);
1120  ASSERT_EQUAL_64(1, x10);
1121  ASSERT_EQUAL_64(0, x11);
1122  ASSERT_EQUAL_64(0, x12);
1123  ASSERT_EQUAL_64(1, x13);
1124  ASSERT_EQUAL_64(0xffffffff, x14);
1125  ASSERT_EQUAL_64(0, x20);
1126  ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
1127  ASSERT_EQUAL_64(0xffffffff, x22);
1128  ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
1129 
1130  TEARDOWN();
1131 }
1132 
1133 
1134 static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1135  SETUP();
1136  START();
1137  __ Mov(w0, a);
1138  __ Mov(w1, b);
1139  __ Smull(x2, w0, w1);
1140  END();
1141  RUN();
1142  ASSERT_EQUAL_64(expected, x2);
1143  TEARDOWN();
1144 }
1145 
1146 
1147 TEST(smull) {
1148  INIT_V8();
1149  SmullHelper(0, 0, 0);
1150  SmullHelper(1, 1, 1);
1151  SmullHelper(-1, -1, 1);
1152  SmullHelper(1, -1, -1);
1153  SmullHelper(0xffffffff80000000, 0x80000000, 1);
1154  SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
1155 }
1156 
1157 
1158 TEST(madd) {
1159  INIT_V8();
1160  SETUP();
1161 
1162  START();
1163  __ Mov(x16, 0);
1164  __ Mov(x17, 1);
1165  __ Mov(x18, 0xffffffff);
1166  __ Mov(x19, 0xffffffffffffffffUL);
1167 
1168  __ Madd(w0, w16, w16, w16);
1169  __ Madd(w1, w16, w16, w17);
1170  __ Madd(w2, w16, w16, w18);
1171  __ Madd(w3, w16, w16, w19);
1172  __ Madd(w4, w16, w17, w17);
1173  __ Madd(w5, w17, w17, w18);
1174  __ Madd(w6, w17, w17, w19);
1175  __ Madd(w7, w17, w18, w16);
1176  __ Madd(w8, w17, w18, w18);
1177  __ Madd(w9, w18, w18, w17);
1178  __ Madd(w10, w18, w19, w18);
1179  __ Madd(w11, w19, w19, w19);
1180 
1181  __ Madd(x12, x16, x16, x16);
1182  __ Madd(x13, x16, x16, x17);
1183  __ Madd(x14, x16, x16, x18);
1184  __ Madd(x15, x16, x16, x19);
1185  __ Madd(x20, x16, x17, x17);
1186  __ Madd(x21, x17, x17, x18);
1187  __ Madd(x22, x17, x17, x19);
1188  __ Madd(x23, x17, x18, x16);
1189  __ Madd(x24, x17, x18, x18);
1190  __ Madd(x25, x18, x18, x17);
1191  __ Madd(x26, x18, x19, x18);
1192  __ Madd(x27, x19, x19, x19);
1193 
1194  END();
1195 
1196  RUN();
1197 
1198  ASSERT_EQUAL_64(0, x0);
1199  ASSERT_EQUAL_64(1, x1);
1200  ASSERT_EQUAL_64(0xffffffff, x2);
1201  ASSERT_EQUAL_64(0xffffffff, x3);
1202  ASSERT_EQUAL_64(1, x4);
1203  ASSERT_EQUAL_64(0, x5);
1204  ASSERT_EQUAL_64(0, x6);
1205  ASSERT_EQUAL_64(0xffffffff, x7);
1206  ASSERT_EQUAL_64(0xfffffffe, x8);
1207  ASSERT_EQUAL_64(2, x9);
1208  ASSERT_EQUAL_64(0, x10);
1209  ASSERT_EQUAL_64(0, x11);
1210 
1211  ASSERT_EQUAL_64(0, x12);
1212  ASSERT_EQUAL_64(1, x13);
1213  ASSERT_EQUAL_64(0xffffffff, x14);
1214  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
1215  ASSERT_EQUAL_64(1, x20);
1216  ASSERT_EQUAL_64(0x100000000UL, x21);
1217  ASSERT_EQUAL_64(0, x22);
1218  ASSERT_EQUAL_64(0xffffffff, x23);
1219  ASSERT_EQUAL_64(0x1fffffffe, x24);
1220  ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
1221  ASSERT_EQUAL_64(0, x26);
1222  ASSERT_EQUAL_64(0, x27);
1223 
1224  TEARDOWN();
1225 }
1226 
1227 
1228 TEST(msub) {
1229  INIT_V8();
1230  SETUP();
1231 
1232  START();
1233  __ Mov(x16, 0);
1234  __ Mov(x17, 1);
1235  __ Mov(x18, 0xffffffff);
1236  __ Mov(x19, 0xffffffffffffffffUL);
1237 
1238  __ Msub(w0, w16, w16, w16);
1239  __ Msub(w1, w16, w16, w17);
1240  __ Msub(w2, w16, w16, w18);
1241  __ Msub(w3, w16, w16, w19);
1242  __ Msub(w4, w16, w17, w17);
1243  __ Msub(w5, w17, w17, w18);
1244  __ Msub(w6, w17, w17, w19);
1245  __ Msub(w7, w17, w18, w16);
1246  __ Msub(w8, w17, w18, w18);
1247  __ Msub(w9, w18, w18, w17);
1248  __ Msub(w10, w18, w19, w18);
1249  __ Msub(w11, w19, w19, w19);
1250 
1251  __ Msub(x12, x16, x16, x16);
1252  __ Msub(x13, x16, x16, x17);
1253  __ Msub(x14, x16, x16, x18);
1254  __ Msub(x15, x16, x16, x19);
1255  __ Msub(x20, x16, x17, x17);
1256  __ Msub(x21, x17, x17, x18);
1257  __ Msub(x22, x17, x17, x19);
1258  __ Msub(x23, x17, x18, x16);
1259  __ Msub(x24, x17, x18, x18);
1260  __ Msub(x25, x18, x18, x17);
1261  __ Msub(x26, x18, x19, x18);
1262  __ Msub(x27, x19, x19, x19);
1263 
1264  END();
1265 
1266  RUN();
1267 
1268  ASSERT_EQUAL_64(0, x0);
1269  ASSERT_EQUAL_64(1, x1);
1270  ASSERT_EQUAL_64(0xffffffff, x2);
1271  ASSERT_EQUAL_64(0xffffffff, x3);
1272  ASSERT_EQUAL_64(1, x4);
1273  ASSERT_EQUAL_64(0xfffffffe, x5);
1274  ASSERT_EQUAL_64(0xfffffffe, x6);
1275  ASSERT_EQUAL_64(1, x7);
1276  ASSERT_EQUAL_64(0, x8);
1277  ASSERT_EQUAL_64(0, x9);
1278  ASSERT_EQUAL_64(0xfffffffe, x10);
1279  ASSERT_EQUAL_64(0xfffffffe, x11);
1280 
1281  ASSERT_EQUAL_64(0, x12);
1282  ASSERT_EQUAL_64(1, x13);
1283  ASSERT_EQUAL_64(0xffffffff, x14);
1284  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
1285  ASSERT_EQUAL_64(1, x20);
1286  ASSERT_EQUAL_64(0xfffffffeUL, x21);
1287  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
1288  ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
1289  ASSERT_EQUAL_64(0, x24);
1290  ASSERT_EQUAL_64(0x200000000UL, x25);
1291  ASSERT_EQUAL_64(0x1fffffffeUL, x26);
1292  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
1293 
1294  TEARDOWN();
1295 }
1296 
1297 
1298 TEST(smulh) {
1299  INIT_V8();
1300  SETUP();
1301 
1302  START();
1303  __ Mov(x20, 0);
1304  __ Mov(x21, 1);
1305  __ Mov(x22, 0x0000000100000000L);
1306  __ Mov(x23, 0x12345678);
1307  __ Mov(x24, 0x0123456789abcdefL);
1308  __ Mov(x25, 0x0000000200000000L);
1309  __ Mov(x26, 0x8000000000000000UL);
1310  __ Mov(x27, 0xffffffffffffffffUL);
1311  __ Mov(x28, 0x5555555555555555UL);
1312  __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
1313 
1314  __ Smulh(x0, x20, x24);
1315  __ Smulh(x1, x21, x24);
1316  __ Smulh(x2, x22, x23);
1317  __ Smulh(x3, x22, x24);
1318  __ Smulh(x4, x24, x25);
1319  __ Smulh(x5, x23, x27);
1320  __ Smulh(x6, x26, x26);
1321  __ Smulh(x7, x26, x27);
1322  __ Smulh(x8, x27, x27);
1323  __ Smulh(x9, x28, x28);
1324  __ Smulh(x10, x28, x29);
1325  __ Smulh(x11, x29, x29);
1326  END();
1327 
1328  RUN();
1329 
1330  ASSERT_EQUAL_64(0, x0);
1331  ASSERT_EQUAL_64(0, x1);
1332  ASSERT_EQUAL_64(0, x2);
1333  ASSERT_EQUAL_64(0x01234567, x3);
1334  ASSERT_EQUAL_64(0x02468acf, x4);
1335  ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
1336  ASSERT_EQUAL_64(0x4000000000000000UL, x6);
1337  ASSERT_EQUAL_64(0, x7);
1338  ASSERT_EQUAL_64(0, x8);
1339  ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
1340  ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
1341  ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
1342 
1343  TEARDOWN();
1344 }
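// Smulh returns the high 64 bits of the signed 128-bit product. As a sanity
// check on x9 above: x28 = 0x5555...55 is roughly 2^64 / 3, so its square is
// roughly 2^128 / 9, whose high half is roughly 2^64 / 9 =
// 0x1c71c71c71c71c71.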
1345 
1346 
1347 TEST(smaddl_umaddl) {
1348  INIT_V8();
1349  SETUP();
1350 
1351  START();
1352  __ Mov(x17, 1);
1353  __ Mov(x18, 0xffffffff);
1354  __ Mov(x19, 0xffffffffffffffffUL);
1355  __ Mov(x20, 4);
1356  __ Mov(x21, 0x200000000UL);
1357 
1358  __ Smaddl(x9, w17, w18, x20);
1359  __ Smaddl(x10, w18, w18, x20);
1360  __ Smaddl(x11, w19, w19, x20);
1361  __ Smaddl(x12, w19, w19, x21);
1362  __ Umaddl(x13, w17, w18, x20);
1363  __ Umaddl(x14, w18, w18, x20);
1364  __ Umaddl(x15, w19, w19, x20);
1365  __ Umaddl(x22, w19, w19, x21);
1366  END();
1367 
1368  RUN();
1369 
1370  ASSERT_EQUAL_64(3, x9);
1371  ASSERT_EQUAL_64(5, x10);
1372  ASSERT_EQUAL_64(5, x11);
1373  ASSERT_EQUAL_64(0x200000001UL, x12);
1374  ASSERT_EQUAL_64(0x100000003UL, x13);
1375  ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
1376  ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
1377  ASSERT_EQUAL_64(0x1, x22);
1378 
1379  TEARDOWN();
1380 }
1381 
1382 
1383 TEST(smsubl_umsubl) {
1384  INIT_V8();
1385  SETUP();
1386 
1387  START();
1388  __ Mov(x17, 1);
1389  __ Mov(x18, 0xffffffff);
1390  __ Mov(x19, 0xffffffffffffffffUL);
1391  __ Mov(x20, 4);
1392  __ Mov(x21, 0x200000000UL);
1393 
1394  __ Smsubl(x9, w17, w18, x20);
1395  __ Smsubl(x10, w18, w18, x20);
1396  __ Smsubl(x11, w19, w19, x20);
1397  __ Smsubl(x12, w19, w19, x21);
1398  __ Umsubl(x13, w17, w18, x20);
1399  __ Umsubl(x14, w18, w18, x20);
1400  __ Umsubl(x15, w19, w19, x20);
1401  __ Umsubl(x22, w19, w19, x21);
1402  END();
1403 
1404  RUN();
1405 
1406  ASSERT_EQUAL_64(5, x9);
1407  ASSERT_EQUAL_64(3, x10);
1408  ASSERT_EQUAL_64(3, x11);
1409  ASSERT_EQUAL_64(0x1ffffffffUL, x12);
1410  ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
1411  ASSERT_EQUAL_64(0x200000003UL, x14);
1412  ASSERT_EQUAL_64(0x200000003UL, x15);
1413  ASSERT_EQUAL_64(0x3ffffffffUL, x22);
1414 
1415  TEARDOWN();
1416 }
1417 
1418 
1419 TEST(div) {
1420  INIT_V8();
1421  SETUP();
1422 
1423  START();
1424  __ Mov(x16, 1);
1425  __ Mov(x17, 0xffffffff);
1426  __ Mov(x18, 0xffffffffffffffffUL);
1427  __ Mov(x19, 0x80000000);
1428  __ Mov(x20, 0x8000000000000000UL);
1429  __ Mov(x21, 2);
1430 
1431  __ Udiv(w0, w16, w16);
1432  __ Udiv(w1, w17, w16);
1433  __ Sdiv(w2, w16, w16);
1434  __ Sdiv(w3, w16, w17);
1435  __ Sdiv(w4, w17, w18);
1436 
1437  __ Udiv(x5, x16, x16);
1438  __ Udiv(x6, x17, x18);
1439  __ Sdiv(x7, x16, x16);
1440  __ Sdiv(x8, x16, x17);
1441  __ Sdiv(x9, x17, x18);
1442 
1443  __ Udiv(w10, w19, w21);
1444  __ Sdiv(w11, w19, w21);
1445  __ Udiv(x12, x19, x21);
1446  __ Sdiv(x13, x19, x21);
1447  __ Udiv(x14, x20, x21);
1448  __ Sdiv(x15, x20, x21);
1449 
1450  __ Udiv(w22, w19, w17);
1451  __ Sdiv(w23, w19, w17);
1452  __ Udiv(x24, x20, x18);
1453  __ Sdiv(x25, x20, x18);
1454 
1455  __ Udiv(x26, x16, x21);
1456  __ Sdiv(x27, x16, x21);
1457  __ Udiv(x28, x18, x21);
1458  __ Sdiv(x29, x18, x21);
1459 
1460  __ Mov(x17, 0);
1461  __ Udiv(w18, w16, w17);
1462  __ Sdiv(w19, w16, w17);
1463  __ Udiv(x20, x16, x17);
1464  __ Sdiv(x21, x16, x17);
1465  END();
1466 
1467  RUN();
1468 
1469  ASSERT_EQUAL_64(1, x0);
1470  ASSERT_EQUAL_64(0xffffffff, x1);
1471  ASSERT_EQUAL_64(1, x2);
1472  ASSERT_EQUAL_64(0xffffffff, x3);
1473  ASSERT_EQUAL_64(1, x4);
1474  ASSERT_EQUAL_64(1, x5);
1475  ASSERT_EQUAL_64(0, x6);
1476  ASSERT_EQUAL_64(1, x7);
1477  ASSERT_EQUAL_64(0, x8);
1478  ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
1479  ASSERT_EQUAL_64(0x40000000, x10);
1480  ASSERT_EQUAL_64(0xC0000000, x11);
1481  ASSERT_EQUAL_64(0x40000000, x12);
1482  ASSERT_EQUAL_64(0x40000000, x13);
1483  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
1484  ASSERT_EQUAL_64(0xC000000000000000UL, x15);
1485  ASSERT_EQUAL_64(0, x22);
1486  ASSERT_EQUAL_64(0x80000000, x23);
1487  ASSERT_EQUAL_64(0, x24);
1488  ASSERT_EQUAL_64(0x8000000000000000UL, x25);
1489  ASSERT_EQUAL_64(0, x26);
1490  ASSERT_EQUAL_64(0, x27);
1491  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
1492  ASSERT_EQUAL_64(0, x29);
1493  ASSERT_EQUAL_64(0, x18);
1494  ASSERT_EQUAL_64(0, x19);
1495  ASSERT_EQUAL_64(0, x20);
1496  ASSERT_EQUAL_64(0, x21);
1497 
1498  TEARDOWN();
1499 }
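// The final groups of results document AArch64 division edge cases: integer
// division by zero yields 0 rather than trapping (x18-x21), and the
// overflowing signed divisions INT_MIN / -1 wrap back to INT_MIN
// (x23 and x25).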
1500 
1501 
1502 TEST(rbit_rev) {
1503  INIT_V8();
1504  SETUP();
1505 
1506  START();
1507  __ Mov(x24, 0xfedcba9876543210UL);
1508  __ Rbit(w0, w24);
1509  __ Rbit(x1, x24);
1510  __ Rev16(w2, w24);
1511  __ Rev16(x3, x24);
1512  __ Rev(w4, w24);
1513  __ Rev32(x5, x24);
1514  __ Rev(x6, x24);
1515  END();
1516 
1517  RUN();
1518 
1519  ASSERT_EQUAL_64(0x084c2a6e, x0);
1520  ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
1521  ASSERT_EQUAL_64(0x54761032, x2);
1522  ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
1523  ASSERT_EQUAL_64(0x10325476, x4);
1524  ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
1525  ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
1526 
1527  TEARDOWN();
1528 }
1529 
1530 
1531 TEST(clz_cls) {
1532  INIT_V8();
1533  SETUP();
1534 
1535  START();
1536  __ Mov(x24, 0x0008000000800000UL);
1537  __ Mov(x25, 0xff800000fff80000UL);
1538  __ Mov(x26, 0);
1539  __ Clz(w0, w24);
1540  __ Clz(x1, x24);
1541  __ Clz(w2, w25);
1542  __ Clz(x3, x25);
1543  __ Clz(w4, w26);
1544  __ Clz(x5, x26);
1545  __ Cls(w6, w24);
1546  __ Cls(x7, x24);
1547  __ Cls(w8, w25);
1548  __ Cls(x9, x25);
1549  __ Cls(w10, w26);
1550  __ Cls(x11, x26);
1551  END();
1552 
1553  RUN();
1554 
1555  ASSERT_EQUAL_64(8, x0);
1556  ASSERT_EQUAL_64(12, x1);
1557  ASSERT_EQUAL_64(0, x2);
1558  ASSERT_EQUAL_64(0, x3);
1559  ASSERT_EQUAL_64(32, x4);
1560  ASSERT_EQUAL_64(64, x5);
1561  ASSERT_EQUAL_64(7, x6);
1562  ASSERT_EQUAL_64(11, x7);
1563  ASSERT_EQUAL_64(12, x8);
1564  ASSERT_EQUAL_64(8, x9);
1565  ASSERT_EQUAL_64(31, x10);
1566  ASSERT_EQUAL_64(63, x11);
1567 
1568  TEARDOWN();
1569 }
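// Cls counts the leading bits that match the sign bit, excluding the sign
// bit itself: for w24 = 0x00800000 there are 8 leading zeros (Clz) but only
// 7 sign-matching bits after the sign bit (Cls), and for a zero input Cls
// returns 31 (W) or 63 (X).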
1570 
1571 
1572 TEST(label) {
1573  INIT_V8();
1574  SETUP();
1575 
1576  Label label_1, label_2, label_3, label_4;
1577 
1578  START();
1579  __ Mov(x0, 0x1);
1580  __ Mov(x1, 0x0);
1581  __ Mov(x22, lr); // Save lr.
1582 
1583  __ B(&label_1);
1584  __ B(&label_1);
1585  __ B(&label_1); // Multiple branches to the same label.
1586  __ Mov(x0, 0x0);
1587  __ Bind(&label_2);
1588  __ B(&label_3); // Forward branch.
1589  __ Mov(x0, 0x0);
1590  __ Bind(&label_1);
1591  __ B(&label_2); // Backward branch.
1592  __ Mov(x0, 0x0);
1593  __ Bind(&label_3);
1594  __ Bl(&label_4);
1595  END();
1596 
1597  __ Bind(&label_4);
1598  __ Mov(x1, 0x1);
1599  __ Mov(lr, x22);
1600  END();
1601 
1602  RUN();
1603 
1604  ASSERT_EQUAL_64(0x1, x0);
1605  ASSERT_EQUAL_64(0x1, x1);
1606 
1607  TEARDOWN();
1608 }
1609 
1610 
1611 TEST(branch_at_start) {
1612  INIT_V8();
1613  SETUP();
1614 
1615  Label good, exit;
1616 
1617  // Test that branches can exist at the start of the buffer. (This is a
1618  // boundary condition in the label-handling code.) To achieve this, we have
1619  // to work around the code generated by START.
1620  RESET();
1621  __ B(&good);
1622 
1623  START_AFTER_RESET();
1624  __ Mov(x0, 0x0);
1625  END();
1626 
1627  __ Bind(&exit);
1628  START_AFTER_RESET();
1629  __ Mov(x0, 0x1);
1630  END();
1631 
1632  __ Bind(&good);
1633  __ B(&exit);
1634  END();
1635 
1636  RUN();
1637 
1638  ASSERT_EQUAL_64(0x1, x0);
1639  TEARDOWN();
1640 }
1641 
1642 
1643 TEST(adr) {
1644  INIT_V8();
1645  SETUP();
1646 
1647  Label label_1, label_2, label_3, label_4;
1648 
1649  START();
1650  __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
1651  __ Adr(x1, &label_3); // Set to zero to indicate success.
1652 
1653  __ Adr(x2, &label_1); // Multiple forward references to the same label.
1654  __ Adr(x3, &label_1);
1655  __ Adr(x4, &label_1);
1656 
1657  __ Bind(&label_2);
1658  __ Eor(x5, x2, Operand(x3)); // Ensure that x2, x3 and x4 are identical.
1659  __ Eor(x6, x2, Operand(x4));
1660  __ Orr(x0, x0, Operand(x5));
1661  __ Orr(x0, x0, Operand(x6));
1662  __ Br(x2); // label_1, label_3
1663 
1664  __ Bind(&label_3);
1665  __ Adr(x2, &label_3); // Self-reference (offset 0).
1666  __ Eor(x1, x1, Operand(x2));
1667  __ Adr(x2, &label_4); // Simple forward reference.
1668  __ Br(x2); // label_4
1669 
1670  __ Bind(&label_1);
1671  __ Adr(x2, &label_3); // Multiple reverse references to the same label.
1672  __ Adr(x3, &label_3);
1673  __ Adr(x4, &label_3);
1674  __ Adr(x5, &label_2); // Simple reverse reference.
1675  __ Br(x5); // label_2
1676 
1677  __ Bind(&label_4);
1678  END();
1679 
1680  RUN();
1681 
1682  ASSERT_EQUAL_64(0x0, x0);
1683  ASSERT_EQUAL_64(0x0, x1);
1684 
1685  TEARDOWN();
1686 }
1687 
1688 
1689 TEST(branch_cond) {
1690  INIT_V8();
1691  SETUP();
1692 
1693  Label wrong;
1694 
1695  START();
1696  __ Mov(x0, 0x1);
1697  __ Mov(x1, 0x1);
1698  __ Mov(x2, 0x8000000000000000L);
1699 
1700  // For each 'cmp' instruction below, condition codes other than the ones
1701  // following it would branch.
1702 
1703  __ Cmp(x1, 0);
1704  __ B(&wrong, eq);
1705  __ B(&wrong, lo);
1706  __ B(&wrong, mi);
1707  __ B(&wrong, vs);
1708  __ B(&wrong, ls);
1709  __ B(&wrong, lt);
1710  __ B(&wrong, le);
1711  Label ok_1;
1712  __ B(&ok_1, ne);
1713  __ Mov(x0, 0x0);
1714  __ Bind(&ok_1);
1715 
1716  __ Cmp(x1, 1);
1717  __ B(&wrong, ne);
1718  __ B(&wrong, lo);
1719  __ B(&wrong, mi);
1720  __ B(&wrong, vs);
1721  __ B(&wrong, hi);
1722  __ B(&wrong, lt);
1723  __ B(&wrong, gt);
1724  Label ok_2;
1725  __ B(&ok_2, pl);
1726  __ Mov(x0, 0x0);
1727  __ Bind(&ok_2);
1728 
1729  __ Cmp(x1, 2);
1730  __ B(&wrong, eq);
1731  __ B(&wrong, hs);
1732  __ B(&wrong, pl);
1733  __ B(&wrong, vs);
1734  __ B(&wrong, hi);
1735  __ B(&wrong, ge);
1736  __ B(&wrong, gt);
1737  Label ok_3;
1738  __ B(&ok_3, vc);
1739  __ Mov(x0, 0x0);
1740  __ Bind(&ok_3);
1741 
1742  __ Cmp(x2, 1);
1743  __ B(&wrong, eq);
1744  __ B(&wrong, lo);
1745  __ B(&wrong, mi);
1746  __ B(&wrong, vc);
1747  __ B(&wrong, ls);
1748  __ B(&wrong, ge);
1749  __ B(&wrong, gt);
1750  Label ok_4;
1751  __ B(&ok_4, le);
1752  __ Mov(x0, 0x0);
1753  __ Bind(&ok_4);
1754 
1755  Label ok_5;
1756  __ b(&ok_5, al);
1757  __ Mov(x0, 0x0);
1758  __ Bind(&ok_5);
1759 
1760  Label ok_6;
1761  __ b(&ok_6, nv);
1762  __ Mov(x0, 0x0);
1763  __ Bind(&ok_6);
1764 
1765  END();
1766 
1767  __ Bind(&wrong);
1768  __ Mov(x0, 0x0);
1769  END();
1770 
1771  RUN();
1772 
1773  ASSERT_EQUAL_64(0x1, x0);
1774 
1775  TEARDOWN();
1776 }
1777 
1778 
1779 TEST(branch_to_reg) {
1780  INIT_V8();
1781  SETUP();
1782 
1783  // Test br.
1784  Label fn1, after_fn1;
1785 
1786  START();
1787  __ Mov(x29, lr);
1788 
1789  __ Mov(x1, 0);
1790  __ B(&after_fn1);
1791 
1792  __ Bind(&fn1);
1793  __ Mov(x0, lr);
1794  __ Mov(x1, 42);
1795  __ Br(x0);
1796 
1797  __ Bind(&after_fn1);
1798  __ Bl(&fn1);
1799 
1800  // Test blr.
1801  Label fn2, after_fn2;
1802 
1803  __ Mov(x2, 0);
1804  __ B(&after_fn2);
1805 
1806  __ Bind(&fn2);
1807  __ Mov(x0, lr);
1808  __ Mov(x2, 84);
1809  __ Blr(x0);
1810 
1811  __ Bind(&after_fn2);
1812  __ Bl(&fn2);
1813  __ Mov(x3, lr);
1814 
1815  __ Mov(lr, x29);
1816  END();
1817 
1818  RUN();
1819 
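  // x0 holds the return address installed by Bl(&fn2) (the address of the
  // following Mov(x3, lr)), while x3 holds the lr value set by the Blr
  // inside fn2 (the address of Bl(&fn2) itself), one instruction earlier: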
1820  ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
1821  ASSERT_EQUAL_64(42, x1);
1822  ASSERT_EQUAL_64(84, x2);
1823 
1824  TEARDOWN();
1825 }
1826 
1827 
1828 TEST(compare_branch) {
1829  INIT_V8();
1830  SETUP();
1831 
1832  START();
1833  __ Mov(x0, 0);
1834  __ Mov(x1, 0);
1835  __ Mov(x2, 0);
1836  __ Mov(x3, 0);
1837  __ Mov(x4, 0);
1838  __ Mov(x5, 0);
1839  __ Mov(x16, 0);
1840  __ Mov(x17, 42);
1841 
1842  Label zt, zt_end;
1843  __ Cbz(w16, &zt);
1844  __ B(&zt_end);
1845  __ Bind(&zt);
1846  __ Mov(x0, 1);
1847  __ Bind(&zt_end);
1848 
1849  Label zf, zf_end;
1850  __ Cbz(x17, &zf);
1851  __ B(&zf_end);
1852  __ Bind(&zf);
1853  __ Mov(x1, 1);
1854  __ Bind(&zf_end);
1855 
1856  Label nzt, nzt_end;
1857  __ Cbnz(w17, &nzt);
1858  __ B(&nzt_end);
1859  __ Bind(&nzt);
1860  __ Mov(x2, 1);
1861  __ Bind(&nzt_end);
1862 
1863  Label nzf, nzf_end;
1864  __ Cbnz(x16, &nzf);
1865  __ B(&nzf_end);
1866  __ Bind(&nzf);
1867  __ Mov(x3, 1);
1868  __ Bind(&nzf_end);
1869 
1870  __ Mov(x18, 0xffffffff00000000UL);
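  // Only the low 32 bits of x18 participate in the W-register compares
  // below; they are zero, so Cbz w18 is taken and Cbnz w18 is not.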
1871 
1872  Label a, a_end;
1873  __ Cbz(w18, &a);
1874  __ B(&a_end);
1875  __ Bind(&a);
1876  __ Mov(x4, 1);
1877  __ Bind(&a_end);
1878 
1879  Label b, b_end;
1880  __ Cbnz(w18, &b);
1881  __ B(&b_end);
1882  __ Bind(&b);
1883  __ Mov(x5, 1);
1884  __ Bind(&b_end);
1885 
1886  END();
1887 
1888  RUN();
1889 
1890  ASSERT_EQUAL_64(1, x0);
1891  ASSERT_EQUAL_64(0, x1);
1892  ASSERT_EQUAL_64(1, x2);
1893  ASSERT_EQUAL_64(0, x3);
1894  ASSERT_EQUAL_64(1, x4);
1895  ASSERT_EQUAL_64(0, x5);
1896 
1897  TEARDOWN();
1898 }
1899 
1900 
1901 TEST(test_branch) {
1902  INIT_V8();
1903  SETUP();
1904 
1905  START();
1906  __ Mov(x0, 0);
1907  __ Mov(x1, 0);
1908  __ Mov(x2, 0);
1909  __ Mov(x3, 0);
1910  __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
1911 
1912  Label bz, bz_end;
1913  __ Tbz(w16, 0, &bz);
1914  __ B(&bz_end);
1915  __ Bind(&bz);
1916  __ Mov(x0, 1);
1917  __ Bind(&bz_end);
1918 
1919  Label bo, bo_end;
1920  __ Tbz(x16, 63, &bo);
1921  __ B(&bo_end);
1922  __ Bind(&bo);
1923  __ Mov(x1, 1);
1924  __ Bind(&bo_end);
1925 
1926  Label nbz, nbz_end;
1927  __ Tbnz(x16, 61, &nbz);
1928  __ B(&nbz_end);
1929  __ Bind(&nbz);
1930  __ Mov(x2, 1);
1931  __ Bind(&nbz_end);
1932 
1933  Label nbo, nbo_end;
1934  __ Tbnz(w16, 2, &nbo);
1935  __ B(&nbo_end);
1936  __ Bind(&nbo);
1937  __ Mov(x3, 1);
1938  __ Bind(&nbo_end);
1939  END();
1940 
1941  RUN();
1942 
1943  ASSERT_EQUAL_64(1, x0);
1944  ASSERT_EQUAL_64(0, x1);
1945  ASSERT_EQUAL_64(1, x2);
1946  ASSERT_EQUAL_64(0, x3);
1947 
1948  TEARDOWN();
1949 }
1950 
1951 
1952 TEST(far_branch_backward) {
1953  INIT_V8();
1954 
1955  // Test that the MacroAssembler correctly resolves backward branches to labels
1956  // that are outside the immediate range of branch instructions.
1957  int max_range =
1958  std::max(Instruction::ImmBranchRange(TestBranchType),
1959  std::max(Instruction::ImmBranchRange(CompareBranchType),
1960  Instruction::ImmBranchRange(CondBranchType)));
1961 
1962  SETUP_SIZE(max_range + 1000 * kInstructionSize);
1963 
1964  START();
1965 
1966  Label done, fail;
1967  Label test_tbz, test_cbz, test_bcond;
1968  Label success_tbz, success_cbz, success_bcond;
1969 
1970  __ Mov(x0, 0);
1971  __ Mov(x1, 1);
1972  __ Mov(x10, 0);
1973 
1974  __ B(&test_tbz);
1975  __ Bind(&success_tbz);
1976  __ Orr(x0, x0, 1 << 0);
1977  __ B(&test_cbz);
1978  __ Bind(&success_cbz);
1979  __ Orr(x0, x0, 1 << 1);
1980  __ B(&test_bcond);
1981  __ Bind(&success_bcond);
1982  __ Orr(x0, x0, 1 << 2);
1983 
1984  __ B(&done);
1985 
1986  // Generate enough code to overflow the immediate range of the three types of
1987  // branches below.
1988  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
1989  if (i % 100 == 0) {
1990  // If we do land in this code, we do not want to execute so many nops
1991  // before reaching the end of the test (especially if tracing is activated).
1992  __ B(&fail);
1993  } else {
1994  __ Nop();
1995  }
1996  }
1997  __ B(&fail);
1998 
1999  __ Bind(&test_tbz);
2000  __ Tbz(x10, 7, &success_tbz);
2001  __ Bind(&test_cbz);
2002  __ Cbz(x10, &success_cbz);
2003  __ Bind(&test_bcond);
2004  __ Cmp(x10, 0);
2005  __ B(eq, &success_bcond);
2006 
2007  // For each out-of-range branch instruction, at least two instructions should
2008  // have been generated.
2009  CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
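 // A plausible shape for each rewritten branch, assuming the usual
 // condition-inversion strategy, is:
 //   tbnz x10, #7, skip
 //   b success_tbz
 //  skip:
 // which accounts for the bound of seven instructions above (two per
 // branch, plus the Cmp).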
2010 
2011  __ Bind(&fail);
2012  __ Mov(x1, 0);
2013  __ Bind(&done);
2014 
2015  END();
2016 
2017  RUN();
2018 
2019  ASSERT_EQUAL_64(0x7, x0);
2020  ASSERT_EQUAL_64(0x1, x1);
2021 
2022  TEARDOWN();
2023 }
2024 
2025 
2026 TEST(far_branch_simple_veneer) {
2027  INIT_V8();
2028 
2029  // Test that the MacroAssembler correctly emits veneers for forward branches
2030  // to labels that are outside the immediate range of branch instructions.
2031  int max_range =
2032  std::max(Instruction::ImmBranchRange(TestBranchType),
2033  std::max(Instruction::ImmBranchRange(CompareBranchType),
2034  Instruction::ImmBranchRange(CondBranchType)));
2035 
2036  SETUP_SIZE(max_range + 1000 * kInstructionSize);
2037 
2038  START();
2039 
2040  Label done, fail;
2041  Label test_tbz, test_cbz, test_bcond;
2042  Label success_tbz, success_cbz, success_bcond;
2043 
2044  __ Mov(x0, 0);
2045  __ Mov(x1, 1);
2046  __ Mov(x10, 0);
2047 
2048  __ Bind(&test_tbz);
2049  __ Tbz(x10, 7, &success_tbz);
2050  __ Bind(&test_cbz);
2051  __ Cbz(x10, &success_cbz);
2052  __ Bind(&test_bcond);
2053  __ Cmp(x10, 0);
2054  __ B(eq, &success_bcond);
2055 
2056  // Generate enough code to overflow the immediate range of the three types of
2057  // branches below.
2058  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2059  if (i % 100 == 0) {
2060  // If we do land in this code, we do not want to execute so many nops
2061  // before reaching the end of the test (especially if tracing is activated).
2062  // Also, the branches give the MacroAssembler the opportunity to emit the
2063  // veneers.
2064  __ B(&fail);
2065  } else {
2066  __ Nop();
2067  }
2068  }
2069  __ B(&fail);
2070 
2071  __ Bind(&success_tbz);
2072  __ Orr(x0, x0, 1 << 0);
2073  __ B(&test_cbz);
2074  __ Bind(&success_cbz);
2075  __ Orr(x0, x0, 1 << 1);
2076  __ B(&test_bcond);
2077  __ Bind(&success_bcond);
2078  __ Orr(x0, x0, 1 << 2);
2079 
2080  __ B(&done);
2081  __ Bind(&fail);
2082  __ Mov(x1, 0);
2083  __ Bind(&done);
2084 
2085  END();
2086 
2087  RUN();
2088 
2089  ASSERT_EQUAL_64(0x7, x0);
2090  ASSERT_EQUAL_64(0x1, x1);
2091 
2092  TEARDOWN();
2093 }
2094 
2095 
2096 TEST(far_branch_veneer_link_chain) {
2097  INIT_V8();
2098 
2099  // Test that the MacroAssembler correctly emits veneers for forward branches
2100  // that target out-of-range labels and are part of a chain of multiple
2101  // branches to that label.
2102  //
2103  // We test the three situations with the different types of instruction:
2104  // (1)- When the branch is at the start of the chain with tbz.
2105  // (2)- When the branch is in the middle of the chain with cbz.
2106  // (3)- When the branch is at the end of the chain with bcond.
2107  int max_range =
2108  std::max(Instruction::ImmBranchRange(TestBranchType),
2109  std::max(Instruction::ImmBranchRange(CompareBranchType),
2110  Instruction::ImmBranchRange(CondBranchType)));
2111 
2112  SETUP_SIZE(max_range + 1000 * kInstructionSize);
2113 
2114  START();
2115 
2116  Label skip, fail, done;
2117  Label test_tbz, test_cbz, test_bcond;
2118  Label success_tbz, success_cbz, success_bcond;
2119 
2120  __ Mov(x0, 0);
2121  __ Mov(x1, 1);
2122  __ Mov(x10, 0);
2123 
2124  __ B(&skip);
2125  // Branches at the start of the chain for situations (2) and (3).
2126  __ B(&success_cbz);
2127  __ B(&success_bcond);
2128  __ Nop();
2129  __ B(&success_bcond);
2130  __ B(&success_cbz);
2131  __ Bind(&skip);
2132 
2133  __ Bind(&test_tbz);
2134  __ Tbz(x10, 7, &success_tbz);
2135  __ Bind(&test_cbz);
2136  __ Cbz(x10, &success_cbz);
2137  __ Bind(&test_bcond);
2138  __ Cmp(x10, 0);
2139  __ B(eq, &success_bcond);
2140 
2141  skip.Unuse();
2142  __ B(&skip);
2143  // Branches at the end of the chain for situations (1) and (2).
2144  __ B(&success_cbz);
2145  __ B(&success_tbz);
2146  __ Nop();
2147  __ B(&success_tbz);
2148  __ B(&success_cbz);
2149  __ Bind(&skip);
2150 
2151  // Generate enough code to overflow the immediate range of the three types of
2152  // branches below.
2153  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2154  if (i % 100 == 0) {
2155  // If we land in this code, we do not want to execute so many nops
2156  // before reaching the end of the test (especially if tracing is
2157  // activated). The branches also give the MacroAssembler an opportunity
2158  // to emit the veneers.
2159  __ B(&fail);
2160  } else {
2161  __ Nop();
2162  }
2163  }
2164  __ B(&fail);
2165 
2166  __ Bind(&success_tbz);
2167  __ Orr(x0, x0, 1 << 0);
2168  __ B(&test_cbz);
2169  __ Bind(&success_cbz);
2170  __ Orr(x0, x0, 1 << 1);
2171  __ B(&test_bcond);
2172  __ Bind(&success_bcond);
2173  __ Orr(x0, x0, 1 << 2);
2174 
2175  __ B(&done);
2176  __ Bind(&fail);
2177  __ Mov(x1, 0);
2178  __ Bind(&done);
2179 
2180  END();
2181 
2182  RUN();
2183 
2184  ASSERT_EQUAL_64(0x7, x0);
2185  ASSERT_EQUAL_64(0x1, x1);
2186 
2187  TEARDOWN();
2188 }
2189 
2190 
2191 TEST(far_branch_veneer_broken_link_chain) {
2192  INIT_V8();
2193 
2194  // Check that the MacroAssembler correctly handles the case where a branch is
2195  // removed from the link chain of a label and the two links on either side of
2196  // the removed branch cannot be linked together (out of range).
2197  //
2198  // We test with tbz because it has a small range.
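// Because each link is stored in a branch's own offset field, the distance
// between two neighbouring links must itself fit in the shorter branch's
// range. When a branch in the middle of the chain is redirected to a veneer,
// it leaves the chain; if the links on either side are then too far apart to
// reference each other, the chain cannot simply be relinked.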
2199  int max_range = Instruction::ImmBranchRange(TestBranchType);
2200  int inter_range = max_range / 2 + max_range / 10;
2201 
2202  SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
2203 
2204  START();
2205 
2206  Label skip, fail, done;
2207  Label test_1, test_2, test_3;
2208  Label far_target;
2209 
2210  __ Mov(x0, 0); // Indicates the origin of the branch.
2211  __ Mov(x1, 1);
2212  __ Mov(x10, 0);
2213 
2214  // First instruction in the label chain.
2215  __ Bind(&test_1);
2216  __ Mov(x0, 1);
2217  __ B(&far_target);
2218 
2219  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2220  if (i % 100 == 0) {
2221  // Do not allow generating veneers. They should not be needed.
2222  __ b(&fail);
2223  } else {
2224  __ Nop();
2225  }
2226  }
2227 
2228  // Will need a veneer to reach the target.
2229  __ Bind(&test_2);
2230  __ Mov(x0, 2);
2231  __ Tbz(x10, 7, &far_target);
2232 
2233  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2234  if (i % 100 == 0) {
2235  // Do not allow generating veneers. They should not be needed.
2236  __ b(&fail);
2237  } else {
2238  __ Nop();
2239  }
2240  }
2241 
2242  // Does not need a veneer to reach the target, but the initial branch
2243  // instruction is out of range.
2244  __ Bind(&test_3);
2245  __ Mov(x0, 3);
2246  __ Tbz(x10, 7, &far_target);
2247 
2248  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2249  if (i % 100 == 0) {
2250  // Allow generating veneers.
2251  __ B(&fail);
2252  } else {
2253  __ Nop();
2254  }
2255  }
2256 
2257  __ B(&fail);
2258 
2259  __ Bind(&far_target);
2260  __ Cmp(x0, 1);
2261  __ B(eq, &test_2);
2262  __ Cmp(x0, 2);
2263  __ B(eq, &test_3);
2264 
2265  __ B(&done);
2266  __ Bind(&fail);
2267  __ Mov(x1, 0);
2268  __ Bind(&done);
2269 
2270  END();
2271 
2272  RUN();
2273 
2274  ASSERT_EQUAL_64(0x3, x0);
2275  ASSERT_EQUAL_64(0x1, x1);
2276 
2277  TEARDOWN();
2278 }
2279 
2280 
2281 TEST(branch_type) {
2282  INIT_V8();
2283 
2284  SETUP();
2285 
2286  Label fail, done;
2287 
2288  START();
2289  __ Mov(x0, 0x0);
2290  __ Mov(x10, 0x7);
2291  __ Mov(x11, 0x0);
2292 
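// The generic B(label, type, reg, bit) overloads pick the underlying
// instruction from the BranchType: condition codes map to b.cond,
// reg_zero/reg_not_zero to cbz/cbnz, and reg_bit_clear/reg_bit_set to
// tbz/tbnz. The register (and bit) arguments only apply to the
// compare-and-branch and test-and-branch forms.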
2293  // Test branches that are not taken.
2294  __ Cmp(x10, 0x7);
2295  __ B(&fail, ne);
2296  __ B(&fail, never);
2297  __ B(&fail, reg_zero, x10);
2298  __ B(&fail, reg_not_zero, x11);
2299  __ B(&fail, reg_bit_clear, x10, 0);
2300  __ B(&fail, reg_bit_set, x10, 3);
2301 
2302  // Test taken branches.
2303  Label l1, l2, l3, l4, l5;
2304  __ Cmp(x10, 0x7);
2305  __ B(&l1, eq);
2306  __ B(&fail);
2307  __ Bind(&l1);
2308  __ B(&l2, always);
2309  __ B(&fail);
2310  __ Bind(&l2);
2311  __ B(&l3, reg_not_zero, x10);
2312  __ B(&fail);
2313  __ Bind(&l3);
2314  __ B(&l4, reg_bit_clear, x10, 15);
2315  __ B(&fail);
2316  __ Bind(&l4);
2317  __ B(&l5, reg_bit_set, x10, 1);
2318  __ B(&fail);
2319  __ Bind(&l5);
2320 
2321  __ B(&done);
2322 
2323  __ Bind(&fail);
2324  __ Mov(x0, 0x1);
2325 
2326  __ Bind(&done);
2327 
2328  END();
2329 
2330  RUN();
2331 
2332  ASSERT_EQUAL_64(0x0, x0);
2333 
2334  TEARDOWN();
2335 }
2336 
2337 
2338 TEST(ldr_str_offset) {
2339  INIT_V8();
2340  SETUP();
2341 
2342  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2343  uint64_t dst[5] = {0, 0, 0, 0, 0};
2344  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2345  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2346 
2347  START();
2348  __ Mov(x17, src_base);
2349  __ Mov(x18, dst_base);
2350  __ Ldr(w0, MemOperand(x17));
2351  __ Str(w0, MemOperand(x18));
2352  __ Ldr(w1, MemOperand(x17, 4));
2353  __ Str(w1, MemOperand(x18, 12));
2354  __ Ldr(x2, MemOperand(x17, 8));
2355  __ Str(x2, MemOperand(x18, 16));
2356  __ Ldrb(w3, MemOperand(x17, 1));
2357  __ Strb(w3, MemOperand(x18, 25));
2358  __ Ldrh(w4, MemOperand(x17, 2));
2359  __ Strh(w4, MemOperand(x18, 33));
2360  END();
2361 
2362  RUN();
2363 
2364  ASSERT_EQUAL_64(0x76543210, x0);
2365  ASSERT_EQUAL_64(0x76543210, dst[0]);
2366  ASSERT_EQUAL_64(0xfedcba98, x1);
2367  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2368  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
2369  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2370  ASSERT_EQUAL_64(0x32, x3);
2371  ASSERT_EQUAL_64(0x3200, dst[3]);
2372  ASSERT_EQUAL_64(0x7654, x4);
2373  ASSERT_EQUAL_64(0x765400, dst[4]);
2374  ASSERT_EQUAL_64(src_base, x17);
2375  ASSERT_EQUAL_64(dst_base, x18);
2376 
2377  TEARDOWN();
2378 }
2379 
2380 
2381 TEST(ldr_str_wide) {
2382  INIT_V8();
2383  SETUP();
2384 
2385  uint32_t src[8192];
2386  uint32_t dst[8192];
2387  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2388  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2389  memset(src, 0xaa, 8192 * sizeof(src[0]));
2390  memset(dst, 0xaa, 8192 * sizeof(dst[0]));
2391  src[0] = 0;
2392  src[6144] = 6144;
2393  src[8191] = 8191;
2394 
2395  START();
2396  __ Mov(x22, src_base);
2397  __ Mov(x23, dst_base);
2398  __ Mov(x24, src_base);
2399  __ Mov(x25, dst_base);
2400  __ Mov(x26, src_base);
2401  __ Mov(x27, dst_base);
2402 
2403  __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
2404  __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
2405  __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
2406  __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
2407  __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
2408  __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
2409  END();
2410 
2411  RUN();
2412 
2413  ASSERT_EQUAL_32(8191, w0);
2414  ASSERT_EQUAL_32(8191, dst[8191]);
2415  ASSERT_EQUAL_64(src_base, x22);
2416  ASSERT_EQUAL_64(dst_base, x23);
2417  ASSERT_EQUAL_32(0, w1);
2418  ASSERT_EQUAL_32(0, dst[0]);
2419  ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2420  ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2421  ASSERT_EQUAL_32(6144, w2);
2422  ASSERT_EQUAL_32(6144, dst[6144]);
2423  ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2424  ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2425 
2426  TEARDOWN();
2427 }
2428 
2429 
2430 TEST(ldr_str_preindex) {
2431  INIT_V8();
2432  SETUP();
2433 
2434  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2435  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2436  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2437  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2438 
2439  START();
2440  __ Mov(x17, src_base);
2441  __ Mov(x18, dst_base);
2442  __ Mov(x19, src_base);
2443  __ Mov(x20, dst_base);
2444  __ Mov(x21, src_base + 16);
2445  __ Mov(x22, dst_base + 40);
2446  __ Mov(x23, src_base);
2447  __ Mov(x24, dst_base);
2448  __ Mov(x25, src_base);
2449  __ Mov(x26, dst_base);
2450  __ Ldr(w0, MemOperand(x17, 4, PreIndex));
2451  __ Str(w0, MemOperand(x18, 12, PreIndex));
2452  __ Ldr(x1, MemOperand(x19, 8, PreIndex));
2453  __ Str(x1, MemOperand(x20, 16, PreIndex));
2454  __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2455  __ Str(w2, MemOperand(x22, -4, PreIndex));
2456  __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
2457  __ Strb(w3, MemOperand(x24, 25, PreIndex));
2458  __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
2459  __ Strh(w4, MemOperand(x26, 41, PreIndex));
2460  END();
2461 
2462  RUN();
2463 
2464  ASSERT_EQUAL_64(0xfedcba98, x0);
2465  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2466  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
2467  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2468  ASSERT_EQUAL_64(0x01234567, x2);
2469  ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
2470  ASSERT_EQUAL_64(0x32, x3);
2471  ASSERT_EQUAL_64(0x3200, dst[3]);
2472  ASSERT_EQUAL_64(0x9876, x4);
2473  ASSERT_EQUAL_64(0x987600, dst[5]);
2474  ASSERT_EQUAL_64(src_base + 4, x17);
2475  ASSERT_EQUAL_64(dst_base + 12, x18);
2476  ASSERT_EQUAL_64(src_base + 8, x19);
2477  ASSERT_EQUAL_64(dst_base + 16, x20);
2478  ASSERT_EQUAL_64(src_base + 12, x21);
2479  ASSERT_EQUAL_64(dst_base + 36, x22);
2480  ASSERT_EQUAL_64(src_base + 1, x23);
2481  ASSERT_EQUAL_64(dst_base + 25, x24);
2482  ASSERT_EQUAL_64(src_base + 3, x25);
2483  ASSERT_EQUAL_64(dst_base + 41, x26);
2484 
2485  TEARDOWN();
2486 }
2487 
2488 
2489 TEST(ldr_str_postindex) {
2490  INIT_V8();
2491  SETUP();
2492 
2493  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2494  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2495  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2496  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2497 
2498  START();
2499  __ Mov(x17, src_base + 4);
2500  __ Mov(x18, dst_base + 12);
2501  __ Mov(x19, src_base + 8);
2502  __ Mov(x20, dst_base + 16);
2503  __ Mov(x21, src_base + 8);
2504  __ Mov(x22, dst_base + 32);
2505  __ Mov(x23, src_base + 1);
2506  __ Mov(x24, dst_base + 25);
2507  __ Mov(x25, src_base + 3);
2508  __ Mov(x26, dst_base + 41);
2509  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
2510  __ Str(w0, MemOperand(x18, 12, PostIndex));
2511  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
2512  __ Str(x1, MemOperand(x20, 16, PostIndex));
2513  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
2514  __ Str(x2, MemOperand(x22, -32, PostIndex));
2515  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
2516  __ Strb(w3, MemOperand(x24, 5, PostIndex));
2517  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
2518  __ Strh(w4, MemOperand(x26, -41, PostIndex));
2519  END();
2520 
2521  RUN();
2522 
2523  ASSERT_EQUAL_64(0xfedcba98, x0);
2524  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2525  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
2526  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2527  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
2528  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
2529  ASSERT_EQUAL_64(0x32, x3);
2530  ASSERT_EQUAL_64(0x3200, dst[3]);
2531  ASSERT_EQUAL_64(0x9876, x4);
2532  ASSERT_EQUAL_64(0x987600, dst[5]);
2533  ASSERT_EQUAL_64(src_base + 8, x17);
2534  ASSERT_EQUAL_64(dst_base + 24, x18);
2535  ASSERT_EQUAL_64(src_base + 16, x19);
2536  ASSERT_EQUAL_64(dst_base + 32, x20);
2537  ASSERT_EQUAL_64(src_base, x21);
2538  ASSERT_EQUAL_64(dst_base, x22);
2539  ASSERT_EQUAL_64(src_base + 2, x23);
2540  ASSERT_EQUAL_64(dst_base + 30, x24);
2541  ASSERT_EQUAL_64(src_base, x25);
2542  ASSERT_EQUAL_64(dst_base, x26);
2543 
2544  TEARDOWN();
2545 }
2546 
2547 
2548 TEST(load_signed) {
2549  INIT_V8();
2550  SETUP();
2551 
2552  uint32_t src[2] = {0x80008080, 0x7fff7f7f};
2553  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2554 
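// Ldrsb, Ldrsh and Ldrsw load a byte, half-word or word and sign-extend it
// to the full width of the destination: bit 31 for a W register, bit 63 for
// an X register. src[0] has the sign bit set at every access size, src[1]
// (at byte offset 4, little-endian) has it clear.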
2555  START();
2556  __ Mov(x24, src_base);
2557  __ Ldrsb(w0, MemOperand(x24));
2558  __ Ldrsb(w1, MemOperand(x24, 4));
2559  __ Ldrsh(w2, MemOperand(x24));
2560  __ Ldrsh(w3, MemOperand(x24, 4));
2561  __ Ldrsb(x4, MemOperand(x24));
2562  __ Ldrsb(x5, MemOperand(x24, 4));
2563  __ Ldrsh(x6, MemOperand(x24));
2564  __ Ldrsh(x7, MemOperand(x24, 4));
2565  __ Ldrsw(x8, MemOperand(x24));
2566  __ Ldrsw(x9, MemOperand(x24, 4));
2567  END();
2568 
2569  RUN();
2570 
2571  ASSERT_EQUAL_64(0xffffff80, x0);
2572  ASSERT_EQUAL_64(0x0000007f, x1);
2573  ASSERT_EQUAL_64(0xffff8080, x2);
2574  ASSERT_EQUAL_64(0x00007f7f, x3);
2575  ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
2576  ASSERT_EQUAL_64(0x000000000000007fUL, x5);
2577  ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
2578  ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
2579  ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
2580  ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
2581 
2582  TEARDOWN();
2583 }
2584 
2585 
2586 TEST(load_store_regoffset) {
2587  INIT_V8();
2588  SETUP();
2589 
2590  uint32_t src[3] = {1, 2, 3};
2591  uint32_t dst[4] = {0, 0, 0, 0};
2592  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2593  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2594 
2595  START();
2596  __ Mov(x16, src_base);
2597  __ Mov(x17, dst_base);
2598  __ Mov(x18, src_base + 3 * sizeof(src[0]));
2599  __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
2600  __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
2601  __ Mov(x24, 0);
2602  __ Mov(x25, 4);
2603  __ Mov(x26, -4);
2604  __ Mov(x27, 0xfffffffc); // 32-bit -4.
2605  __ Mov(x28, 0xfffffffe); // 32-bit -2.
2606  __ Mov(x29, 0xffffffff); // 32-bit -1.
2607 
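// Register-offset addressing forms the address as base + offset register,
// where the offset may be sign- or zero-extended (e.g. SXTW treats the low
// 32 bits as signed) and optionally shifted left by the access-size amount.
// MemOperand(x18, x28, SXTW, 2), for example, addresses
// x18 + (sign_extend_32_to_64(w28) << 2).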
2608  __ Ldr(w0, MemOperand(x16, x24));
2609  __ Ldr(x1, MemOperand(x16, x25));
2610  __ Ldr(w2, MemOperand(x18, x26));
2611  __ Ldr(w3, MemOperand(x18, x27, SXTW));
2612  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
2613  __ Str(w0, MemOperand(x17, x24));
2614  __ Str(x1, MemOperand(x17, x25));
2615  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
2616  END();
2617 
2618  RUN();
2619 
2620  ASSERT_EQUAL_64(1, x0);
2621  ASSERT_EQUAL_64(0x0000000300000002UL, x1);
2622  ASSERT_EQUAL_64(3, x2);
2623  ASSERT_EQUAL_64(3, x3);
2624  ASSERT_EQUAL_64(2, x4);
2625  ASSERT_EQUAL_32(1, dst[0]);
2626  ASSERT_EQUAL_32(2, dst[1]);
2627  ASSERT_EQUAL_32(3, dst[2]);
2628  ASSERT_EQUAL_32(3, dst[3]);
2629 
2630  TEARDOWN();
2631 }
2632 
2633 
2634 TEST(load_store_float) {
2635  INIT_V8();
2636  SETUP();
2637 
2638  float src[3] = {1.0, 2.0, 3.0};
2639  float dst[3] = {0.0, 0.0, 0.0};
2640  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2641  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2642 
2643  START();
2644  __ Mov(x17, src_base);
2645  __ Mov(x18, dst_base);
2646  __ Mov(x19, src_base);
2647  __ Mov(x20, dst_base);
2648  __ Mov(x21, src_base);
2649  __ Mov(x22, dst_base);
2650  __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
2651  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2652  __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
2653  __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2654  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2655  __ Str(s2, MemOperand(x22, sizeof(dst[0])));
2656  END();
2657 
2658  RUN();
2659 
2660  ASSERT_EQUAL_FP32(2.0, s0);
2661  ASSERT_EQUAL_FP32(2.0, dst[0]);
2662  ASSERT_EQUAL_FP32(1.0, s1);
2663  ASSERT_EQUAL_FP32(1.0, dst[2]);
2664  ASSERT_EQUAL_FP32(3.0, s2);
2665  ASSERT_EQUAL_FP32(3.0, dst[1]);
2666  ASSERT_EQUAL_64(src_base, x17);
2667  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2668  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
2669  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2670  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2671  ASSERT_EQUAL_64(dst_base, x22);
2672 
2673  TEARDOWN();
2674 }
2675 
2676 
2677 TEST(load_store_double) {
2678  INIT_V8();
2679  SETUP();
2680 
2681  double src[3] = {1.0, 2.0, 3.0};
2682  double dst[3] = {0.0, 0.0, 0.0};
2683  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2684  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2685 
2686  START();
2687  __ Mov(x17, src_base);
2688  __ Mov(x18, dst_base);
2689  __ Mov(x19, src_base);
2690  __ Mov(x20, dst_base);
2691  __ Mov(x21, src_base);
2692  __ Mov(x22, dst_base);
2693  __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
2694  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2695  __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
2696  __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2697  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2698  __ Str(d2, MemOperand(x22, sizeof(dst[0])));
2699  END();
2700 
2701  RUN();
2702 
2703  ASSERT_EQUAL_FP64(2.0, d0);
2704  ASSERT_EQUAL_FP64(2.0, dst[0]);
2705  ASSERT_EQUAL_FP64(1.0, d1);
2706  ASSERT_EQUAL_FP64(1.0, dst[2]);
2707  ASSERT_EQUAL_FP64(3.0, d2);
2708  ASSERT_EQUAL_FP64(3.0, dst[1]);
2709  ASSERT_EQUAL_64(src_base, x17);
2710  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2711  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
2712  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2713  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2714  ASSERT_EQUAL_64(dst_base, x22);
2715 
2716  TEARDOWN();
2717 }
2718 
2719 
2720 TEST(ldp_stp_float) {
2721  INIT_V8();
2722  SETUP();
2723 
2724  float src[2] = {1.0, 2.0};
2725  float dst[3] = {0.0, 0.0, 0.0};
2726  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2727  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2728 
2729  START();
2730  __ Mov(x16, src_base);
2731  __ Mov(x17, dst_base);
2732  __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2733  __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2734  END();
2735 
2736  RUN();
2737 
2738  ASSERT_EQUAL_FP32(1.0, s31);
2739  ASSERT_EQUAL_FP32(2.0, s0);
2740  ASSERT_EQUAL_FP32(0.0, dst[0]);
2741  ASSERT_EQUAL_FP32(2.0, dst[1]);
2742  ASSERT_EQUAL_FP32(1.0, dst[2]);
2743  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2744  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2745 
2746  TEARDOWN();
2747 }
2748 
2749 
2750 TEST(ldp_stp_double) {
2751  INIT_V8();
2752  SETUP();
2753 
2754  double src[2] = {1.0, 2.0};
2755  double dst[3] = {0.0, 0.0, 0.0};
2756  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2757  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2758 
2759  START();
2760  __ Mov(x16, src_base);
2761  __ Mov(x17, dst_base);
2762  __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2763  __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2764  END();
2765 
2766  RUN();
2767 
2768  ASSERT_EQUAL_FP64(1.0, d31);
2769  ASSERT_EQUAL_FP64(2.0, d0);
2770  ASSERT_EQUAL_FP64(0.0, dst[0]);
2771  ASSERT_EQUAL_FP64(2.0, dst[1]);
2772  ASSERT_EQUAL_FP64(1.0, dst[2]);
2773  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2774  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2775 
2776  TEARDOWN();
2777 }
2778 
2779 
2780 TEST(ldp_stp_offset) {
2781  INIT_V8();
2782  SETUP();
2783 
2784  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2785  0xffeeddccbbaa9988UL};
2786  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2787  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2788  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2789 
2790  START();
2791  __ Mov(x16, src_base);
2792  __ Mov(x17, dst_base);
2793  __ Mov(x18, src_base + 24);
2794  __ Mov(x19, dst_base + 56);
2795  __ Ldp(w0, w1, MemOperand(x16));
2796  __ Ldp(w2, w3, MemOperand(x16, 4));
2797  __ Ldp(x4, x5, MemOperand(x16, 8));
2798  __ Ldp(w6, w7, MemOperand(x18, -12));
2799  __ Ldp(x8, x9, MemOperand(x18, -16));
2800  __ Stp(w0, w1, MemOperand(x17));
2801  __ Stp(w2, w3, MemOperand(x17, 8));
2802  __ Stp(x4, x5, MemOperand(x17, 16));
2803  __ Stp(w6, w7, MemOperand(x19, -24));
2804  __ Stp(x8, x9, MemOperand(x19, -16));
2805  END();
2806 
2807  RUN();
2808 
2809  ASSERT_EQUAL_64(0x44556677, x0);
2810  ASSERT_EQUAL_64(0x00112233, x1);
2811  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
2812  ASSERT_EQUAL_64(0x00112233, x2);
2813  ASSERT_EQUAL_64(0xccddeeff, x3);
2814  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
2815  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
2816  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
2817  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2818  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
2819  ASSERT_EQUAL_64(0x8899aabb, x6);
2820  ASSERT_EQUAL_64(0xbbaa9988, x7);
2821  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
2822  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
2823  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
2824  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
2825  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
2826  ASSERT_EQUAL_64(src_base, x16);
2827  ASSERT_EQUAL_64(dst_base, x17);
2828  ASSERT_EQUAL_64(src_base + 24, x18);
2829  ASSERT_EQUAL_64(dst_base + 56, x19);
2830 
2831  TEARDOWN();
2832 }
2833 
2834 
2835 TEST(ldnp_stnp_offset) {
2836  INIT_V8();
2837  SETUP();
2838 
2839  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2840  0xffeeddccbbaa9988UL};
2841  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2842  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2843  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2844 
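// Ldnp and Stnp are the non-temporal variants of Ldp and Stp: the same pair
// transfer, plus a hint to the memory system that the data is unlikely to be
// reused soon and need not be kept in the caches. The expected results are
// identical to those of ldp_stp_offset above.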
2845  START();
2846  __ Mov(x16, src_base);
2847  __ Mov(x17, dst_base);
2848  __ Mov(x18, src_base + 24);
2849  __ Mov(x19, dst_base + 56);
2850  __ Ldnp(w0, w1, MemOperand(x16));
2851  __ Ldnp(w2, w3, MemOperand(x16, 4));
2852  __ Ldnp(x4, x5, MemOperand(x16, 8));
2853  __ Ldnp(w6, w7, MemOperand(x18, -12));
2854  __ Ldnp(x8, x9, MemOperand(x18, -16));
2855  __ Stnp(w0, w1, MemOperand(x17));
2856  __ Stnp(w2, w3, MemOperand(x17, 8));
2857  __ Stnp(x4, x5, MemOperand(x17, 16));
2858  __ Stnp(w6, w7, MemOperand(x19, -24));
2859  __ Stnp(x8, x9, MemOperand(x19, -16));
2860  END();
2861 
2862  RUN();
2863 
2864  ASSERT_EQUAL_64(0x44556677, x0);
2865  ASSERT_EQUAL_64(0x00112233, x1);
2866  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
2867  ASSERT_EQUAL_64(0x00112233, x2);
2868  ASSERT_EQUAL_64(0xccddeeff, x3);
2869  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
2870  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
2871  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
2872  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2873  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
2874  ASSERT_EQUAL_64(0x8899aabb, x6);
2875  ASSERT_EQUAL_64(0xbbaa9988, x7);
2876  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
2877  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
2878  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
2879  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
2880  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
2881  ASSERT_EQUAL_64(src_base, x16);
2882  ASSERT_EQUAL_64(dst_base, x17);
2883  ASSERT_EQUAL_64(src_base + 24, x18);
2884  ASSERT_EQUAL_64(dst_base + 56, x19);
2885 
2886  TEARDOWN();
2887 }
2888 
2889 
2890 TEST(ldp_stp_preindex) {
2891  INIT_V8();
2892  SETUP();
2893 
2894  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2895  0xffeeddccbbaa9988UL};
2896  uint64_t dst[5] = {0, 0, 0, 0, 0};
2897  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2898  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2899 
2900  START();
2901  __ Mov(x16, src_base);
2902  __ Mov(x17, dst_base);
2903  __ Mov(x18, dst_base + 16);
2904  __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
2905  __ Mov(x19, x16);
2906  __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
2907  __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
2908  __ Mov(x20, x17);
2909  __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
2910  __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
2911  __ Mov(x21, x16);
2912  __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
2913  __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
2914  __ Mov(x22, x18);
2915  __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
2916  END();
2917 
2918  RUN();
2919 
2920  ASSERT_EQUAL_64(0x00112233, x0);
2921  ASSERT_EQUAL_64(0xccddeeff, x1);
2922  ASSERT_EQUAL_64(0x44556677, x2);
2923  ASSERT_EQUAL_64(0x00112233, x3);
2924  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
2925  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
2926  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
2927  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2928  ASSERT_EQUAL_64(0x0011223344556677UL, x6);
2929  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
2930  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
2931  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
2932  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
2933  ASSERT_EQUAL_64(src_base, x16);
2934  ASSERT_EQUAL_64(dst_base, x17);
2935  ASSERT_EQUAL_64(dst_base + 16, x18);
2936  ASSERT_EQUAL_64(src_base + 4, x19);
2937  ASSERT_EQUAL_64(dst_base + 4, x20);
2938  ASSERT_EQUAL_64(src_base + 8, x21);
2939  ASSERT_EQUAL_64(dst_base + 24, x22);
2940 
2941  TEARDOWN();
2942 }
2943 
2944 
2945 TEST(ldp_stp_postindex) {
2946  INIT_V8();
2947  SETUP();
2948 
2949  uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2950  0xffeeddccbbaa9988UL, 0x7766554433221100UL};
2951  uint64_t dst[5] = {0, 0, 0, 0, 0};
2952  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2953  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2954 
2955  START();
2956  __ Mov(x16, src_base);
2957  __ Mov(x17, dst_base);
2958  __ Mov(x18, dst_base + 16);
2959  __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
2960  __ Mov(x19, x16);
2961  __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
2962  __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
2963  __ Mov(x20, x17);
2964  __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
2965  __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
2966  __ Mov(x21, x16);
2967  __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
2968  __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
2969  __ Mov(x22, x18);
2970  __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
2971  END();
2972 
2973  RUN();
2974 
2975  ASSERT_EQUAL_64(0x44556677, x0);
2976  ASSERT_EQUAL_64(0x00112233, x1);
2977  ASSERT_EQUAL_64(0x00112233, x2);
2978  ASSERT_EQUAL_64(0xccddeeff, x3);
2979  ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
2980  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
2981  ASSERT_EQUAL_64(0x0011223344556677UL, x4);
2982  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
2983  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
2984  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
2985  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
2986  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
2987  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
2988  ASSERT_EQUAL_64(src_base, x16);
2989  ASSERT_EQUAL_64(dst_base, x17);
2990  ASSERT_EQUAL_64(dst_base + 16, x18);
2991  ASSERT_EQUAL_64(src_base + 4, x19);
2992  ASSERT_EQUAL_64(dst_base + 4, x20);
2993  ASSERT_EQUAL_64(src_base + 8, x21);
2994  ASSERT_EQUAL_64(dst_base + 24, x22);
2995 
2996  TEARDOWN();
2997 }
2998 
2999 
3000 TEST(ldp_sign_extend) {
3001  INIT_V8();
3002  SETUP();
3003 
3004  uint32_t src[2] = {0x80000000, 0x7fffffff};
3005  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3006 
3007  START();
3008  __ Mov(x24, src_base);
3009  __ Ldpsw(x0, x1, MemOperand(x24));
3010  END();
3011 
3012  RUN();
3013 
3014  ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
3015  ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
3016 
3017  TEARDOWN();
3018 }
3019 
3020 
3021 TEST(ldur_stur) {
3022  INIT_V8();
3023  SETUP();
3024 
3025  int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
3026  int64_t dst[5] = {0, 0, 0, 0, 0};
3027  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3028  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3029 
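// The immediates below are not multiples of the access size, so they cannot
// be encoded in the scaled-offset LDR/STR forms. The assembler falls back to
// the unscaled-offset LDUR/STUR encodings, which take a signed 9-bit byte
// offset; that fallback is what this test exercises.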
3030  START();
3031  __ Mov(x17, src_base);
3032  __ Mov(x18, dst_base);
3033  __ Mov(x19, src_base + 16);
3034  __ Mov(x20, dst_base + 32);
3035  __ Mov(x21, dst_base + 40);
3036  __ Ldr(w0, MemOperand(x17, 1));
3037  __ Str(w0, MemOperand(x18, 2));
3038  __ Ldr(x1, MemOperand(x17, 3));
3039  __ Str(x1, MemOperand(x18, 9));
3040  __ Ldr(w2, MemOperand(x19, -9));
3041  __ Str(w2, MemOperand(x20, -5));
3042  __ Ldrb(w3, MemOperand(x19, -1));
3043  __ Strb(w3, MemOperand(x21, -1));
3044  END();
3045 
3046  RUN();
3047 
3048  ASSERT_EQUAL_64(0x6789abcd, x0);
3049  ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
3050  ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
3051  ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
3052  ASSERT_EQUAL_64(0x000000ab, dst[2]);
3053  ASSERT_EQUAL_64(0xabcdef01, x2);
3054  ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
3055  ASSERT_EQUAL_64(0x00000001, x3);
3056  ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
3057  ASSERT_EQUAL_64(src_base, x17);
3058  ASSERT_EQUAL_64(dst_base, x18);
3059  ASSERT_EQUAL_64(src_base + 16, x19);
3060  ASSERT_EQUAL_64(dst_base + 32, x20);
3061 
3062  TEARDOWN();
3063 }
3064 
3065 
3066 #if 0 // TODO(all) enable.
3067 // TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
3068 TEST(ldr_literal) {
3069  INIT_V8();
3070  SETUP();
3071 
3072  START();
3073  __ Ldr(x2, 0x1234567890abcdefUL);
3074  __ Ldr(w3, 0xfedcba09);
3075  __ Ldr(d13, 1.234);
3076  __ Ldr(s25, 2.5);
3077  END();
3078 
3079  RUN();
3080 
3081  ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
3082  ASSERT_EQUAL_64(0xfedcba09, x3);
3083  ASSERT_EQUAL_FP64(1.234, d13);
3084  ASSERT_EQUAL_FP32(2.5, s25);
3085 
3086  TEARDOWN();
3087 }
3088 
3089 
3090 static void LdrLiteralRangeHelper(ptrdiff_t range_,
3091  LiteralPoolEmitOption option,
3092  bool expect_dump) {
3093  ASSERT(range_ > 0);
3094  SETUP_SIZE(range_ + 1024);
3095 
3096  Label label_1, label_2;
3097 
3098  size_t range = static_cast<size_t>(range_);
3099  size_t code_size = 0;
3100  size_t pool_guard_size;
3101 
3102  if (option == NoJumpRequired) {
3103  // Space for an explicit branch.
3104  pool_guard_size = sizeof(Instr);
3105  } else {
3106  pool_guard_size = 0;
3107  }
3108 
3109  START();
3110  // Force a pool dump so the pool starts off empty.
3111  __ EmitLiteralPool(JumpRequired);
3112  ASSERT_LITERAL_POOL_SIZE(0);
3113 
3114  __ Ldr(x0, 0x1234567890abcdefUL);
3115  __ Ldr(w1, 0xfedcba09);
3116  __ Ldr(d0, 1.234);
3117  __ Ldr(s1, 2.5);
3118  ASSERT_LITERAL_POOL_SIZE(4);
3119 
3120  code_size += 4 * sizeof(Instr);
3121 
3122  // Check that the requested range (allowing space for a branch over the pool)
3123  // can be handled by this test.
3124  ASSERT((code_size + pool_guard_size) <= range);
3125 
3126  // Emit NOPs up to 'range', leaving space for the pool guard.
3127  while ((code_size + pool_guard_size) < range) {
3128  __ Nop();
3129  code_size += sizeof(Instr);
3130  }
3131 
3132  // Emit the guard sequence before the literal pool.
3133  if (option == NoJumpRequired) {
3134  __ B(&label_1);
3135  code_size += sizeof(Instr);
3136  }
3137 
3138  ASSERT(code_size == range);
3139  ASSERT_LITERAL_POOL_SIZE(4);
3140 
3141  // Possibly generate a literal pool.
3142  __ CheckLiteralPool(option);
3143  __ Bind(&label_1);
3144  if (expect_dump) {
3145    ASSERT_LITERAL_POOL_SIZE(0);
3146  } else {
3147    ASSERT_LITERAL_POOL_SIZE(4);
3148  }
3149 
3150  // Force a pool flush to check that a second pool functions correctly.
3151  __ EmitLiteralPool(JumpRequired);
3152  ASSERT_LITERAL_POOL_SIZE(0);
3153 
3154  // These loads should be after the pool (and will require a new one).
3155  __ Ldr(x4, 0x34567890abcdef12UL);
3156  __ Ldr(w5, 0xdcba09fe);
3157  __ Ldr(d4, 123.4);
3158  __ Ldr(s5, 250.0);
3159  ASSERT_LITERAL_POOL_SIZE(4);
3160  END();
3161 
3162  RUN();
3163 
3164  // Check that the literals loaded correctly.
3165  ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
3166  ASSERT_EQUAL_64(0xfedcba09, x1);
3167  ASSERT_EQUAL_FP64(1.234, d0);
3168  ASSERT_EQUAL_FP32(2.5, s1);
3169  ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
3170  ASSERT_EQUAL_64(0xdcba09fe, x5);
3171  ASSERT_EQUAL_FP64(123.4, d4);
3172  ASSERT_EQUAL_FP32(250.0, s5);
3173 
3174  TEARDOWN();
3175 }
3176 
3177 
3178 TEST(ldr_literal_range_1) {
3179  INIT_V8();
3180  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
3181  NoJumpRequired,
3182  true);
3183 }
3184 
3185 
3186 TEST(ldr_literal_range_2) {
3187  INIT_V8();
3188  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
3189  NoJumpRequired,
3190  false);
3191 }
3192 
3193 
3194 TEST(ldr_literal_range_3) {
3195  INIT_V8();
3196  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
3197  JumpRequired,
3198  true);
3199 }
3200 
3201 
3202 TEST(ldr_literal_range_4) {
3203  INIT_V8();
3204  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
3205  JumpRequired,
3206  false);
3207 }
3208 
3209 
3210 TEST(ldr_literal_range_5) {
3211  INIT_V8();
3212  LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
3213  JumpRequired,
3214  false);
3215 }
3216 
3217 
3218 TEST(ldr_literal_range_6) {
3219  INIT_V8();
3220  LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
3221  JumpRequired,
3222  false);
3223 }
3224 #endif
3225 
3226 TEST(add_sub_imm) {
3227  INIT_V8();
3228  SETUP();
3229 
3230  START();
3231  __ Mov(x0, 0x0);
3232  __ Mov(x1, 0x1111);
3233  __ Mov(x2, 0xffffffffffffffffL);
3234  __ Mov(x3, 0x8000000000000000L);
3235 
3236  __ Add(x10, x0, Operand(0x123));
3237  __ Add(x11, x1, Operand(0x122000));
3238  __ Add(x12, x0, Operand(0xabc << 12));
3239  __ Add(x13, x2, Operand(1));
3240 
3241  __ Add(w14, w0, Operand(0x123));
3242  __ Add(w15, w1, Operand(0x122000));
3243  __ Add(w16, w0, Operand(0xabc << 12));
3244  __ Add(w17, w2, Operand(1));
3245 
3246  __ Sub(x20, x0, Operand(0x1));
3247  __ Sub(x21, x1, Operand(0x111));
3248  __ Sub(x22, x1, Operand(0x1 << 12));
3249  __ Sub(x23, x3, Operand(1));
3250 
3251  __ Sub(w24, w0, Operand(0x1));
3252  __ Sub(w25, w1, Operand(0x111));
3253  __ Sub(w26, w1, Operand(0x1 << 12));
3254  __ Sub(w27, w3, Operand(1));
3255  END();
3256 
3257  RUN();
3258 
3259  ASSERT_EQUAL_64(0x123, x10);
3260  ASSERT_EQUAL_64(0x123111, x11);
3261  ASSERT_EQUAL_64(0xabc000, x12);
3262  ASSERT_EQUAL_64(0x0, x13);
3263 
3264  ASSERT_EQUAL_32(0x123, w14);
3265  ASSERT_EQUAL_32(0x123111, w15);
3266  ASSERT_EQUAL_32(0xabc000, w16);
3267  ASSERT_EQUAL_32(0x0, w17);
3268 
3269  ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
3270  ASSERT_EQUAL_64(0x1000, x21);
3271  ASSERT_EQUAL_64(0x111, x22);
3272  ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
3273 
3274  ASSERT_EQUAL_32(0xffffffff, w24);
3275  ASSERT_EQUAL_32(0x1000, w25);
3276  ASSERT_EQUAL_32(0x111, w26);
3277  ASSERT_EQUAL_32(0xffffffff, w27);
3278 
3279  TEARDOWN();
3280 }
3281 
3282 
3283 TEST(add_sub_wide_imm) {
3284  INIT_V8();
3285  SETUP();
3286 
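// These immediates do not fit in the 12-bit (optionally LSL #12) immediate
// field of ADD/SUB. The MacroAssembler first materializes the value into a
// scratch register, then performs a register-register add or subtract.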
3287  START();
3288  __ Mov(x0, 0x0);
3289  __ Mov(x1, 0x1);
3290 
3291  __ Add(x10, x0, Operand(0x1234567890abcdefUL));
3292  __ Add(x11, x1, Operand(0xffffffff));
3293 
3294  __ Add(w12, w0, Operand(0x12345678));
3295  __ Add(w13, w1, Operand(0xffffffff));
3296 
3297  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
3298 
3299  __ Sub(w21, w0, Operand(0x12345678));
3300  END();
3301 
3302  RUN();
3303 
3304  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
3305  ASSERT_EQUAL_64(0x100000000UL, x11);
3306 
3307  ASSERT_EQUAL_32(0x12345678, w12);
3308  ASSERT_EQUAL_64(0x0, x13);
3309 
3310  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
3311 
3312  ASSERT_EQUAL_32(-0x12345678, w21);
3313 
3314  TEARDOWN();
3315 }
3316 
3317 
3318 TEST(add_sub_shifted) {
3319  INIT_V8();
3320  SETUP();
3321 
3322  START();
3323  __ Mov(x0, 0);
3324  __ Mov(x1, 0x0123456789abcdefL);
3325  __ Mov(x2, 0xfedcba9876543210L);
3326  __ Mov(x3, 0xffffffffffffffffL);
3327 
3328  __ Add(x10, x1, Operand(x2));
3329  __ Add(x11, x0, Operand(x1, LSL, 8));
3330  __ Add(x12, x0, Operand(x1, LSR, 8));
3331  __ Add(x13, x0, Operand(x1, ASR, 8));
3332  __ Add(x14, x0, Operand(x2, ASR, 8));
3333  __ Add(w15, w0, Operand(w1, ASR, 8));
3334  __ Add(w18, w3, Operand(w1, ROR, 8));
3335  __ Add(x19, x3, Operand(x1, ROR, 8));
3336 
3337  __ Sub(x20, x3, Operand(x2));
3338  __ Sub(x21, x3, Operand(x1, LSL, 8));
3339  __ Sub(x22, x3, Operand(x1, LSR, 8));
3340  __ Sub(x23, x3, Operand(x1, ASR, 8));
3341  __ Sub(x24, x3, Operand(x2, ASR, 8));
3342  __ Sub(w25, w3, Operand(w1, ASR, 8));
3343  __ Sub(w26, w3, Operand(w1, ROR, 8));
3344  __ Sub(x27, x3, Operand(x1, ROR, 8));
3345  END();
3346 
3347  RUN();
3348 
3349  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
3350  ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
3351  ASSERT_EQUAL_64(0x000123456789abcdL, x12);
3352  ASSERT_EQUAL_64(0x000123456789abcdL, x13);
3353  ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
3354  ASSERT_EQUAL_64(0xff89abcd, x15);
3355  ASSERT_EQUAL_64(0xef89abcc, x18);
3356  ASSERT_EQUAL_64(0xef0123456789abccL, x19);
3357 
3358  ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
3359  ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
3360  ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
3361  ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
3362  ASSERT_EQUAL_64(0x000123456789abcdL, x24);
3363  ASSERT_EQUAL_64(0x00765432, x25);
3364  ASSERT_EQUAL_64(0x10765432, x26);
3365  ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
3366 
3367  TEARDOWN();
3368 }
3369 
3370 
3371 TEST(add_sub_extended) {
3372  INIT_V8();
3373  SETUP();
3374 
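// An extended-register operand extracts the low byte, half-word or word of
// the source (UXTB/UXTH/UXTW zero-extend, SXTB/SXTH/SXTW sign-extend, with
// UXTX/SXTX using the full register), then optionally shifts the result left
// by up to four bits before the addition or subtraction.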
3375  START();
3376  __ Mov(x0, 0);
3377  __ Mov(x1, 0x0123456789abcdefL);
3378  __ Mov(x2, 0xfedcba9876543210L);
3379  __ Mov(w3, 0x80);
3380 
3381  __ Add(x10, x0, Operand(x1, UXTB, 0));
3382  __ Add(x11, x0, Operand(x1, UXTB, 1));
3383  __ Add(x12, x0, Operand(x1, UXTH, 2));
3384  __ Add(x13, x0, Operand(x1, UXTW, 4));
3385 
3386  __ Add(x14, x0, Operand(x1, SXTB, 0));
3387  __ Add(x15, x0, Operand(x1, SXTB, 1));
3388  __ Add(x16, x0, Operand(x1, SXTH, 2));
3389  __ Add(x17, x0, Operand(x1, SXTW, 3));
3390  __ Add(x18, x0, Operand(x2, SXTB, 0));
3391  __ Add(x19, x0, Operand(x2, SXTB, 1));
3392  __ Add(x20, x0, Operand(x2, SXTH, 2));
3393  __ Add(x21, x0, Operand(x2, SXTW, 3));
3394 
3395  __ Add(x22, x1, Operand(x2, SXTB, 1));
3396  __ Sub(x23, x1, Operand(x2, SXTB, 1));
3397 
3398  __ Add(w24, w1, Operand(w2, UXTB, 2));
3399  __ Add(w25, w0, Operand(w1, SXTB, 0));
3400  __ Add(w26, w0, Operand(w1, SXTB, 1));
3401  __ Add(w27, w2, Operand(w1, SXTW, 3));
3402 
3403  __ Add(w28, w0, Operand(w1, SXTW, 3));
3404  __ Add(x29, x0, Operand(w1, SXTW, 3));
3405 
3406  __ Sub(x30, x0, Operand(w3, SXTB, 1));
3407  END();
3408 
3409  RUN();
3410 
3411  ASSERT_EQUAL_64(0xefL, x10);
3412  ASSERT_EQUAL_64(0x1deL, x11);
3413  ASSERT_EQUAL_64(0x337bcL, x12);
3414  ASSERT_EQUAL_64(0x89abcdef0L, x13);
3415 
3416  ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
3417  ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
3418  ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
3419  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
3420  ASSERT_EQUAL_64(0x10L, x18);
3421  ASSERT_EQUAL_64(0x20L, x19);
3422  ASSERT_EQUAL_64(0xc840L, x20);
3423  ASSERT_EQUAL_64(0x3b2a19080L, x21);
3424 
3425  ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
3426  ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
3427 
3428  ASSERT_EQUAL_32(0x89abce2f, w24);
3429  ASSERT_EQUAL_32(0xffffffef, w25);
3430  ASSERT_EQUAL_32(0xffffffde, w26);
3431  ASSERT_EQUAL_32(0xc3b2a188, w27);
3432 
3433  ASSERT_EQUAL_32(0x4d5e6f78, w28);
3434  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
3435 
3436  ASSERT_EQUAL_64(256, x30);
3437 
3438  TEARDOWN();
3439 }
3440 
3441 
3442 TEST(add_sub_negative) {
3443  INIT_V8();
3444  SETUP();
3445 
3446  START();
3447  __ Mov(x0, 0);
3448  __ Mov(x1, 4687);
3449  __ Mov(x2, 0x1122334455667788);
3450  __ Mov(w3, 0x11223344);
3451  __ Mov(w4, 400000);
3452 
3453  __ Add(x10, x0, -42);
3454  __ Add(x11, x1, -687);
3455  __ Add(x12, x2, -0x88);
3456 
3457  __ Sub(x13, x0, -600);
3458  __ Sub(x14, x1, -313);
3459  __ Sub(x15, x2, -0x555);
3460 
3461  __ Add(w19, w3, -0x344);
3462  __ Add(w20, w4, -2000);
3463 
3464  __ Sub(w21, w3, -0xbc);
3465  __ Sub(w22, w4, -2000);
3466  END();
3467 
3468  RUN();
3469 
3470  ASSERT_EQUAL_64(-42, x10);
3471  ASSERT_EQUAL_64(4000, x11);
3472  ASSERT_EQUAL_64(0x1122334455667700, x12);
3473 
3474  ASSERT_EQUAL_64(600, x13);
3475  ASSERT_EQUAL_64(5000, x14);
3476  ASSERT_EQUAL_64(0x1122334455667cdd, x15);
3477 
3478  ASSERT_EQUAL_32(0x11223000, w19);
3479  ASSERT_EQUAL_32(398000, w20);
3480 
3481  ASSERT_EQUAL_32(0x11223400, w21);
3482  ASSERT_EQUAL_32(402000, w22);
3483 
3484  TEARDOWN();
3485 }
3486 
3487 
3488 TEST(add_sub_zero) {
3489  INIT_V8();
3490  SETUP();
3491 
3492  START();
3493  __ Mov(x0, 0);
3494  __ Mov(x1, 0);
3495  __ Mov(x2, 0);
3496 
3497  Label blob1;
3498  __ Bind(&blob1);
3499  __ Add(x0, x0, 0);
3500  __ Sub(x1, x1, 0);
3501  __ Sub(x2, x2, xzr);
3502  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
3503 
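// The 32-bit forms must still generate code even when the operation adds or
// subtracts zero, because a W-register write zeroes the upper 32 bits of the
// underlying X register; it is not a no-op.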
3504  Label blob2;
3505  __ Bind(&blob2);
3506  __ Add(w3, w3, 0);
3507  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
3508 
3509  Label blob3;
3510  __ Bind(&blob3);
3511  __ Sub(w3, w3, wzr);
3512  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
3513 
3514  END();
3515 
3516  RUN();
3517 
3518  ASSERT_EQUAL_64(0, x0);
3519  ASSERT_EQUAL_64(0, x1);
3520  ASSERT_EQUAL_64(0, x2);
3521 
3522  TEARDOWN();
3523 }
3524 
3525 
3526 TEST(claim_drop_zero) {
3527  INIT_V8();
3528  SETUP();
3529 
3530  START();
3531 
3532  Label start;
3533  __ Bind(&start);
3534  __ Claim(0);
3535  __ Drop(0);
3536  __ Claim(xzr, 8);
3537  __ Drop(xzr, 8);
3538  __ Claim(xzr, 0);
3539  __ Drop(xzr, 0);
3540  __ Claim(x7, 0);
3541  __ Drop(x7, 0);
3542  __ ClaimBySMI(xzr, 8);
3543  __ DropBySMI(xzr, 8);
3544  __ ClaimBySMI(xzr, 0);
3545  __ DropBySMI(xzr, 0);
3546  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
3547 
3548  END();
3549 
3550  RUN();
3551 
3552  TEARDOWN();
3553 }
3554 
3555 
3556 TEST(neg) {
3557  INIT_V8();
3558  SETUP();
3559 
3560  START();
3561  __ Mov(x0, 0xf123456789abcdefL);
3562 
3563  // Immediate.
3564  __ Neg(x1, 0x123);
3565  __ Neg(w2, 0x123);
3566 
3567  // Shifted.
3568  __ Neg(x3, Operand(x0, LSL, 1));
3569  __ Neg(w4, Operand(w0, LSL, 2));
3570  __ Neg(x5, Operand(x0, LSR, 3));
3571  __ Neg(w6, Operand(w0, LSR, 4));
3572  __ Neg(x7, Operand(x0, ASR, 5));
3573  __ Neg(w8, Operand(w0, ASR, 6));
3574 
3575  // Extended.
3576  __ Neg(w9, Operand(w0, UXTB));
3577  __ Neg(x10, Operand(x0, SXTB, 1));
3578  __ Neg(w11, Operand(w0, UXTH, 2));
3579  __ Neg(x12, Operand(x0, SXTH, 3));
3580  __ Neg(w13, Operand(w0, UXTW, 4));
3581  __ Neg(x14, Operand(x0, SXTW, 4));
3582  END();
3583 
3584  RUN();
3585 
3586  ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
3587  ASSERT_EQUAL_64(0xfffffedd, x2);
3588  ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
3589  ASSERT_EQUAL_64(0xd950c844, x4);
3590  ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
3591  ASSERT_EQUAL_64(0xf7654322, x6);
3592  ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
3593  ASSERT_EQUAL_64(0x01d950c9, x8);
3594  ASSERT_EQUAL_64(0xffffff11, x9);
3595  ASSERT_EQUAL_64(0x0000000000000022UL, x10);
3596  ASSERT_EQUAL_64(0xfffcc844, x11);
3597  ASSERT_EQUAL_64(0x0000000000019088UL, x12);
3598  ASSERT_EQUAL_64(0x65432110, x13);
3599  ASSERT_EQUAL_64(0x0000000765432110UL, x14);
3600 
3601  TEARDOWN();
3602 }
3603 
3604 
3605 TEST(adc_sbc_shift) {
3606  INIT_V8();
3607  SETUP();
3608 
3609  START();
3610  __ Mov(x0, 0);
3611  __ Mov(x1, 1);
3612  __ Mov(x2, 0x0123456789abcdefL);
3613  __ Mov(x3, 0xfedcba9876543210L);
3614  __ Mov(x4, 0xffffffffffffffffL);
3615 
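// Flag set-up idioms used throughout: 'Adds x0, x0, #0' cannot produce a
// carry, so it clears C; 'Cmp w0, w0' computes w0 - w0 with no borrow, which
// on ARM sets C. Adc/Sbc then add or subtract with that known carry value.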
3616  // Clear the C flag.
3617  __ Adds(x0, x0, Operand(0));
3618 
3619  __ Adc(x5, x2, Operand(x3));
3620  __ Adc(x6, x0, Operand(x1, LSL, 60));
3621  __ Sbc(x7, x4, Operand(x3, LSR, 4));
3622  __ Adc(x8, x2, Operand(x3, ASR, 4));
3623  __ Adc(x9, x2, Operand(x3, ROR, 8));
3624 
3625  __ Adc(w10, w2, Operand(w3));
3626  __ Adc(w11, w0, Operand(w1, LSL, 30));
3627  __ Sbc(w12, w4, Operand(w3, LSR, 4));
3628  __ Adc(w13, w2, Operand(w3, ASR, 4));
3629  __ Adc(w14, w2, Operand(w3, ROR, 8));
3630 
3631  // Set the C flag.
3632  __ Cmp(w0, Operand(w0));
3633 
3634  __ Adc(x18, x2, Operand(x3));
3635  __ Adc(x19, x0, Operand(x1, LSL, 60));
3636  __ Sbc(x20, x4, Operand(x3, LSR, 4));
3637  __ Adc(x21, x2, Operand(x3, ASR, 4));
3638  __ Adc(x22, x2, Operand(x3, ROR, 8));
3639 
3640  __ Adc(w23, w2, Operand(w3));
3641  __ Adc(w24, w0, Operand(w1, LSL, 30));
3642  __ Sbc(w25, w4, Operand(w3, LSR, 4));
3643  __ Adc(w26, w2, Operand(w3, ASR, 4));
3644  __ Adc(w27, w2, Operand(w3, ROR, 8));
3645  END();
3646 
3647  RUN();
3648 
3649  ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
3650  ASSERT_EQUAL_64(1L << 60, x6);
3651  ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
3652  ASSERT_EQUAL_64(0x0111111111111110L, x8);
3653  ASSERT_EQUAL_64(0x1222222222222221L, x9);
3654 
3655  ASSERT_EQUAL_32(0xffffffff, w10);
3656  ASSERT_EQUAL_32(1 << 30, w11);
3657  ASSERT_EQUAL_32(0xf89abcdd, w12);
3658  ASSERT_EQUAL_32(0x91111110, w13);
3659  ASSERT_EQUAL_32(0x9a222221, w14);
3660 
3661  ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
3662  ASSERT_EQUAL_64((1L << 60) + 1, x19);
3663  ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
3664  ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
3665  ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
3666 
3667  ASSERT_EQUAL_32(0xffffffff + 1, w23);
3668  ASSERT_EQUAL_32((1 << 30) + 1, w24);
3669  ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
3670  ASSERT_EQUAL_32(0x91111110 + 1, w26);
3671  ASSERT_EQUAL_32(0x9a222221 + 1, w27);
3672 
3673  // Check that adc correctly sets the condition flags.
3674  START();
3675  __ Mov(x0, 1);
3676  __ Mov(x1, 0xffffffffffffffffL);
3677  // Clear the C flag.
3678  __ Adds(x0, x0, Operand(0));
3679  __ Adcs(x10, x0, Operand(x1));
3680  END();
3681 
3682  RUN();
3683 
3684  ASSERT_EQUAL_NZCV(ZCFlag);
3685  ASSERT_EQUAL_64(0, x10);
3686 
3687  START();
3688  __ Mov(x0, 1);
3689  __ Mov(x1, 0x8000000000000000L);
3690  // Clear the C flag.
3691  __ Adds(x0, x0, Operand(0));
3692  __ Adcs(x10, x0, Operand(x1, ASR, 63));
3693  END();
3694 
3695  RUN();
3696 
3697  ASSERT_EQUAL_NZCV(ZCFlag);
3698  ASSERT_EQUAL_64(0, x10);
3699 
3700  START();
3701  __ Mov(x0, 0x10);
3702  __ Mov(x1, 0x07ffffffffffffffL);
3703  // Clear the C flag.
3704  __ Adds(x0, x0, Operand(0));
3705  __ Adcs(x10, x0, Operand(x1, LSL, 4));
3706  END();
3707 
3708  RUN();
3709 
3710  ASSERT_EQUAL_NZCV(NVFlag);
3711  ASSERT_EQUAL_64(0x8000000000000000L, x10);
3712 
3713  // Check that sbc correctly sets the condition flags.
3714  START();
3715  __ Mov(x0, 0);
3716  __ Mov(x1, 0xffffffffffffffffL);
3717  // Clear the C flag.
3718  __ Adds(x0, x0, Operand(0));
3719  __ Sbcs(x10, x0, Operand(x1));
3720  END();
3721 
3722  RUN();
3723 
3724  ASSERT_EQUAL_NZCV(ZFlag);
3725  ASSERT_EQUAL_64(0, x10);
3726 
3727  START();
3728  __ Mov(x0, 1);
3729  __ Mov(x1, 0xffffffffffffffffL);
3730  // Clear the C flag.
3731  __ Adds(x0, x0, Operand(0));
3732  __ Sbcs(x10, x0, Operand(x1, LSR, 1));
3733  END();
3734 
3735  RUN();
3736 
3737  ASSERT_EQUAL_NZCV(NFlag);
3738  ASSERT_EQUAL_64(0x8000000000000001L, x10);
3739 
3740  START();
3741  __ Mov(x0, 0);
3742  // Clear the C flag.
3743  __ Adds(x0, x0, Operand(0));
3744  __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
3745  END();
3746 
3747  RUN();
3748 
3749  ASSERT_EQUAL_NZCV(ZFlag);
3750  ASSERT_EQUAL_64(0, x10);
3751 
3752  START();
3753  __ Mov(w0, 0x7fffffff);
3754  // Clear the C flag.
3755  __ Adds(x0, x0, Operand(0));
3756  __ Ngcs(w10, w0);
3757  END();
3758 
3759  RUN();
3760 
3761  ASSERT_EQUAL_NZCV(NFlag);
3762  ASSERT_EQUAL_64(0x80000000, x10);
3763 
3764  START();
3765  // Clear the C flag.
3766  __ Adds(x0, x0, Operand(0));
3767  __ Ngcs(x10, 0x7fffffffffffffffL);
3768  END();
3769 
3770  RUN();
3771 
3772  ASSERT_EQUAL_NZCV(NFlag);
3773  ASSERT_EQUAL_64(0x8000000000000000L, x10);
3774 
3775  START();
3776  __ Mov(x0, 0);
3777  // Set the C flag.
3778  __ Cmp(x0, Operand(x0));
3779  __ Sbcs(x10, x0, Operand(1));
3780  END();
3781 
3782  RUN();
3783 
3784  ASSERT_EQUAL_NZCV(NFlag);
3785  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
3786 
3787  START();
3788  __ Mov(x0, 0);
3789  // Set the C flag.
3790  __ Cmp(x0, Operand(x0));
3791  __ Ngcs(x10, 0x7fffffffffffffffL);
3792  END();
3793 
3794  RUN();
3795 
3796  ASSERT_EQUAL_NZCV(NFlag);
3797  ASSERT_EQUAL_64(0x8000000000000001L, x10);
3798 
3799  TEARDOWN();
3800 }
3801 
3802 
3803 TEST(adc_sbc_extend) {
3804  INIT_V8();
3805  SETUP();
3806 
3807  START();
3808  // Clear the C flag.
3809  __ Adds(x0, x0, Operand(0));
3810 
3811  __ Mov(x0, 0);
3812  __ Mov(x1, 1);
3813  __ Mov(x2, 0x0123456789abcdefL);
3814 
3815  __ Adc(x10, x1, Operand(w2, UXTB, 1));
3816  __ Adc(x11, x1, Operand(x2, SXTH, 2));
3817  __ Sbc(x12, x1, Operand(w2, UXTW, 4));
3818  __ Adc(x13, x1, Operand(x2, UXTX, 4));
3819 
3820  __ Adc(w14, w1, Operand(w2, UXTB, 1));
3821  __ Adc(w15, w1, Operand(w2, SXTH, 2));
3822  __ Adc(w9, w1, Operand(w2, UXTW, 4));
3823 
3824  // Set the C flag.
3825  __ Cmp(w0, Operand(w0));
3826 
3827  __ Adc(x20, x1, Operand(w2, UXTB, 1));
3828  __ Adc(x21, x1, Operand(x2, SXTH, 2));
3829  __ Sbc(x22, x1, Operand(w2, UXTW, 4));
3830  __ Adc(x23, x1, Operand(x2, UXTX, 4));
3831 
3832  __ Adc(w24, w1, Operand(w2, UXTB, 1));
3833  __ Adc(w25, w1, Operand(w2, SXTH, 2));
3834  __ Adc(w26, w1, Operand(w2, UXTW, 4));
3835  END();
3836 
3837  RUN();
3838 
3839  ASSERT_EQUAL_64(0x1df, x10);
3840  ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
3841  ASSERT_EQUAL_64(0xfffffff765432110L, x12);
3842  ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
3843 
3844  ASSERT_EQUAL_32(0x1df, w14);
3845  ASSERT_EQUAL_32(0xffff37bd, w15);
3846  ASSERT_EQUAL_32(0x9abcdef1, w9);
3847 
3848  ASSERT_EQUAL_64(0x1df + 1, x20);
3849  ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
3850  ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
3851  ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
3852 
3853  ASSERT_EQUAL_32(0x1df + 1, w24);
3854  ASSERT_EQUAL_32(0xffff37bd + 1, w25);
3855  ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
3856 
3857  // Check that adc correctly sets the condition flags.
3858  START();
3859  __ Mov(x0, 0xff);
3860  __ Mov(x1, 0xffffffffffffffffL);
3861  // Clear the C flag.
3862  __ Adds(x0, x0, Operand(0));
3863  __ Adcs(x10, x0, Operand(x1, SXTX, 1));
3864  END();
3865 
3866  RUN();
3867 
3868  ASSERT_EQUAL_NZCV(CFlag);
3869 
3870  START();
3871  __ Mov(x0, 0x7fffffffffffffffL);
3872  __ Mov(x1, 1);
3873  // Clear the C flag.
3874  __ Adds(x0, x0, Operand(0));
3875  __ Adcs(x10, x0, Operand(x1, UXTB, 2));
3876  END();
3877 
3878  RUN();
3879 
3880  ASSERT_EQUAL_NZCV(NVFlag);
3881 
3882  START();
3883  __ Mov(x0, 0x7fffffffffffffffL);
3884  // Clear the C flag.
3885  __ Adds(x0, x0, Operand(0));
3886  __ Adcs(x10, x0, Operand(1));
3887  END();
3888 
3889  RUN();
3890 
3891  ASSERT_EQUAL_NZCV(NVFlag);
3892 
3893  TEARDOWN();
3894 }
3895 
3896 
3897 TEST(adc_sbc_wide_imm) {
3898  INIT_V8();
3899  SETUP();
3900 
3901  START();
3902  __ Mov(x0, 0);
3903 
3904  // Clear the C flag.
3905  __ Adds(x0, x0, Operand(0));
3906 
3907  __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
3908  __ Adc(w8, w0, Operand(0xffffffff));
3909  __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
3910  __ Sbc(w10, w0, Operand(0xffffffff));
3911  __ Ngc(x11, Operand(0xffffffff00000000UL));
3912  __ Ngc(w12, Operand(0xffff0000));
3913 
3914  // Set the C flag.
3915  __ Cmp(w0, Operand(w0));
3916 
3917  __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
3918  __ Adc(w19, w0, Operand(0xffffffff));
3919  __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
3920  __ Sbc(w21, w0, Operand(0xffffffff));
3921  __ Ngc(x22, Operand(0xffffffff00000000UL));
3922  __ Ngc(w23, Operand(0xffff0000));
3923  END();
3924 
3925  RUN();
3926 
3927  ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
3928  ASSERT_EQUAL_64(0xffffffff, x8);
3929  ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
3930  ASSERT_EQUAL_64(0, x10);
3931  ASSERT_EQUAL_64(0xffffffff, x11);
3932  ASSERT_EQUAL_64(0xffff, x12);
3933 
3934  ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
3935  ASSERT_EQUAL_64(0, x19);
3936  ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
3937  ASSERT_EQUAL_64(1, x21);
3938  ASSERT_EQUAL_64(0x100000000UL, x22);
3939  ASSERT_EQUAL_64(0x10000, x23);
3940 
3941  TEARDOWN();
3942 }
3943 
3944 
3946  INIT_V8();
3947  SETUP();
3948 
3949  START();
3950  __ Mov(x0, 0);
3951  __ Mov(x1, 0x1111111111111111L);
3952  __ Neg(x10, Operand(x0));
3953  __ Neg(x11, Operand(x1));
3954  __ Neg(w12, Operand(w1));
3955  // Clear the C flag.
3956  __ Adds(x0, x0, Operand(0));
3957  __ Ngc(x13, Operand(x0));
3958  // Set the C flag.
3959  __ Cmp(x0, Operand(x0));
3960  __ Ngc(w14, Operand(w0));
3961  END();
3962 
3963  RUN();
3964 
3965  ASSERT_EQUAL_64(0, x10);
3966  ASSERT_EQUAL_64(-0x1111111111111111L, x11);
3967  ASSERT_EQUAL_32(-0x11111111, w12);
3968  ASSERT_EQUAL_64(-1L, x13);
3969  ASSERT_EQUAL_32(0, w14);
3970 
3971  START();
3972  __ Mov(x0, 0);
3973  __ Cmp(x0, Operand(x0));
3974  END();
3975 
3976  RUN();
3977 
3978  ASSERT_EQUAL_NZCV(ZCFlag);
3979 
3980  START();
3981  __ Mov(w0, 0);
3982  __ Cmp(w0, Operand(w0));
3983  END();
3984 
3985  RUN();
3986 
3987  ASSERT_EQUAL_NZCV(ZCFlag);
3988 
3989  START();
3990  __ Mov(x0, 0);
3991  __ Mov(x1, 0x1111111111111111L);
3992  __ Cmp(x0, Operand(x1));
3993  END();
3994 
3995  RUN();
3996 
3998 
3999  START();
4000  __ Mov(w0, 0);
4001  __ Mov(w1, 0x11111111);
4002  __ Cmp(w0, Operand(w1));
4003  END();
4004 
4005  RUN();
4006 
4007  ASSERT_EQUAL_NZCV(NFlag);
4008 
4009  START();
4010  __ Mov(x1, 0x1111111111111111L);
4011  __ Cmp(x1, Operand(0));
4012  END();
4013 
4014  RUN();
4015 
4016  ASSERT_EQUAL_NZCV(CFlag);
4017 
4018  START();
4019  __ Mov(w1, 0x11111111);
4020  __ Cmp(w1, Operand(0));
4021  END();
4022 
4023  RUN();
4024 
4025  ASSERT_EQUAL_NZCV(CFlag);
4026 
4027  START();
4028  __ Mov(x0, 1);
4029  __ Mov(x1, 0x7fffffffffffffffL);
4030  __ Cmn(x1, Operand(x0));
4031  END();
4032 
4033  RUN();
4034 
4035  ASSERT_EQUAL_NZCV(NVFlag);
4036 
4037  START();
4038  __ Mov(w0, 1);
4039  __ Mov(w1, 0x7fffffff);
4040  __ Cmn(w1, Operand(w0));
4041  END();
4042 
4043  RUN();
4044 
4045  ASSERT_EQUAL_NZCV(NVFlag);
4046 
4047  START();
4048  __ Mov(x0, 1);
4049  __ Mov(x1, 0xffffffffffffffffL);
4050  __ Cmn(x1, Operand(x0));
4051  END();
4052 
4053  RUN();
4054 
4055  ASSERT_EQUAL_NZCV(ZCFlag);
4056 
4057  START();
4058  __ Mov(w0, 1);
4059  __ Mov(w1, 0xffffffff);
4060  __ Cmn(w1, Operand(w0));
4061  END();
4062 
4063  RUN();
4064 
4065  ASSERT_EQUAL_NZCV(ZCFlag);
4066 
4067  START();
4068  __ Mov(w0, 0);
4069  __ Mov(w1, 1);
4070  // Clear the C flag.
4071  __ Adds(w0, w0, Operand(0));
4072  __ Ngcs(w0, Operand(w1));
4073  END();
4074 
4075  RUN();
4076 
4077  ASSERT_EQUAL_NZCV(NFlag);
4078 
4079  START();
4080  __ Mov(w0, 0);
4081  __ Mov(w1, 0);
4082  // Set the C flag.
4083  __ Cmp(w0, Operand(w0));
4084  __ Ngcs(w0, Operand(w1));
4085  END();
4086 
4087  RUN();
4088 
4089  ASSERT_EQUAL_NZCV(ZCFlag);
4090 
4091  TEARDOWN();
4092 }
4093 
4094 
4095 TEST(cmp_shift) {
4096  INIT_V8();
4097  SETUP();
4098 
4099  START();
4100  __ Mov(x18, 0xf0000000);
4101  __ Mov(x19, 0xf000000010000000UL);
4102  __ Mov(x20, 0xf0000000f0000000UL);
4103  __ Mov(x21, 0x7800000078000000UL);
4104  __ Mov(x22, 0x3c0000003c000000UL);
4105  __ Mov(x23, 0x8000000780000000UL);
4106  __ Mov(x24, 0x0000000f00000000UL);
4107  __ Mov(x25, 0x00000003c0000000UL);
4108  __ Mov(x26, 0x8000000780000000UL);
4109  __ Mov(x27, 0xc0000003);
4110 
4111  __ Cmp(w20, Operand(w21, LSL, 1));
4112  __ Mrs(x0, NZCV);
4113 
4114  __ Cmp(x20, Operand(x22, LSL, 2));
4115  __ Mrs(x1, NZCV);
4116 
4117  __ Cmp(w19, Operand(w23, LSR, 3));
4118  __ Mrs(x2, NZCV);
4119 
4120  __ Cmp(x18, Operand(x24, LSR, 4));
4121  __ Mrs(x3, NZCV);
4122 
4123  __ Cmp(w20, Operand(w25, ASR, 2));
4124  __ Mrs(x4, NZCV);
4125 
4126  __ Cmp(x20, Operand(x26, ASR, 3));
4127  __ Mrs(x5, NZCV);
4128 
4129  __ Cmp(w27, Operand(w22, ROR, 28));
4130  __ Mrs(x6, NZCV);
4131 
4132  __ Cmp(x20, Operand(x21, ROR, 31));
4133  __ Mrs(x7, NZCV);
4134  END();
4135 
4136  RUN();
4137 
4138  ASSERT_EQUAL_32(ZCFlag, w0);
4139  ASSERT_EQUAL_32(ZCFlag, w1);
4140  ASSERT_EQUAL_32(ZCFlag, w2);
4141  ASSERT_EQUAL_32(ZCFlag, w3);
4142  ASSERT_EQUAL_32(ZCFlag, w4);
4143  ASSERT_EQUAL_32(ZCFlag, w5);
4144  ASSERT_EQUAL_32(ZCFlag, w6);
4145  ASSERT_EQUAL_32(ZCFlag, w7);
4146 
4147  TEARDOWN();
4148 }
4149 
4150 
4151 TEST(cmp_extend) {
4152  INIT_V8();
4153  SETUP();
4154 
4155  START();
4156  __ Mov(w20, 0x2);
4157  __ Mov(w21, 0x1);
4158  __ Mov(x22, 0xffffffffffffffffUL);
4159  __ Mov(x23, 0xff);
4160  __ Mov(x24, 0xfffffffffffffffeUL);
4161  __ Mov(x25, 0xffff);
4162  __ Mov(x26, 0xffffffff);
4163 
4164  __ Cmp(w20, Operand(w21, LSL, 1));
4165  __ Mrs(x0, NZCV);
4166 
4167  __ Cmp(x22, Operand(x23, SXTB, 0));
4168  __ Mrs(x1, NZCV);
4169 
4170  __ Cmp(x24, Operand(x23, SXTB, 1));
4171  __ Mrs(x2, NZCV);
4172 
4173  __ Cmp(x24, Operand(x23, UXTB, 1));
4174  __ Mrs(x3, NZCV);
4175 
4176  __ Cmp(w22, Operand(w25, UXTH));
4177  __ Mrs(x4, NZCV);
4178 
4179  __ Cmp(x22, Operand(x25, SXTH));
4180  __ Mrs(x5, NZCV);
4181 
4182  __ Cmp(x22, Operand(x26, UXTW));
4183  __ Mrs(x6, NZCV);
4184 
4185  __ Cmp(x24, Operand(x26, SXTW, 1));
4186  __ Mrs(x7, NZCV);
4187  END();
4188 
4189  RUN();
4190 
4191  ASSERT_EQUAL_32(ZCFlag, w0);
4192  ASSERT_EQUAL_32(ZCFlag, w1);
4193  ASSERT_EQUAL_32(ZCFlag, w2);
4194  ASSERT_EQUAL_32(NCFlag, w3);
4195  ASSERT_EQUAL_32(NCFlag, w4);
4196  ASSERT_EQUAL_32(ZCFlag, w5);
4197  ASSERT_EQUAL_32(NCFlag, w6);
4198  ASSERT_EQUAL_32(ZCFlag, w7);
4199 
4200  TEARDOWN();
4201 }
4202 
4203 
4204 TEST(ccmp) {
4205  INIT_V8();
4206  SETUP();
4207 
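// Ccmp compares its operands only if the condition holds; otherwise it
// simply sets NZCV to the immediate flag value passed as the third argument.
// Ccmn is the same but performs an add (compare-negative) when the condition
// holds.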
4208  START();
4209  __ Mov(w16, 0);
4210  __ Mov(w17, 1);
4211  __ Cmp(w16, w16);
4212  __ Ccmp(w16, w17, NCFlag, eq);
4213  __ Mrs(x0, NZCV);
4214 
4215  __ Cmp(w16, w16);
4216  __ Ccmp(w16, w17, NCFlag, ne);
4217  __ Mrs(x1, NZCV);
4218 
4219  __ Cmp(x16, x16);
4220  __ Ccmn(x16, 2, NZCVFlag, eq);
4221  __ Mrs(x2, NZCV);
4222 
4223  __ Cmp(x16, x16);
4224  __ Ccmn(x16, 2, NZCVFlag, ne);
4225  __ Mrs(x3, NZCV);
4226 
4227  __ ccmp(x16, x16, NZCVFlag, al);
4228  __ Mrs(x4, NZCV);
4229 
4230  __ ccmp(x16, x16, NZCVFlag, nv);
4231  __ Mrs(x5, NZCV);
4232 
4233  END();
4234 
4235  RUN();
4236 
4237  ASSERT_EQUAL_32(NFlag, w0);
4238  ASSERT_EQUAL_32(NCFlag, w1);
4239  ASSERT_EQUAL_32(NoFlag, w2);
4240  ASSERT_EQUAL_32(NZCVFlag, w3);
4241  ASSERT_EQUAL_32(ZCFlag, w4);
4242  ASSERT_EQUAL_32(ZCFlag, w5);
4243 
4244  TEARDOWN();
4245 }
4246 
4247 
4248 TEST(ccmp_wide_imm) {
4249  INIT_V8();
4250  SETUP();
4251 
4252  START();
4253  __ Mov(w20, 0);
4254 
4255  __ Cmp(w20, Operand(w20));
4256  __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
4257  __ Mrs(x0, NZCV);
4258 
4259  __ Cmp(w20, Operand(w20));
4260  __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
4261  __ Mrs(x1, NZCV);
4262  END();
4263 
4264  RUN();
4265 
4266  ASSERT_EQUAL_32(NFlag, w0);
4267  ASSERT_EQUAL_32(NoFlag, w1);
4268 
4269  TEARDOWN();
4270 }
4271 
4272 
4273 TEST(ccmp_shift_extend) {
4274  INIT_V8();
4275  SETUP();
4276 
4277  START();
4278  __ Mov(w20, 0x2);
4279  __ Mov(w21, 0x1);
4280  __ Mov(x22, 0xffffffffffffffffUL);
4281  __ Mov(x23, 0xff);
4282  __ Mov(x24, 0xfffffffffffffffeUL);
4283 
4284  __ Cmp(w20, Operand(w20));
4285  __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
4286  __ Mrs(x0, NZCV);
4287 
4288  __ Cmp(w20, Operand(w20));
4289  __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
4290  __ Mrs(x1, NZCV);
4291 
4292  __ Cmp(w20, Operand(w20));
4293  __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
4294  __ Mrs(x2, NZCV);
4295 
4296  __ Cmp(w20, Operand(w20));
4297  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
4298  __ Mrs(x3, NZCV);
4299 
4300  __ Cmp(w20, Operand(w20));
4301  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
4302  __ Mrs(x4, NZCV);
4303  END();
4304 
4305  RUN();
4306 
4307  ASSERT_EQUAL_32(ZCFlag, w0);
4308  ASSERT_EQUAL_32(ZCFlag, w1);
4309  ASSERT_EQUAL_32(ZCFlag, w2);
4310  ASSERT_EQUAL_32(NCFlag, w3);
4311  ASSERT_EQUAL_32(NZCVFlag, w4);
4312 
4313  TEARDOWN();
4314 }
4315 
4316 
4317 TEST(csel) {
4318  INIT_V8();
4319  SETUP();
4320 
4321  START();
4322  __ Mov(x16, 0);
4323  __ Mov(x24, 0x0000000f0000000fUL);
4324  __ Mov(x25, 0x0000001f0000001fUL);
4325  __ Mov(x26, 0);
4326  __ Mov(x27, 0);
4327 
4328  __ Cmp(w16, 0);
4329  __ Csel(w0, w24, w25, eq);
4330  __ Csel(w1, w24, w25, ne);
4331  __ Csinc(w2, w24, w25, mi);
4332  __ Csinc(w3, w24, w25, pl);
4333 
4334  __ csel(w13, w24, w25, al);
4335  __ csel(x14, x24, x25, nv);
4336 
4337  __ Cmp(x16, 1);
4338  __ Csinv(x4, x24, x25, gt);
4339  __ Csinv(x5, x24, x25, le);
4340  __ Csneg(x6, x24, x25, hs);
4341  __ Csneg(x7, x24, x25, lo);
4342 
4343  __ Cset(w8, ne);
4344  __ Csetm(w9, ne);
4345  __ Cinc(x10, x25, ne);
4346  __ Cinv(x11, x24, ne);
4347  __ Cneg(x12, x24, ne);
4348 
4349  __ csel(w15, w24, w25, al);
4350  __ csel(x18, x24, x25, nv);
4351 
4352  __ CzeroX(x24, ne);
4353  __ CzeroX(x25, eq);
4354 
4355  __ CmovX(x26, x25, ne);
4356  __ CmovX(x27, x25, eq);
4357  END();
4358 
4359  RUN();
4360 
4361  ASSERT_EQUAL_64(0x0000000f, x0);
4362  ASSERT_EQUAL_64(0x0000001f, x1);
4363  ASSERT_EQUAL_64(0x00000020, x2);
4364  ASSERT_EQUAL_64(0x0000000f, x3);
4365  ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
4366  ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
4367  ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
4368  ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
4369  ASSERT_EQUAL_64(0x00000001, x8);
4370  ASSERT_EQUAL_64(0xffffffff, x9);
4371  ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
4372  ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
4373  ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
4374  ASSERT_EQUAL_64(0x0000000f, x13);
4375  ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
4376  ASSERT_EQUAL_64(0x0000000f, x15);
4377  ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
4378  ASSERT_EQUAL_64(0, x24);
4379  ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
4380  ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
4381  ASSERT_EQUAL_64(0, x27);
4382 
4383  TEARDOWN();
4384 }
4385 
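// The conditional-select aliases used above expand to the base
// instructions as follows (standard ARMv8 alias definitions):
//   Cset(rd, cond)      -> csinc(rd, zr, zr, InvertCondition(cond))
//   Csetm(rd, cond)     -> csinv(rd, zr, zr, InvertCondition(cond))
//   Cinc(rd, rn, cond)  -> csinc(rd, rn, rn, InvertCondition(cond))
//   Cinv(rd, rn, cond)  -> csinv(rd, rn, rn, InvertCondition(cond))
//   Cneg(rd, rn, cond)  -> csneg(rd, rn, rn, InvertCondition(cond))
// For example, with the ne condition true after Cmp(x16, 1),
// Cinc(x10, x25, ne) selects x25 + 1 = 0x0000001f00000020.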
4386 
4387 TEST(csel_imm) {
4388  INIT_V8();
4389  SETUP();
4390 
4391  START();
4392  __ Mov(x18, 0);
4393  __ Mov(x19, 0x80000000);
4394  __ Mov(x20, 0x8000000000000000UL);
4395 
4396  __ Cmp(x18, Operand(0));
4397  __ Csel(w0, w19, -2, ne);
4398  __ Csel(w1, w19, -1, ne);
4399  __ Csel(w2, w19, 0, ne);
4400  __ Csel(w3, w19, 1, ne);
4401  __ Csel(w4, w19, 2, ne);
4402  __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
4403  __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
4404  __ Csel(w7, w19, 3, eq);
4405 
4406  __ Csel(x8, x20, -2, ne);
4407  __ Csel(x9, x20, -1, ne);
4408  __ Csel(x10, x20, 0, ne);
4409  __ Csel(x11, x20, 1, ne);
4410  __ Csel(x12, x20, 2, ne);
4411  __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
4412  __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
4413  __ Csel(x15, x20, 3, eq);
4414 
4415  END();
4416 
4417  RUN();
4418 
4419  ASSERT_EQUAL_32(-2, w0);
4420  ASSERT_EQUAL_32(-1, w1);
4421  ASSERT_EQUAL_32(0, w2);
4422  ASSERT_EQUAL_32(1, w3);
4423  ASSERT_EQUAL_32(2, w4);
4424  ASSERT_EQUAL_32(-1, w5);
4425  ASSERT_EQUAL_32(0x40000000, w6);
4426  ASSERT_EQUAL_32(0x80000000, w7);
4427 
4428  ASSERT_EQUAL_64(-2, x8);
4429  ASSERT_EQUAL_64(-1, x9);
4430  ASSERT_EQUAL_64(0, x10);
4431  ASSERT_EQUAL_64(1, x11);
4432  ASSERT_EQUAL_64(2, x12);
4433  ASSERT_EQUAL_64(-1, x13);
4434  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
4435  ASSERT_EQUAL_64(0x8000000000000000UL, x15);
4436 
4437  TEARDOWN();
4438 }
4439 
4440 
4441 TEST(lslv) {
4442  INIT_V8();
4443  SETUP();
4444 
4445  uint64_t value = 0x0123456789abcdefUL;
4446  int shift[] = {1, 3, 5, 9, 17, 33};
4447 
4448  START();
4449  __ Mov(x0, value);
4450  __ Mov(w1, shift[0]);
4451  __ Mov(w2, shift[1]);
4452  __ Mov(w3, shift[2]);
4453  __ Mov(w4, shift[3]);
4454  __ Mov(w5, shift[4]);
4455  __ Mov(w6, shift[5]);
4456 
4457  __ lslv(x0, x0, xzr);
4458 
4459  __ Lsl(x16, x0, x1);
4460  __ Lsl(x17, x0, x2);
4461  __ Lsl(x18, x0, x3);
4462  __ Lsl(x19, x0, x4);
4463  __ Lsl(x20, x0, x5);
4464  __ Lsl(x21, x0, x6);
4465 
4466  __ Lsl(w22, w0, w1);
4467  __ Lsl(w23, w0, w2);
4468  __ Lsl(w24, w0, w3);
4469  __ Lsl(w25, w0, w4);
4470  __ Lsl(w26, w0, w5);
4471  __ Lsl(w27, w0, w6);
4472  END();
4473 
4474  RUN();
4475 
4476  ASSERT_EQUAL_64(value, x0);
4477  ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
4478  ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
4479  ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
4480  ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
4481  ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
4482  ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
4483  ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
4484  ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
4485  ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
4486  ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
4487  ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
4488  ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
4489 
4490  TEARDOWN();
4491 }
4492 
4493 
4494 TEST(lsrv) {
4495  INIT_V8();
4496  SETUP();
4497 
4498  uint64_t value = 0x0123456789abcdefUL;
4499  int shift[] = {1, 3, 5, 9, 17, 33};
4500 
4501  START();
4502  __ Mov(x0, value);
4503  __ Mov(w1, shift[0]);
4504  __ Mov(w2, shift[1]);
4505  __ Mov(w3, shift[2]);
4506  __ Mov(w4, shift[3]);
4507  __ Mov(w5, shift[4]);
4508  __ Mov(w6, shift[5]);
4509 
4510  __ lsrv(x0, x0, xzr);
4511 
4512  __ Lsr(x16, x0, x1);
4513  __ Lsr(x17, x0, x2);
4514  __ Lsr(x18, x0, x3);
4515  __ Lsr(x19, x0, x4);
4516  __ Lsr(x20, x0, x5);
4517  __ Lsr(x21, x0, x6);
4518 
4519  __ Lsr(w22, w0, w1);
4520  __ Lsr(w23, w0, w2);
4521  __ Lsr(w24, w0, w3);
4522  __ Lsr(w25, w0, w4);
4523  __ Lsr(w26, w0, w5);
4524  __ Lsr(w27, w0, w6);
4525  END();
4526 
4527  RUN();
4528 
4529  ASSERT_EQUAL_64(value, x0);
4530  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4531  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4532  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4533  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4534  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4535  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4536 
4537  value &= 0xffffffffUL;
4538  ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
4539  ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
4540  ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
4541  ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
4542  ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
4543  ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
4544 
4545  TEARDOWN();
4546 }
4547 
4548 
4549 TEST(asrv) {
4550  INIT_V8();
4551  SETUP();
4552 
4553  int64_t value = 0xfedcba98fedcba98UL;
4554  int shift[] = {1, 3, 5, 9, 17, 33};
4555 
4556  START();
4557  __ Mov(x0, value);
4558  __ Mov(w1, shift[0]);
4559  __ Mov(w2, shift[1]);
4560  __ Mov(w3, shift[2]);
4561  __ Mov(w4, shift[3]);
4562  __ Mov(w5, shift[4]);
4563  __ Mov(w6, shift[5]);
4564 
4565  __ asrv(x0, x0, xzr);
4566 
4567  __ Asr(x16, x0, x1);
4568  __ Asr(x17, x0, x2);
4569  __ Asr(x18, x0, x3);
4570  __ Asr(x19, x0, x4);
4571  __ Asr(x20, x0, x5);
4572  __ Asr(x21, x0, x6);
4573 
4574  __ Asr(w22, w0, w1);
4575  __ Asr(w23, w0, w2);
4576  __ Asr(w24, w0, w3);
4577  __ Asr(w25, w0, w4);
4578  __ Asr(w26, w0, w5);
4579  __ Asr(w27, w0, w6);
4580  END();
4581 
4582  RUN();
4583 
4584  ASSERT_EQUAL_64(value, x0);
4585  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4586  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4587  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4588  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4589  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4590  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4591 
4592  int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
4593  ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
4594  ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
4595  ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
4596  ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
4597  ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
4598  ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
4599 
4600  TEARDOWN();
4601 }
4602 
4603 
4604 TEST(rorv) {
4605  INIT_V8();
4606  SETUP();
4607 
4608  uint64_t value = 0x0123456789abcdefUL;
4609  int shift[] = {4, 8, 12, 16, 24, 36};
4610 
4611  START();
4612  __ Mov(x0, value);
4613  __ Mov(w1, shift[0]);
4614  __ Mov(w2, shift[1]);
4615  __ Mov(w3, shift[2]);
4616  __ Mov(w4, shift[3]);
4617  __ Mov(w5, shift[4]);
4618  __ Mov(w6, shift[5]);
4619 
4620  __ rorv(x0, x0, xzr);
4621 
4622  __ Ror(x16, x0, x1);
4623  __ Ror(x17, x0, x2);
4624  __ Ror(x18, x0, x3);
4625  __ Ror(x19, x0, x4);
4626  __ Ror(x20, x0, x5);
4627  __ Ror(x21, x0, x6);
4628 
4629  __ Ror(w22, w0, w1);
4630  __ Ror(w23, w0, w2);
4631  __ Ror(w24, w0, w3);
4632  __ Ror(w25, w0, w4);
4633  __ Ror(w26, w0, w5);
4634  __ Ror(w27, w0, w6);
4635  END();
4636 
4637  RUN();
4638 
4639  ASSERT_EQUAL_64(value, x0);
4640  ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
4641  ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
4642  ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
4643  ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
4644  ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
4645  ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
4646  ASSERT_EQUAL_32(0xf89abcde, w22);
4647  ASSERT_EQUAL_32(0xef89abcd, w23);
4648  ASSERT_EQUAL_32(0xdef89abc, w24);
4649  ASSERT_EQUAL_32(0xcdef89ab, w25);
4650  ASSERT_EQUAL_32(0xabcdef89, w26);
4651  ASSERT_EQUAL_32(0xf89abcde, w27);
4652 
4653  TEARDOWN();
4654 }
4655 
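// In the four variable-shift tests above (lslv, lsrv, asrv, rorv), the
// shift count held in the rm register is taken modulo the register
// size: count & 63 for X registers and count & 31 for W registers.
// That is why, in TEST(rorv), w27 (shift 36, i.e. 36 & 31 == 4) matches
// w22 (shift 4), and why the asserts mask the shifts with & 63 and & 31.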
4656 
4657 TEST(bfm) {
4658  INIT_V8();
4659  SETUP();
4660 
4661  START();
4662  __ Mov(x1, 0x0123456789abcdefL);
4663 
4664  __ Mov(x10, 0x8888888888888888L);
4665  __ Mov(x11, 0x8888888888888888L);
4666  __ Mov(x12, 0x8888888888888888L);
4667  __ Mov(x13, 0x8888888888888888L);
4668  __ Mov(w20, 0x88888888);
4669  __ Mov(w21, 0x88888888);
4670 
4671  __ bfm(x10, x1, 16, 31);
4672  __ bfm(x11, x1, 32, 15);
4673 
4674  __ bfm(w20, w1, 16, 23);
4675  __ bfm(w21, w1, 24, 15);
4676 
4677  // Aliases.
4678  __ Bfi(x12, x1, 16, 8);
4679  __ Bfxil(x13, x1, 16, 8);
4680  END();
4681 
4682  RUN();
4683 
4684 
4685  ASSERT_EQUAL_64(0x88888888888889abL, x10);
4686  ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
4687 
4688  ASSERT_EQUAL_32(0x888888ab, w20);
4689  ASSERT_EQUAL_32(0x88cdef88, w21);
4690 
4691  ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
4692  ASSERT_EQUAL_64(0x88888888888888abL, x13);
4693 
4694  TEARDOWN();
4695 }
4696 
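// Bfi and Bfxil are aliases of bfm (a summary of the standard ARMv8
// encodings, where regsize is 32 or 64):
//   Bfi(rd, rn, lsb, width)   -> bfm(rd, rn, (regsize - lsb) % regsize,
//                                    width - 1)
//   Bfxil(rd, rn, lsb, width) -> bfm(rd, rn, lsb, lsb + width - 1)
// So Bfi(x12, x1, 16, 8) above inserts the low 8 bits of x1 (0xef) at
// bit 16, and Bfxil(x13, x1, 16, 8) extracts bits 23:16 of x1 (0xab)
// into the low 8 bits of x13, as the asserts show.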
4697 
4698 TEST(sbfm) {
4699  INIT_V8();
4700  SETUP();
4701 
4702  START();
4703  __ Mov(x1, 0x0123456789abcdefL);
4704  __ Mov(x2, 0xfedcba9876543210L);
4705 
4706  __ sbfm(x10, x1, 16, 31);
4707  __ sbfm(x11, x1, 32, 15);
4708  __ sbfm(x12, x1, 32, 47);
4709  __ sbfm(x13, x1, 48, 35);
4710 
4711  __ sbfm(w14, w1, 16, 23);
4712  __ sbfm(w15, w1, 24, 15);
4713  __ sbfm(w16, w2, 16, 23);
4714  __ sbfm(w17, w2, 24, 15);
4715 
4716  // Aliases.
4717  __ Asr(x18, x1, 32);
4718  __ Asr(x19, x2, 32);
4719  __ Sbfiz(x20, x1, 8, 16);
4720  __ Sbfiz(x21, x2, 8, 16);
4721  __ Sbfx(x22, x1, 8, 16);
4722  __ Sbfx(x23, x2, 8, 16);
4723  __ Sxtb(x24, w1);
4724  __ Sxtb(x25, x2);
4725  __ Sxth(x26, w1);
4726  __ Sxth(x27, x2);
4727  __ Sxtw(x28, w1);
4728  __ Sxtw(x29, x2);
4729  END();
4730 
4731  RUN();
4732 
4733 
4734  ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
4735  ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
4736  ASSERT_EQUAL_64(0x4567L, x12);
4737  ASSERT_EQUAL_64(0x789abcdef0000L, x13);
4738 
4739  ASSERT_EQUAL_32(0xffffffab, w14);
4740  ASSERT_EQUAL_32(0xffcdef00, w15);
4741  ASSERT_EQUAL_32(0x54, w16);
4742  ASSERT_EQUAL_32(0x00321000, w17);
4743 
4744  ASSERT_EQUAL_64(0x01234567L, x18);
4745  ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
4746  ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
4747  ASSERT_EQUAL_64(0x321000L, x21);
4748  ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
4749  ASSERT_EQUAL_64(0x5432L, x23);
4750  ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
4751  ASSERT_EQUAL_64(0x10, x25);
4752  ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
4753  ASSERT_EQUAL_64(0x3210, x27);
4754  ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
4755  ASSERT_EQUAL_64(0x76543210, x29);
4756 
4757  TEARDOWN();
4758 }
4759 
4760 
4761 TEST(ubfm) {
4762  INIT_V8();
4763  SETUP();
4764 
4765  START();
4766  __ Mov(x1, 0x0123456789abcdefL);
4767  __ Mov(x2, 0xfedcba9876543210L);
4768 
4769  __ Mov(x10, 0x8888888888888888L);
4770  __ Mov(x11, 0x8888888888888888L);
4771 
4772  __ ubfm(x10, x1, 16, 31);
4773  __ ubfm(x11, x1, 32, 15);
4774  __ ubfm(x12, x1, 32, 47);
4775  __ ubfm(x13, x1, 48, 35);
4776 
4777  __ ubfm(w25, w1, 16, 23);
4778  __ ubfm(w26, w1, 24, 15);
4779  __ ubfm(w27, w2, 16, 23);
4780  __ ubfm(w28, w2, 24, 15);
4781 
4782  // Aliases
4783  __ Lsl(x15, x1, 63);
4784  __ Lsl(x16, x1, 0);
4785  __ Lsr(x17, x1, 32);
4786  __ Ubfiz(x18, x1, 8, 16);
4787  __ Ubfx(x19, x1, 8, 16);
4788  __ Uxtb(x20, x1);
4789  __ Uxth(x21, x1);
4790  __ Uxtw(x22, x1);
4791  END();
4792 
4793  RUN();
4794 
4795  ASSERT_EQUAL_64(0x00000000000089abL, x10);
4796  ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
4797  ASSERT_EQUAL_64(0x4567L, x12);
4798  ASSERT_EQUAL_64(0x789abcdef0000L, x13);
4799 
4800  ASSERT_EQUAL_32(0x000000ab, w25);
4801  ASSERT_EQUAL_32(0x00cdef00, w26);
4802  ASSERT_EQUAL_32(0x54, w27);
4803  ASSERT_EQUAL_32(0x00321000, w28);
4804 
4805  ASSERT_EQUAL_64(0x8000000000000000L, x15);
4806  ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
4807  ASSERT_EQUAL_64(0x01234567L, x17);
4808  ASSERT_EQUAL_64(0xcdef00L, x18);
4809  ASSERT_EQUAL_64(0xabcdL, x19);
4810  ASSERT_EQUAL_64(0xefL, x20);
4811  ASSERT_EQUAL_64(0xcdefL, x21);
4812  ASSERT_EQUAL_64(0x89abcdefL, x22);
4813 
4814  TEARDOWN();
4815 }
4816 
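// The shift, bitfield-extract and extend aliases in the sbfm/ubfm tests
// above expand as follows (standard ARMv8 definitions, regsize 32 or 64):
//   Asr(rd, rn, shift)  -> sbfm(rd, rn, shift, regsize - 1)
//   Lsr(rd, rn, shift)  -> ubfm(rd, rn, shift, regsize - 1)
//   Lsl(rd, rn, shift)  -> ubfm(rd, rn, (regsize - shift) % regsize,
//                               regsize - 1 - shift)
//   Sbfx/Ubfx(rd, rn, lsb, width) -> s/ubfm(rd, rn, lsb, lsb + width - 1)
//   Sxtb/Uxtb -> s/ubfm(rd, rn, 0, 7); Sxth/Uxth use imms = 15, and
//   Sxtw/Uxtw use imms = 31.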
4817 
4818 TEST(extr) {
4819  INIT_V8();
4820  SETUP();
4821 
4822  START();
4823  __ Mov(x1, 0x0123456789abcdefL);
4824  __ Mov(x2, 0xfedcba9876543210L);
4825 
4826  __ Extr(w10, w1, w2, 0);
4827  __ Extr(w11, w1, w2, 1);
4828  __ Extr(x12, x2, x1, 2);
4829 
4830  __ Ror(w13, w1, 0);
4831  __ Ror(w14, w2, 17);
4832  __ Ror(w15, w1, 31);
4833  __ Ror(x18, x2, 1);
4834  __ Ror(x19, x1, 63);
4835  END();
4836 
4837  RUN();
4838 
4839  ASSERT_EQUAL_64(0x76543210, x10);
4840  ASSERT_EQUAL_64(0xbb2a1908, x11);
4841  ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
4842  ASSERT_EQUAL_64(0x89abcdef, x13);
4843  ASSERT_EQUAL_64(0x19083b2a, x14);
4844  ASSERT_EQUAL_64(0x13579bdf, x15);
4845  ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
4846  ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
4847 
4848  TEARDOWN();
4849 }
4850 
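// Extr extracts a register-sized window from the concatenation rn:rm,
// and Ror(rd, rs, shift) is the alias extr(rd, rs, rs, shift). For
// example, Extr(w11, w1, w2, 1) above computes
// ((w1 & 1) << 31) | (w2 >> 1) = 0x80000000 | 0x3b2a1908 = 0xbb2a1908.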
4851 
4852 TEST(fmov_imm) {
4853  INIT_V8();
4854  SETUP();
4855 
4856  START();
4857  __ Fmov(s11, 1.0);
4858  __ Fmov(d22, -13.0);
4859  __ Fmov(s1, 255.0);
4860  __ Fmov(d2, 12.34567);
4861  __ Fmov(s3, 0.0);
4862  __ Fmov(d4, 0.0);
4863  __ Fmov(s5, kFP32PositiveInfinity);
4864  __ Fmov(d6, kFP64NegativeInfinity);
4865  END();
4866 
4867  RUN();
4868 
4869  ASSERT_EQUAL_FP32(1.0, s11);
4870  ASSERT_EQUAL_FP64(-13.0, d22);
4871  ASSERT_EQUAL_FP32(255.0, s1);
4872  ASSERT_EQUAL_FP64(12.34567, d2);
4873  ASSERT_EQUAL_FP32(0.0, s3);
4874  ASSERT_EQUAL_FP64(0.0, d4);
4875  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
4876  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
4877 
4878  TEARDOWN();
4879 }
4880 
4881 
4882 TEST(fmov_reg) {
4883  INIT_V8();
4884  SETUP();
4885 
4886  START();
4887  __ Fmov(s20, 1.0);
4888  __ Fmov(w10, s20);
4889  __ Fmov(s30, w10);
4890  __ Fmov(s5, s20);
4891  __ Fmov(d1, -13.0);
4892  __ Fmov(x1, d1);
4893  __ Fmov(d2, x1);
4894  __ Fmov(d4, d1);
4895  __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
4896  __ Fmov(s6, s6);
4897  END();
4898 
4899  RUN();
4900 
4901  ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
4902  ASSERT_EQUAL_FP32(1.0, s30);
4903  ASSERT_EQUAL_FP32(1.0, s5);
4904  ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
4905  ASSERT_EQUAL_FP64(-13.0, d2);
4906  ASSERT_EQUAL_FP64(-13.0, d4);
4907  ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
4908 
4909  TEARDOWN();
4910 }
4911 
4912 
4913 TEST(fadd) {
4914  INIT_V8();
4915  SETUP();
4916 
4917  START();
4918  __ Fmov(s14, -0.0f);
4919  __ Fmov(s15, kFP32PositiveInfinity);
4920  __ Fmov(s16, kFP32NegativeInfinity);
4921  __ Fmov(s17, 3.25f);
4922  __ Fmov(s18, 1.0f);
4923  __ Fmov(s19, 0.0f);
4924 
4925  __ Fmov(d26, -0.0);
4926  __ Fmov(d27, kFP64PositiveInfinity);
4927  __ Fmov(d28, kFP64NegativeInfinity);
4928  __ Fmov(d29, 0.0);
4929  __ Fmov(d30, -2.0);
4930  __ Fmov(d31, 2.25);
4931 
4932  __ Fadd(s0, s17, s18);
4933  __ Fadd(s1, s18, s19);
4934  __ Fadd(s2, s14, s18);
4935  __ Fadd(s3, s15, s18);
4936  __ Fadd(s4, s16, s18);
4937  __ Fadd(s5, s15, s16);
4938  __ Fadd(s6, s16, s15);
4939 
4940  __ Fadd(d7, d30, d31);
4941  __ Fadd(d8, d29, d31);
4942  __ Fadd(d9, d26, d31);
4943  __ Fadd(d10, d27, d31);
4944  __ Fadd(d11, d28, d31);
4945  __ Fadd(d12, d27, d28);
4946  __ Fadd(d13, d28, d27);
4947  END();
4948 
4949  RUN();
4950 
4951  ASSERT_EQUAL_FP32(4.25, s0);
4952  ASSERT_EQUAL_FP32(1.0, s1);
4953  ASSERT_EQUAL_FP32(1.0, s2);
4954  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
4955  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
4956  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
4957  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
4958  ASSERT_EQUAL_FP64(0.25, d7);
4959  ASSERT_EQUAL_FP64(2.25, d8);
4960  ASSERT_EQUAL_FP64(2.25, d9);
4961  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
4962  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
4963  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
4964  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
4965 
4966  TEARDOWN();
4967 }
4968 
4969 
4970 TEST(fsub) {
4971  INIT_V8();
4972  SETUP();
4973 
4974  START();
4975  __ Fmov(s14, -0.0f);
4976  __ Fmov(s15, kFP32PositiveInfinity);
4977  __ Fmov(s16, kFP32NegativeInfinity);
4978  __ Fmov(s17, 3.25f);
4979  __ Fmov(s18, 1.0f);
4980  __ Fmov(s19, 0.0f);
4981 
4982  __ Fmov(d26, -0.0);
4983  __ Fmov(d27, kFP64PositiveInfinity);
4984  __ Fmov(d28, kFP64NegativeInfinity);
4985  __ Fmov(d29, 0.0);
4986  __ Fmov(d30, -2.0);
4987  __ Fmov(d31, 2.25);
4988 
4989  __ Fsub(s0, s17, s18);
4990  __ Fsub(s1, s18, s19);
4991  __ Fsub(s2, s14, s18);
4992  __ Fsub(s3, s18, s15);
4993  __ Fsub(s4, s18, s16);
4994  __ Fsub(s5, s15, s15);
4995  __ Fsub(s6, s16, s16);
4996 
4997  __ Fsub(d7, d30, d31);
4998  __ Fsub(d8, d29, d31);
4999  __ Fsub(d9, d26, d31);
5000  __ Fsub(d10, d31, d27);
5001  __ Fsub(d11, d31, d28);
5002  __ Fsub(d12, d27, d27);
5003  __ Fsub(d13, d28, d28);
5004  END();
5005 
5006  RUN();
5007 
5008  ASSERT_EQUAL_FP32(2.25, s0);
5009  ASSERT_EQUAL_FP32(1.0, s1);
5010  ASSERT_EQUAL_FP32(-1.0, s2);
5011  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5012  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
5013  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5014  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5015  ASSERT_EQUAL_FP64(-4.25, d7);
5016  ASSERT_EQUAL_FP64(-2.25, d8);
5017  ASSERT_EQUAL_FP64(-2.25, d9);
5018  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5019  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5020  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5021  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5022 
5023  TEARDOWN();
5024 }
5025 
5026 
5027 TEST(fmul) {
5028  INIT_V8();
5029  SETUP();
5030 
5031  START();
5032  __ Fmov(s14, -0.0f);
5033  __ Fmov(s15, kFP32PositiveInfinity);
5034  __ Fmov(s16, kFP32NegativeInfinity);
5035  __ Fmov(s17, 3.25f);
5036  __ Fmov(s18, 2.0f);
5037  __ Fmov(s19, 0.0f);
5038  __ Fmov(s20, -2.0f);
5039 
5040  __ Fmov(d26, -0.0);
5041  __ Fmov(d27, kFP64PositiveInfinity);
5042  __ Fmov(d28, kFP64NegativeInfinity);
5043  __ Fmov(d29, 0.0);
5044  __ Fmov(d30, -2.0);
5045  __ Fmov(d31, 2.25);
5046 
5047  __ Fmul(s0, s17, s18);
5048  __ Fmul(s1, s18, s19);
5049  __ Fmul(s2, s14, s14);
5050  __ Fmul(s3, s15, s20);
5051  __ Fmul(s4, s16, s20);
5052  __ Fmul(s5, s15, s19);
5053  __ Fmul(s6, s19, s16);
5054 
5055  __ Fmul(d7, d30, d31);
5056  __ Fmul(d8, d29, d31);
5057  __ Fmul(d9, d26, d26);
5058  __ Fmul(d10, d27, d30);
5059  __ Fmul(d11, d28, d30);
5060  __ Fmul(d12, d27, d29);
5061  __ Fmul(d13, d29, d28);
5062  END();
5063 
5064  RUN();
5065 
5066  ASSERT_EQUAL_FP32(6.5, s0);
5067  ASSERT_EQUAL_FP32(0.0, s1);
5068  ASSERT_EQUAL_FP32(0.0, s2);
5069  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5070  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
5071  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5072  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5073  ASSERT_EQUAL_FP64(-4.5, d7);
5074  ASSERT_EQUAL_FP64(0.0, d8);
5075  ASSERT_EQUAL_FP64(0.0, d9);
5076  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5077  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5078  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5079  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5080 
5081  TEARDOWN();
5082 }
5083 
5084 
5085 static void FmaddFmsubHelper(double n, double m, double a,
5086  double fmadd, double fmsub,
5087  double fnmadd, double fnmsub) {
5088  SETUP();
5089  START();
5090 
5091  __ Fmov(d0, n);
5092  __ Fmov(d1, m);
5093  __ Fmov(d2, a);
5094  __ Fmadd(d28, d0, d1, d2);
5095  __ Fmsub(d29, d0, d1, d2);
5096  __ Fnmadd(d30, d0, d1, d2);
5097  __ Fnmsub(d31, d0, d1, d2);
5098 
5099  END();
5100  RUN();
5101 
5102  ASSERT_EQUAL_FP64(fmadd, d28);
5103  ASSERT_EQUAL_FP64(fmsub, d29);
5104  ASSERT_EQUAL_FP64(fnmadd, d30);
5105  ASSERT_EQUAL_FP64(fnmsub, d31);
5106 
5107  TEARDOWN();
5108 }
5109 
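// For reference, the four fused operations checked by FmaddFmsubHelper
// compute, with a single rounding at the end:
//   Fmadd(d, n, m, a)  : d =  a + n * m
//   Fmsub(d, n, m, a)  : d =  a - n * m
//   Fnmadd(d, n, m, a) : d = -a - n * m
//   Fnmsub(d, n, m, a) : d = -a + n * m
// so FmaddFmsubHelper(1.0, 2.0, 3.0, ...) expects 5.0, 1.0, -5.0, -1.0.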
5110 
5111 TEST(fmadd_fmsub_double) {
5112  INIT_V8();
5113 
5114  // It's hard to check the result of fused operations because the only way to
5115  // calculate the result is using fma, which is what the simulator uses anyway.
5116  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5117 
5118  // Basic operation.
5119  FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
5120  FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
5121 
5122  // Check the sign of exact zeroes.
5123  //                 n     m     a     fmadd  fmsub  fnmadd fnmsub
5124  FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
5125  FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
5126  FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
5127  FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
5128  FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
5129  FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
5130  FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
5131  FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
5132 
5133  // Check NaN generation.
5134  FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
5135  kFP64DefaultNaN, kFP64DefaultNaN,
5136  kFP64DefaultNaN, kFP64DefaultNaN);
5137  FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
5138  kFP64DefaultNaN, kFP64DefaultNaN,
5139  kFP64DefaultNaN, kFP64DefaultNaN);
5140  FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
5141  kFP64PositiveInfinity, // inf + ( inf * 1) = inf
5142  kFP64DefaultNaN, // inf + (-inf * 1) = NaN
5143  kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
5144  kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
5145  FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
5146  kFP64DefaultNaN, // inf + (-inf * 1) = NaN
5147  kFP64PositiveInfinity, // inf + ( inf * 1) = inf
5148  kFP64DefaultNaN, // -inf + ( inf * 1) = NaN
5149  kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf
5150 }
5151 
5152 
5153 static void FmaddFmsubHelper(float n, float m, float a,
5154  float fmadd, float fmsub,
5155  float fnmadd, float fnmsub) {
5156  SETUP();
5157  START();
5158 
5159  __ Fmov(s0, n);
5160  __ Fmov(s1, m);
5161  __ Fmov(s2, a);
5162  __ Fmadd(s28, s0, s1, s2);
5163  __ Fmsub(s29, s0, s1, s2);
5164  __ Fnmadd(s30, s0, s1, s2);
5165  __ Fnmsub(s31, s0, s1, s2);
5166 
5167  END();
5168  RUN();
5169 
5170  ASSERT_EQUAL_FP32(fmadd, s28);
5171  ASSERT_EQUAL_FP32(fmsub, s29);
5172  ASSERT_EQUAL_FP32(fnmadd, s30);
5173  ASSERT_EQUAL_FP32(fnmsub, s31);
5174 
5175  TEARDOWN();
5176 }
5177 
5178 
5179 TEST(fmadd_fmsub_float) {
5180  INIT_V8();
5181  // It's hard to check the result of fused operations because the only way to
5182  // calculate the result is using fma, which is what the simulator uses anyway.
5183  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5184 
5185  // Basic operation.
5186  FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
5187  FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
5188 
5189  // Check the sign of exact zeroes.
5190  //                 n      m      a      fmadd  fmsub  fnmadd fnmsub
5191  FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5192  FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5193  FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5194  FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5195  FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5196  FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5197  FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5198  FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5199 
5200  // Check NaN generation.
5201  FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
5202  kFP32DefaultNaN, kFP32DefaultNaN,
5203  kFP32DefaultNaN, kFP32DefaultNaN);
5204  FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
5205  kFP32DefaultNaN, kFP32DefaultNaN,
5206  kFP32DefaultNaN, kFP32DefaultNaN);
5207  FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
5208  kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5209  kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5210  kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
5211  kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
5212  FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
5213  kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5214  kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5215  kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
5216  kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
5217 }
5218 
5219 
5220 TEST(fmadd_fmsub_double_nans) {
5221  INIT_V8();
5222  // Make sure that NaN propagation works correctly.
5223  double s1 = rawbits_to_double(0x7ff5555511111111);
5224  double s2 = rawbits_to_double(0x7ff5555522222222);
5225  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
5226  double q1 = rawbits_to_double(0x7ffaaaaa11111111);
5227  double q2 = rawbits_to_double(0x7ffaaaaa22222222);
5228  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
5229  ASSERT(IsSignallingNaN(s1));
5230  ASSERT(IsSignallingNaN(s2));
5231  ASSERT(IsSignallingNaN(sa));
5232  ASSERT(IsQuietNaN(q1));
5233  ASSERT(IsQuietNaN(q2));
5234  ASSERT(IsQuietNaN(qa));
5235 
5236  // The input NaNs after passing through ProcessNaN.
5237  double s1_proc = rawbits_to_double(0x7ffd555511111111);
5238  double s2_proc = rawbits_to_double(0x7ffd555522222222);
5239  double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
5240  double q1_proc = q1;
5241  double q2_proc = q2;
5242  double qa_proc = qa;
5243  ASSERT(IsQuietNaN(s1_proc));
5244  ASSERT(IsQuietNaN(s2_proc));
5245  ASSERT(IsQuietNaN(sa_proc));
5246  ASSERT(IsQuietNaN(q1_proc));
5247  ASSERT(IsQuietNaN(q2_proc));
5248  ASSERT(IsQuietNaN(qa_proc));
5249 
5250  // Negated NaNs as it would be done on ARMv8 hardware.
5251  double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
5252  double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
5253  double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
5254  double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
5255  ASSERT(IsQuietNaN(s1_proc_neg));
5256  ASSERT(IsQuietNaN(sa_proc_neg));
5257  ASSERT(IsQuietNaN(q1_proc_neg));
5258  ASSERT(IsQuietNaN(qa_proc_neg));
5259 
5260  // Quiet NaNs are propagated.
5261  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5262  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5263  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5264  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5265  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5266  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5267  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5268 
5269  // Signalling NaNs are propagated, and made quiet.
5270  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5271  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
5272  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5273  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5274  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5275  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5276  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5277 
5278  // Signalling NaNs take precedence over quiet NaNs.
5279  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5280  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
5281  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5282  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5283  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5284  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5285  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5286 
5287  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5288  FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
5289  kFP64DefaultNaN, kFP64DefaultNaN,
5290  kFP64DefaultNaN, kFP64DefaultNaN);
5291  FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
5292  kFP64DefaultNaN, kFP64DefaultNaN,
5293  kFP64DefaultNaN, kFP64DefaultNaN);
5294  FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
5295  kFP64DefaultNaN, kFP64DefaultNaN,
5296  kFP64DefaultNaN, kFP64DefaultNaN);
5297  FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
5298  kFP64DefaultNaN, kFP64DefaultNaN,
5299  kFP64DefaultNaN, kFP64DefaultNaN);
5300 }
5301 
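// The "processed" values in the test above follow from how signalling
// NaNs are quietened by ProcessNaN: the top mantissa bit is set (bit 51
// for doubles, bit 22 for floats), so 0x7ff5555511111111 becomes
// 0x7ffd555511111111 (and 0x7f951111 becomes 0x7fd51111 in the float
// variant below). The negated variants additionally have the sign bit
// set, which is what fnmadd/fnmsub produce when they negate a
// propagated NaN.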
5302 
5303 TEST(fmadd_fmsub_float_nans) {
5304  INIT_V8();
5305  // Make sure that NaN propagation works correctly.
5306  float s1 = rawbits_to_float(0x7f951111);
5307  float s2 = rawbits_to_float(0x7f952222);
5308  float sa = rawbits_to_float(0x7f95aaaa);
5309  float q1 = rawbits_to_float(0x7fea1111);
5310  float q2 = rawbits_to_float(0x7fea2222);
5311  float qa = rawbits_to_float(0x7feaaaaa);
5312  ASSERT(IsSignallingNaN(s1));
5313  ASSERT(IsSignallingNaN(s2));
5314  ASSERT(IsSignallingNaN(sa));
5315  ASSERT(IsQuietNaN(q1));
5316  ASSERT(IsQuietNaN(q2));
5317  ASSERT(IsQuietNaN(qa));
5318 
5319  // The input NaNs after passing through ProcessNaN.
5320  float s1_proc = rawbits_to_float(0x7fd51111);
5321  float s2_proc = rawbits_to_float(0x7fd52222);
5322  float sa_proc = rawbits_to_float(0x7fd5aaaa);
5323  float q1_proc = q1;
5324  float q2_proc = q2;
5325  float qa_proc = qa;
5326  ASSERT(IsQuietNaN(s1_proc));
5327  ASSERT(IsQuietNaN(s2_proc));
5328  ASSERT(IsQuietNaN(sa_proc));
5329  ASSERT(IsQuietNaN(q1_proc));
5330  ASSERT(IsQuietNaN(q2_proc));
5331  ASSERT(IsQuietNaN(qa_proc));
5332 
5333  // Negated NaNs as it would be done on ARMv8 hardware.
5334  float s1_proc_neg = rawbits_to_float(0xffd51111);
5335  float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
5336  float q1_proc_neg = rawbits_to_float(0xffea1111);
5337  float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
5338  ASSERT(IsQuietNaN(s1_proc_neg));
5339  ASSERT(IsQuietNaN(sa_proc_neg));
5340  ASSERT(IsQuietNaN(q1_proc_neg));
5341  ASSERT(IsQuietNaN(qa_proc_neg));
5342 
5343  // Quiet NaNs are propagated.
5344  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5345  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5346  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5347  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5348  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5349  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5350  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5351 
5352  // Signalling NaNs are propagated, and made quiet.
5353  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5354  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
5355  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5356  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5357  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5358  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5359  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5360 
5361  // Signalling NaNs take precedence over quiet NaNs.
5362  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5363  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
5364  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5365  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5366  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5367  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5368  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5369 
5370  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5371  FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
5372  kFP32DefaultNaN, kFP32DefaultNaN,
5373  kFP32DefaultNaN, kFP32DefaultNaN);
5374  FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
5375  kFP32DefaultNaN, kFP32DefaultNaN,
5376  kFP32DefaultNaN, kFP32DefaultNaN);
5377  FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
5378  kFP32DefaultNaN, kFP32DefaultNaN,
5379  kFP32DefaultNaN, kFP32DefaultNaN);
5380  FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
5381  kFP32DefaultNaN, kFP32DefaultNaN,
5382  kFP32DefaultNaN, kFP32DefaultNaN);
5383 }
5384 
5385 
5386 TEST(fdiv) {
5387  INIT_V8();
5388  SETUP();
5389 
5390  START();
5391  __ Fmov(s14, -0.0f);
5392  __ Fmov(s15, kFP32PositiveInfinity);
5393  __ Fmov(s16, kFP32NegativeInfinity);
5394  __ Fmov(s17, 3.25f);
5395  __ Fmov(s18, 2.0f);
5396  __ Fmov(s19, 2.0f);
5397  __ Fmov(s20, -2.0f);
5398 
5399  __ Fmov(d26, -0.0);
5400  __ Fmov(d27, kFP64PositiveInfinity);
5401  __ Fmov(d28, kFP64NegativeInfinity);
5402  __ Fmov(d29, 0.0);
5403  __ Fmov(d30, -2.0);
5404  __ Fmov(d31, 2.25);
5405 
5406  __ Fdiv(s0, s17, s18);
5407  __ Fdiv(s1, s18, s19);
5408  __ Fdiv(s2, s14, s18);
5409  __ Fdiv(s3, s18, s15);
5410  __ Fdiv(s4, s18, s16);
5411  __ Fdiv(s5, s15, s16);
5412  __ Fdiv(s6, s14, s14);
5413 
5414  __ Fdiv(d7, d31, d30);
5415  __ Fdiv(d8, d29, d31);
5416  __ Fdiv(d9, d26, d31);
5417  __ Fdiv(d10, d31, d27);
5418  __ Fdiv(d11, d31, d28);
5419  __ Fdiv(d12, d28, d27);
5420  __ Fdiv(d13, d29, d29);
5421  END();
5422 
5423  RUN();
5424 
5425  ASSERT_EQUAL_FP32(1.625f, s0);
5426  ASSERT_EQUAL_FP32(1.0f, s1);
5427  ASSERT_EQUAL_FP32(-0.0f, s2);
5428  ASSERT_EQUAL_FP32(0.0f, s3);
5429  ASSERT_EQUAL_FP32(-0.0f, s4);
5430  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5431  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5432  ASSERT_EQUAL_FP64(-1.125, d7);
5433  ASSERT_EQUAL_FP64(0.0, d8);
5434  ASSERT_EQUAL_FP64(-0.0, d9);
5435  ASSERT_EQUAL_FP64(0.0, d10);
5436  ASSERT_EQUAL_FP64(-0.0, d11);
5437  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5438  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
5439 
5440  TEARDOWN();
5441 }
5442 
5443 
5444 static float MinMaxHelper(float n,
5445  float m,
5446  bool min,
5447  float quiet_nan_substitute = 0.0) {
5448  uint32_t raw_n = float_to_rawbits(n);
5449  uint32_t raw_m = float_to_rawbits(m);
5450 
5451  if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
5452  // n is signalling NaN.
5453  return rawbits_to_float(raw_n | kSQuietNanMask);
5454  } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
5455  // m is signalling NaN.
5456  return rawbits_to_float(raw_m | kSQuietNanMask);
5457  } else if (quiet_nan_substitute == 0.0) {
5458  if (std::isnan(n)) {
5459  // n is quiet NaN.
5460  return n;
5461  } else if (std::isnan(m)) {
5462  // m is quiet NaN.
5463  return m;
5464  }
5465  } else {
5466  // Substitute n or m if one is quiet, but not both.
5467  if (std::isnan(n) && !std::isnan(m)) {
5468  // n is quiet NaN: replace with substitute.
5469  n = quiet_nan_substitute;
5470  } else if (!std::isnan(n) && std::isnan(m)) {
5471  // m is quiet NaN: replace with substitute.
5472  m = quiet_nan_substitute;
5473  }
5474  }
5475 
5476  if ((n == 0.0) && (m == 0.0) &&
5477  (copysign(1.0, n) != copysign(1.0, m))) {
5478  return min ? -0.0 : 0.0;
5479  }
5480 
5481  return min ? fminf(n, m) : fmaxf(n, m);
5482 }
5483 
5484 
5485 static double MinMaxHelper(double n,
5486  double m,
5487  bool min,
5488  double quiet_nan_substitute = 0.0) {
5489  uint64_t raw_n = double_to_rawbits(n);
5490  uint64_t raw_m = double_to_rawbits(m);
5491 
5492  if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
5493  // n is signalling NaN.
5494  return rawbits_to_double(raw_n | kDQuietNanMask);
5495  } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
5496  // m is signalling NaN.
5497  return rawbits_to_double(raw_m | kDQuietNanMask);
5498  } else if (quiet_nan_substitute == 0.0) {
5499  if (std::isnan(n)) {
5500  // n is quiet NaN.
5501  return n;
5502  } else if (std::isnan(m)) {
5503  // m is quiet NaN.
5504  return m;
5505  }
5506  } else {
5507  // Substitute n or m if one is quiet, but not both.
5508  if (std::isnan(n) && !std::isnan(m)) {
5509  // n is quiet NaN: replace with substitute.
5510  n = quiet_nan_substitute;
5511  } else if (!std::isnan(n) && std::isnan(m)) {
5512  // m is quiet NaN: replace with substitute.
5513  m = quiet_nan_substitute;
5514  }
5515  }
5516 
5517  if ((n == 0.0) && (m == 0.0) &&
5518  (copysign(1.0, n) != copysign(1.0, m))) {
5519  return min ? -0.0 : 0.0;
5520  }
5521 
5522  return min ? fmin(n, m) : fmax(n, m);
5523 }
5524 
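// Both MinMaxHelper overloads model the two flavours of min/max with a
// single function. With quiet_nan_substitute == 0.0 they behave like
// Fmin/Fmax, where any NaN operand propagates. With a substitute they
// behave like Fminnm/Fmaxnm (IEEE 754-2008 minNum/maxNum), where a
// single quiet NaN loses to the number: substituting +infinity for min
// (or -infinity for max) lets fmin/fmax fall through to the numeric
// operand, as the helper calls in the tests below demonstrate.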
5525 
5526 static void FminFmaxDoubleHelper(double n, double m, double min, double max,
5527  double minnm, double maxnm) {
5528  SETUP();
5529 
5530  START();
5531  __ Fmov(d0, n);
5532  __ Fmov(d1, m);
5533  __ Fmin(d28, d0, d1);
5534  __ Fmax(d29, d0, d1);
5535  __ Fminnm(d30, d0, d1);
5536  __ Fmaxnm(d31, d0, d1);
5537  END();
5538 
5539  RUN();
5540 
5541  ASSERT_EQUAL_FP64(min, d28);
5542  ASSERT_EQUAL_FP64(max, d29);
5543  ASSERT_EQUAL_FP64(minnm, d30);
5544  ASSERT_EQUAL_FP64(maxnm, d31);
5545 
5546  TEARDOWN();
5547 }
5548 
5549 
5550 TEST(fmax_fmin_d) {
5551  INIT_V8();
5552  // Use non-standard NaNs to check that the payload bits are preserved.
5553  double snan = rawbits_to_double(0x7ff5555512345678);
5554  double qnan = rawbits_to_double(0x7ffaaaaa87654321);
5555 
5556  double snan_processed = rawbits_to_double(0x7ffd555512345678);
5557  double qnan_processed = qnan;
5558 
5559  ASSERT(IsSignallingNaN(snan));
5560  ASSERT(IsQuietNaN(qnan));
5561  ASSERT(IsQuietNaN(snan_processed));
5562  ASSERT(IsQuietNaN(qnan_processed));
5563 
5564  // Bootstrap tests.
5565  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
5566  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
5567  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
5568  kFP64NegativeInfinity, kFP64PositiveInfinity,
5569  kFP64NegativeInfinity, kFP64PositiveInfinity);
5570  FminFmaxDoubleHelper(snan, 0,
5571  snan_processed, snan_processed,
5572  snan_processed, snan_processed);
5573  FminFmaxDoubleHelper(0, snan,
5574  snan_processed, snan_processed,
5575  snan_processed, snan_processed);
5576  FminFmaxDoubleHelper(qnan, 0,
5577  qnan_processed, qnan_processed,
5578  0, 0);
5579  FminFmaxDoubleHelper(0, qnan,
5580  qnan_processed, qnan_processed,
5581  0, 0);
5582  FminFmaxDoubleHelper(qnan, snan,
5583  snan_processed, snan_processed,
5584  snan_processed, snan_processed);
5585  FminFmaxDoubleHelper(snan, qnan,
5586  snan_processed, snan_processed,
5587  snan_processed, snan_processed);
5588 
5589  // Iterate over all combinations of inputs.
5590  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
5591  -DBL_MAX, -DBL_MIN, -1.0, -0.0,
5592  kFP64PositiveInfinity, kFP64NegativeInfinity,
5593  kFP64QuietNaN, kFP64SignallingNaN };
5594 
5595  const int count = sizeof(inputs) / sizeof(inputs[0]);
5596 
5597  for (int in = 0; in < count; in++) {
5598  double n = inputs[in];
5599  for (int im = 0; im < count; im++) {
5600  double m = inputs[im];
5601  FminFmaxDoubleHelper(n, m,
5602  MinMaxHelper(n, m, true),
5603  MinMaxHelper(n, m, false),
5604  MinMaxHelper(n, m, true, kFP64PositiveInfinity),
5605  MinMaxHelper(n, m, false, kFP64NegativeInfinity));
5606  }
5607  }
5608 }
5609 
5610 
5611 static void FminFmaxFloatHelper(float n, float m, float min, float max,
5612  float minnm, float maxnm) {
5613  SETUP();
5614 
5615  START();
5616  __ Fmov(s0, n);
5617  __ Fmov(s1, m);
5618  __ Fmin(s28, s0, s1);
5619  __ Fmax(s29, s0, s1);
5620  __ Fminnm(s30, s0, s1);
5621  __ Fmaxnm(s31, s0, s1);
5622  END();
5623 
5624  RUN();
5625 
5626  ASSERT_EQUAL_FP32(min, s28);
5627  ASSERT_EQUAL_FP32(max, s29);
5628  ASSERT_EQUAL_FP32(minnm, s30);
5629  ASSERT_EQUAL_FP32(maxnm, s31);
5630 
5631  TEARDOWN();
5632 }
5633 
5634 
5635 TEST(fmax_fmin_s) {
5636  INIT_V8();
5637  // Use non-standard NaNs to check that the payload bits are preserved.
5638  float snan = rawbits_to_float(0x7f951234);
5639  float qnan = rawbits_to_float(0x7fea8765);
5640 
5641  float snan_processed = rawbits_to_float(0x7fd51234);
5642  float qnan_processed = qnan;
5643 
5644  ASSERT(IsSignallingNaN(snan));
5645  ASSERT(IsQuietNaN(qnan));
5646  ASSERT(IsQuietNaN(snan_processed));
5647  ASSERT(IsQuietNaN(qnan_processed));
5648 
5649  // Bootstrap tests.
5650  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
5651  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
5652  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
5653  kFP32NegativeInfinity, kFP32PositiveInfinity,
5654  kFP32NegativeInfinity, kFP32PositiveInfinity);
5655  FminFmaxFloatHelper(snan, 0,
5656  snan_processed, snan_processed,
5657  snan_processed, snan_processed);
5658  FminFmaxFloatHelper(0, snan,
5659  snan_processed, snan_processed,
5660  snan_processed, snan_processed);
5661  FminFmaxFloatHelper(qnan, 0,
5662  qnan_processed, qnan_processed,
5663  0, 0);
5664  FminFmaxFloatHelper(0, qnan,
5665  qnan_processed, qnan_processed,
5666  0, 0);
5667  FminFmaxFloatHelper(qnan, snan,
5668  snan_processed, snan_processed,
5669  snan_processed, snan_processed);
5670  FminFmaxFloatHelper(snan, qnan,
5671  snan_processed, snan_processed,
5672  snan_processed, snan_processed);
5673 
5674  // Iterate over all combinations of inputs.
5675  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
5676  -FLT_MAX, -FLT_MIN, -1.0, -0.0,
5677  kFP32PositiveInfinity, kFP32NegativeInfinity,
5678  kFP32QuietNaN, kFP32SignallingNaN };
5679 
5680  const int count = sizeof(inputs) / sizeof(inputs[0]);
5681 
5682  for (int in = 0; in < count; in++) {
5683  float n = inputs[in];
5684  for (int im = 0; im < count; im++) {
5685  float m = inputs[im];
5686  FminFmaxFloatHelper(n, m,
5687  MinMaxHelper(n, m, true),
5688  MinMaxHelper(n, m, false),
5689  MinMaxHelper(n, m, true, kFP32PositiveInfinity),
5690  MinMaxHelper(n, m, false, kFP32NegativeInfinity));
5691  }
5692  }
5693 }
5694 
5695 
5696 TEST(fccmp) {
5697  INIT_V8();
5698  SETUP();
5699 
5700  START();
5701  __ Fmov(s16, 0.0);
5702  __ Fmov(s17, 0.5);
5703  __ Fmov(d18, -0.5);
5704  __ Fmov(d19, -1.0);
5705  __ Mov(x20, 0);
5706 
5707  __ Cmp(x20, 0);
5708  __ Fccmp(s16, s16, NoFlag, eq);
5709  __ Mrs(x0, NZCV);
5710 
5711  __ Cmp(x20, 0);
5712  __ Fccmp(s16, s16, VFlag, ne);
5713  __ Mrs(x1, NZCV);
5714 
5715  __ Cmp(x20, 0);
5716  __ Fccmp(s16, s17, CFlag, ge);
5717  __ Mrs(x2, NZCV);
5718 
5719  __ Cmp(x20, 0);
5720  __ Fccmp(s16, s17, CVFlag, lt);
5721  __ Mrs(x3, NZCV);
5722 
5723  __ Cmp(x20, 0);
5724  __ Fccmp(d18, d18, ZFlag, le);
5725  __ Mrs(x4, NZCV);
5726 
5727  __ Cmp(x20, 0);
5728  __ Fccmp(d18, d18, ZVFlag, gt);
5729  __ Mrs(x5, NZCV);
5730 
5731  __ Cmp(x20, 0);
5732  __ Fccmp(d18, d19, ZCVFlag, ls);
5733  __ Mrs(x6, NZCV);
5734 
5735  __ Cmp(x20, 0);
5736  __ Fccmp(d18, d19, NFlag, hi);
5737  __ Mrs(x7, NZCV);
5738 
5739  __ fccmp(s16, s16, NFlag, al);
5740  __ Mrs(x8, NZCV);
5741 
5742  __ fccmp(d18, d18, NFlag, nv);
5743  __ Mrs(x9, NZCV);
5744 
5745  END();
5746 
5747  RUN();
5748 
5749  ASSERT_EQUAL_32(ZCFlag, w0);
5750  ASSERT_EQUAL_32(VFlag, w1);
5751  ASSERT_EQUAL_32(NFlag, w2);
5752  ASSERT_EQUAL_32(CVFlag, w3);
5753  ASSERT_EQUAL_32(ZCFlag, w4);
5754  ASSERT_EQUAL_32(ZVFlag, w5);
5755  ASSERT_EQUAL_32(CFlag, w6);
5756  ASSERT_EQUAL_32(NFlag, w7);
5757  ASSERT_EQUAL_32(ZCFlag, w8);
5758  ASSERT_EQUAL_32(ZCFlag, w9);
5759 
5760  TEARDOWN();
5761 }
5762 
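// Fcmp maps the IEEE comparison result onto the integer flags:
//   equal        -> ZCFlag
//   less than    -> NFlag
//   greater than -> CFlag
//   unordered    -> CVFlag (at least one operand is NaN)
// Fccmp, exercised above, combines this mapping with the conditional
// compare rule: when the condition fails, NZCV is loaded from the
// immediate field instead. The fcmp test below asserts this mapping
// for both precisions.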
5763 
5764 TEST(fcmp) {
5765  INIT_V8();
5766  SETUP();
5767 
5768  START();
5769 
5770  // Some of these tests require a floating-point scratch register assigned to
5771  // the macro assembler, but most do not.
5772  {
5773  // We're going to mess around with the available scratch registers in this
5774  // test. A UseScratchRegisterScope will make sure that they are restored to
5775  // the default values once we're finished.
5776  UseScratchRegisterScope temps(&masm);
5777  masm.FPTmpList()->set_list(0);
5778 
5779  __ Fmov(s8, 0.0);
5780  __ Fmov(s9, 0.5);
5781  __ Mov(w18, 0x7f800001); // Single precision NaN.
5782  __ Fmov(s18, w18);
5783 
5784  __ Fcmp(s8, s8);
5785  __ Mrs(x0, NZCV);
5786  __ Fcmp(s8, s9);
5787  __ Mrs(x1, NZCV);
5788  __ Fcmp(s9, s8);
5789  __ Mrs(x2, NZCV);
5790  __ Fcmp(s8, s18);
5791  __ Mrs(x3, NZCV);
5792  __ Fcmp(s18, s18);
5793  __ Mrs(x4, NZCV);
5794  __ Fcmp(s8, 0.0);
5795  __ Mrs(x5, NZCV);
5796  masm.FPTmpList()->set_list(d0.Bit());
5797  __ Fcmp(s8, 255.0);
5798  masm.FPTmpList()->set_list(0);
5799  __ Mrs(x6, NZCV);
5800 
5801  __ Fmov(d19, 0.0);
5802  __ Fmov(d20, 0.5);
5803  __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
5804  __ Fmov(d21, x21);
5805 
5806  __ Fcmp(d19, d19);
5807  __ Mrs(x10, NZCV);
5808  __ Fcmp(d19, d20);
5809  __ Mrs(x11, NZCV);
5810  __ Fcmp(d20, d19);
5811  __ Mrs(x12, NZCV);
5812  __ Fcmp(d19, d21);
5813  __ Mrs(x13, NZCV);
5814  __ Fcmp(d21, d21);
5815  __ Mrs(x14, NZCV);
5816  __ Fcmp(d19, 0.0);
5817  __ Mrs(x15, NZCV);
5818  masm.FPTmpList()->set_list(d0.Bit());
5819  __ Fcmp(d19, 12.3456);
5820  masm.FPTmpList()->set_list(0);
5821  __ Mrs(x16, NZCV);
5822  }
5823 
5824  END();
5825 
5826  RUN();
5827 
5828  ASSERT_EQUAL_32(ZCFlag, w0);
5829  ASSERT_EQUAL_32(NFlag, w1);
5830  ASSERT_EQUAL_32(CFlag, w2);
5831  ASSERT_EQUAL_32(CVFlag, w3);
5832  ASSERT_EQUAL_32(CVFlag, w4);
5833  ASSERT_EQUAL_32(ZCFlag, w5);
5834  ASSERT_EQUAL_32(NFlag, w6);
5835  ASSERT_EQUAL_32(ZCFlag, w10);
5836  ASSERT_EQUAL_32(NFlag, w11);
5837  ASSERT_EQUAL_32(CFlag, w12);
5838  ASSERT_EQUAL_32(CVFlag, w13);
5839  ASSERT_EQUAL_32(CVFlag, w14);
5840  ASSERT_EQUAL_32(ZCFlag, w15);
5841  ASSERT_EQUAL_32(NFlag, w16);
5842 
5843  TEARDOWN();
5844 }
5845 
5846 
5847 TEST(fcsel) {
5848  INIT_V8();
5849  SETUP();
5850 
5851  START();
5852  __ Mov(x16, 0);
5853  __ Fmov(s16, 1.0);
5854  __ Fmov(s17, 2.0);
5855  __ Fmov(d18, 3.0);
5856  __ Fmov(d19, 4.0);
5857 
5858  __ Cmp(x16, 0);
5859  __ Fcsel(s0, s16, s17, eq);
5860  __ Fcsel(s1, s16, s17, ne);
5861  __ Fcsel(d2, d18, d19, eq);
5862  __ Fcsel(d3, d18, d19, ne);
5863  __ fcsel(s4, s16, s17, al);
5864  __ fcsel(d5, d18, d19, nv);
5865  END();
5866 
5867  RUN();
5868 
5869  ASSERT_EQUAL_FP32(1.0, s0);
5870  ASSERT_EQUAL_FP32(2.0, s1);
5871  ASSERT_EQUAL_FP64(3.0, d2);
5872  ASSERT_EQUAL_FP64(4.0, d3);
5873  ASSERT_EQUAL_FP32(1.0, s4);
5874  ASSERT_EQUAL_FP64(3.0, d5);
5875 
5876  TEARDOWN();
5877 }
5878 
5879 
5880 TEST(fneg) {
5881  INIT_V8();
5882  SETUP();
5883 
5884  START();
5885  __ Fmov(s16, 1.0);
5886  __ Fmov(s17, 0.0);
5887  __ Fmov(s18, kFP32PositiveInfinity);
5888  __ Fmov(d19, 1.0);
5889  __ Fmov(d20, 0.0);
5890  __ Fmov(d21, kFP64PositiveInfinity);
5891 
5892  __ Fneg(s0, s16);
5893  __ Fneg(s1, s0);
5894  __ Fneg(s2, s17);
5895  __ Fneg(s3, s2);
5896  __ Fneg(s4, s18);
5897  __ Fneg(s5, s4);
5898  __ Fneg(d6, d19);
5899  __ Fneg(d7, d6);
5900  __ Fneg(d8, d20);
5901  __ Fneg(d9, d8);
5902  __ Fneg(d10, d21);
5903  __ Fneg(d11, d10);
5904  END();
5905 
5906  RUN();
5907 
5908  ASSERT_EQUAL_FP32(-1.0, s0);
5909  ASSERT_EQUAL_FP32(1.0, s1);
5910  ASSERT_EQUAL_FP32(-0.0, s2);
5911  ASSERT_EQUAL_FP32(0.0, s3);
5912  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
5913  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
5914  ASSERT_EQUAL_FP64(-1.0, d6);
5915  ASSERT_EQUAL_FP64(1.0, d7);
5916  ASSERT_EQUAL_FP64(-0.0, d8);
5917  ASSERT_EQUAL_FP64(0.0, d9);
5918  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5919  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5920 
5921  TEARDOWN();
5922 }
5923 
5924 
5925 TEST(fabs) {
5926  INIT_V8();
5927  SETUP();
5928 
5929  START();
5930  __ Fmov(s16, -1.0);
5931  __ Fmov(s17, -0.0);
5932  __ Fmov(s18, kFP32NegativeInfinity);
5933  __ Fmov(d19, -1.0);
5934  __ Fmov(d20, -0.0);
5935  __ Fmov(d21, kFP64NegativeInfinity);
5936 
5937  __ Fabs(s0, s16);
5938  __ Fabs(s1, s0);
5939  __ Fabs(s2, s17);
5940  __ Fabs(s3, s18);
5941  __ Fabs(d4, d19);
5942  __ Fabs(d5, d4);
5943  __ Fabs(d6, d20);
5944  __ Fabs(d7, d21);
5945  END();
5946 
5947  RUN();
5948 
5949  ASSERT_EQUAL_FP32(1.0, s0);
5950  ASSERT_EQUAL_FP32(1.0, s1);
5951  ASSERT_EQUAL_FP32(0.0, s2);
5952  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
5953  ASSERT_EQUAL_FP64(1.0, d4);
5954  ASSERT_EQUAL_FP64(1.0, d5);
5955  ASSERT_EQUAL_FP64(0.0, d6);
5956  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
5957 
5958  TEARDOWN();
5959 }
5960 
5961 
5962 TEST(fsqrt) {
5963  INIT_V8();
5964  SETUP();
5965 
5966  START();
5967  __ Fmov(s16, 0.0);
5968  __ Fmov(s17, 1.0);
5969  __ Fmov(s18, 0.25);
5970  __ Fmov(s19, 65536.0);
5971  __ Fmov(s20, -0.0);
5972  __ Fmov(s21, kFP32PositiveInfinity);
5973  __ Fmov(s22, -1.0);
5974  __ Fmov(d23, 0.0);
5975  __ Fmov(d24, 1.0);
5976  __ Fmov(d25, 0.25);
5977  __ Fmov(d26, 4294967296.0);
5978  __ Fmov(d27, -0.0);
5979  __ Fmov(d28, kFP64PositiveInfinity);
5980  __ Fmov(d29, -1.0);
5981 
5982  __ Fsqrt(s0, s16);
5983  __ Fsqrt(s1, s17);
5984  __ Fsqrt(s2, s18);
5985  __ Fsqrt(s3, s19);
5986  __ Fsqrt(s4, s20);
5987  __ Fsqrt(s5, s21);
5988  __ Fsqrt(s6, s22);
5989  __ Fsqrt(d7, d23);
5990  __ Fsqrt(d8, d24);
5991  __ Fsqrt(d9, d25);
5992  __ Fsqrt(d10, d26);
5993  __ Fsqrt(d11, d27);
5994  __ Fsqrt(d12, d28);
5995  __ Fsqrt(d13, d29);
5996  END();
5997 
5998  RUN();
5999 
6000  ASSERT_EQUAL_FP32(0.0, s0);
6001  ASSERT_EQUAL_FP32(1.0, s1);
6002  ASSERT_EQUAL_FP32(0.5, s2);
6003  ASSERT_EQUAL_FP32(256.0, s3);
6004  ASSERT_EQUAL_FP32(-0.0, s4);
6005  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
6006  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
6007  ASSERT_EQUAL_FP64(0.0, d7);
6008  ASSERT_EQUAL_FP64(1.0, d8);
6009  ASSERT_EQUAL_FP64(0.5, d9);
6010  ASSERT_EQUAL_FP64(65536.0, d10);
6011  ASSERT_EQUAL_FP64(-0.0, d11);
6012  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d12);
6013  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
6014 
6015  TEARDOWN();
6016 }
6017 
6018 
6019 TEST(frinta) {
6020  INIT_V8();
6021  SETUP();
6022 
6023  START();
6024  __ Fmov(s16, 1.0);
6025  __ Fmov(s17, 1.1);
6026  __ Fmov(s18, 1.5);
6027  __ Fmov(s19, 1.9);
6028  __ Fmov(s20, 2.5);
6029  __ Fmov(s21, -1.5);
6030  __ Fmov(s22, -2.5);
6031  __ Fmov(s23, kFP32PositiveInfinity);
6032  __ Fmov(s24, kFP32NegativeInfinity);
6033  __ Fmov(s25, 0.0);
6034  __ Fmov(s26, -0.0);
6035 
6036  __ Frinta(s0, s16);
6037  __ Frinta(s1, s17);
6038  __ Frinta(s2, s18);
6039  __ Frinta(s3, s19);
6040  __ Frinta(s4, s20);
6041  __ Frinta(s5, s21);
6042  __ Frinta(s6, s22);
6043  __ Frinta(s7, s23);
6044  __ Frinta(s8, s24);
6045  __ Frinta(s9, s25);
6046  __ Frinta(s10, s26);
6047 
6048  __ Fmov(d16, 1.0);
6049  __ Fmov(d17, 1.1);
6050  __ Fmov(d18, 1.5);
6051  __ Fmov(d19, 1.9);
6052  __ Fmov(d20, 2.5);
6053  __ Fmov(d21, -1.5);
6054  __ Fmov(d22, -2.5);
6055  __ Fmov(d23, kFP32PositiveInfinity);
6056  __ Fmov(d24, kFP32NegativeInfinity);
6057  __ Fmov(d25, 0.0);
6058  __ Fmov(d26, -0.0);
6059 
6060  __ Frinta(d11, d16);
6061  __ Frinta(d12, d17);
6062  __ Frinta(d13, d18);
6063  __ Frinta(d14, d19);
6064  __ Frinta(d15, d20);
6065  __ Frinta(d16, d21);
6066  __ Frinta(d17, d22);
6067  __ Frinta(d18, d23);
6068  __ Frinta(d19, d24);
6069  __ Frinta(d20, d25);
6070  __ Frinta(d21, d26);
6071  END();
6072 
6073  RUN();
6074 
6075  ASSERT_EQUAL_FP32(1.0, s0);
6076  ASSERT_EQUAL_FP32(1.0, s1);
6077  ASSERT_EQUAL_FP32(2.0, s2);
6078  ASSERT_EQUAL_FP32(2.0, s3);
6079  ASSERT_EQUAL_FP32(3.0, s4);
6080  ASSERT_EQUAL_FP32(-2.0, s5);
6081  ASSERT_EQUAL_FP32(-3.0, s6);
6082  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6083  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6084  ASSERT_EQUAL_FP32(0.0, s9);
6085  ASSERT_EQUAL_FP32(-0.0, s10);
6086  ASSERT_EQUAL_FP64(1.0, d11);
6087  ASSERT_EQUAL_FP64(1.0, d12);
6088  ASSERT_EQUAL_FP64(2.0, d13);
6089  ASSERT_EQUAL_FP64(2.0, d14);
6090  ASSERT_EQUAL_FP64(3.0, d15);
6091  ASSERT_EQUAL_FP64(-2.0, d16);
6092  ASSERT_EQUAL_FP64(-3.0, d17);
6093  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6094  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6095  ASSERT_EQUAL_FP64(0.0, d20);
6096  ASSERT_EQUAL_FP64(-0.0, d21);
6097 
6098  TEARDOWN();
6099 }
6100 
6101 
6102 TEST(frintn) {
6103  INIT_V8();
6104  SETUP();
6105 
6106  START();
6107  __ Fmov(s16, 1.0);
6108  __ Fmov(s17, 1.1);
6109  __ Fmov(s18, 1.5);
6110  __ Fmov(s19, 1.9);
6111  __ Fmov(s20, 2.5);
6112  __ Fmov(s21, -1.5);
6113  __ Fmov(s22, -2.5);
6114  __ Fmov(s23, kFP32PositiveInfinity);
6115  __ Fmov(s24, kFP32NegativeInfinity);
6116  __ Fmov(s25, 0.0);
6117  __ Fmov(s26, -0.0);
6118 
6119  __ Frintn(s0, s16);
6120  __ Frintn(s1, s17);
6121  __ Frintn(s2, s18);
6122  __ Frintn(s3, s19);
6123  __ Frintn(s4, s20);
6124  __ Frintn(s5, s21);
6125  __ Frintn(s6, s22);
6126  __ Frintn(s7, s23);
6127  __ Frintn(s8, s24);
6128  __ Frintn(s9, s25);
6129  __ Frintn(s10, s26);
6130 
6131  __ Fmov(d16, 1.0);
6132  __ Fmov(d17, 1.1);
6133  __ Fmov(d18, 1.5);
6134  __ Fmov(d19, 1.9);
6135  __ Fmov(d20, 2.5);
6136  __ Fmov(d21, -1.5);
6137  __ Fmov(d22, -2.5);
6138  __ Fmov(d23, kFP32PositiveInfinity);
6139  __ Fmov(d24, kFP32NegativeInfinity);
6140  __ Fmov(d25, 0.0);
6141  __ Fmov(d26, -0.0);
6142 
6143  __ Frintn(d11, d16);
6144  __ Frintn(d12, d17);
6145  __ Frintn(d13, d18);
6146  __ Frintn(d14, d19);
6147  __ Frintn(d15, d20);
6148  __ Frintn(d16, d21);
6149  __ Frintn(d17, d22);
6150  __ Frintn(d18, d23);
6151  __ Frintn(d19, d24);
6152  __ Frintn(d20, d25);
6153  __ Frintn(d21, d26);
6154  END();
6155 
6156  RUN();
6157 
6158  ASSERT_EQUAL_FP32(1.0, s0);
6159  ASSERT_EQUAL_FP32(1.0, s1);
6160  ASSERT_EQUAL_FP32(2.0, s2);
6161  ASSERT_EQUAL_FP32(2.0, s3);
6162  ASSERT_EQUAL_FP32(2.0, s4);
6163  ASSERT_EQUAL_FP32(-2.0, s5);
6164  ASSERT_EQUAL_FP32(-2.0, s6);
6165  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6166  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6167  ASSERT_EQUAL_FP32(0.0, s9);
6168  ASSERT_EQUAL_FP32(-0.0, s10);
6169  ASSERT_EQUAL_FP64(1.0, d11);
6170  ASSERT_EQUAL_FP64(1.0, d12);
6171  ASSERT_EQUAL_FP64(2.0, d13);
6172  ASSERT_EQUAL_FP64(2.0, d14);
6173  ASSERT_EQUAL_FP64(2.0, d15);
6174  ASSERT_EQUAL_FP64(-2.0, d16);
6175  ASSERT_EQUAL_FP64(-2.0, d17);
6176  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6177  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6178  ASSERT_EQUAL_FP64(0.0, d20);
6179  ASSERT_EQUAL_FP64(-0.0, d21);
6180 
6181  TEARDOWN();
6182 }
6183 
6184 
6185 TEST(frintz) {
6186  INIT_V8();
6187  SETUP();
6188 
6189  START();
6190  __ Fmov(s16, 1.0);
6191  __ Fmov(s17, 1.1);
6192  __ Fmov(s18, 1.5);
6193  __ Fmov(s19, 1.9);
6194  __ Fmov(s20, 2.5);
6195  __ Fmov(s21, -1.5);
6196  __ Fmov(s22, -2.5);
6197  __ Fmov(s23, kFP32PositiveInfinity);
6198  __ Fmov(s24, kFP32NegativeInfinity);
6199  __ Fmov(s25, 0.0);
6200  __ Fmov(s26, -0.0);
6201 
6202  __ Frintz(s0, s16);
6203  __ Frintz(s1, s17);
6204  __ Frintz(s2, s18);
6205  __ Frintz(s3, s19);
6206  __ Frintz(s4, s20);
6207  __ Frintz(s5, s21);
6208  __ Frintz(s6, s22);
6209  __ Frintz(s7, s23);
6210  __ Frintz(s8, s24);
6211  __ Frintz(s9, s25);
6212  __ Frintz(s10, s26);
6213 
6214  __ Fmov(d16, 1.0);
6215  __ Fmov(d17, 1.1);
6216  __ Fmov(d18, 1.5);
6217  __ Fmov(d19, 1.9);
6218  __ Fmov(d20, 2.5);
6219  __ Fmov(d21, -1.5);
6220  __ Fmov(d22, -2.5);
6221  __ Fmov(d23, kFP32PositiveInfinity);
6222  __ Fmov(d24, kFP32NegativeInfinity);
6223  __ Fmov(d25, 0.0);
6224  __ Fmov(d26, -0.0);
6225 
6226  __ Frintz(d11, d16);
6227  __ Frintz(d12, d17);
6228  __ Frintz(d13, d18);
6229  __ Frintz(d14, d19);
6230  __ Frintz(d15, d20);
6231  __ Frintz(d16, d21);
6232  __ Frintz(d17, d22);
6233  __ Frintz(d18, d23);
6234  __ Frintz(d19, d24);
6235  __ Frintz(d20, d25);
6236  __ Frintz(d21, d26);
6237  END();
6238 
6239  RUN();
6240 
6241  ASSERT_EQUAL_FP32(1.0, s0);
6242  ASSERT_EQUAL_FP32(1.0, s1);
6243  ASSERT_EQUAL_FP32(1.0, s2);
6244  ASSERT_EQUAL_FP32(1.0, s3);
6245  ASSERT_EQUAL_FP32(2.0, s4);
6246  ASSERT_EQUAL_FP32(-1.0, s5);
6247  ASSERT_EQUAL_FP32(-2.0, s6);
6248  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6249  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6250  ASSERT_EQUAL_FP32(0.0, s9);
6251  ASSERT_EQUAL_FP32(-0.0, s10);
6252  ASSERT_EQUAL_FP64(1.0, d11);
6253  ASSERT_EQUAL_FP64(1.0, d12);
6254  ASSERT_EQUAL_FP64(1.0, d13);
6255  ASSERT_EQUAL_FP64(1.0, d14);
6256  ASSERT_EQUAL_FP64(2.0, d15);
6257  ASSERT_EQUAL_FP64(-1.0, d16);
6258  ASSERT_EQUAL_FP64(-2.0, d17);
6259  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6260  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6261  ASSERT_EQUAL_FP64(0.0, d20);
6262  ASSERT_EQUAL_FP64(-0.0, d21);
6263 
6264  TEARDOWN();
6265 }
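// A host-side reference for the three rounding modes exercised by the frint
// tests above; a minimal sketch assuming a C99/C++11 math library (<cmath>
// plus <cfenv> for fesetround). Illustrative only, not part of the suite.
static void CheckFrintReferences() {
  std::fesetround(FE_TONEAREST);      // Host default; makes nearbyint tie to even.
  CHECK(std::round(2.5) == 3.0);      // Frinta: ties away from zero.
  CHECK(std::nearbyint(2.5) == 2.0);  // Frintn: ties to even.
  CHECK(std::trunc(-1.5) == -1.0);    // Frintz: toward zero.
}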
6266 
6267 
6268 TEST(fcvt_ds) {
6269  INIT_V8();
6270  SETUP();
6271 
6272  START();
6273  __ Fmov(s16, 1.0);
6274  __ Fmov(s17, 1.1);
6275  __ Fmov(s18, 1.5);
6276  __ Fmov(s19, 1.9);
6277  __ Fmov(s20, 2.5);
6278  __ Fmov(s21, -1.5);
6279  __ Fmov(s22, -2.5);
6280  __ Fmov(s23, kFP32PositiveInfinity);
6281  __ Fmov(s24, kFP32NegativeInfinity);
6282  __ Fmov(s25, 0.0);
6283  __ Fmov(s26, -0.0);
6284  __ Fmov(s27, FLT_MAX);
6285  __ Fmov(s28, FLT_MIN);
6286  __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN.
6287  __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN.
6288 
6289  __ Fcvt(d0, s16);
6290  __ Fcvt(d1, s17);
6291  __ Fcvt(d2, s18);
6292  __ Fcvt(d3, s19);
6293  __ Fcvt(d4, s20);
6294  __ Fcvt(d5, s21);
6295  __ Fcvt(d6, s22);
6296  __ Fcvt(d7, s23);
6297  __ Fcvt(d8, s24);
6298  __ Fcvt(d9, s25);
6299  __ Fcvt(d10, s26);
6300  __ Fcvt(d11, s27);
6301  __ Fcvt(d12, s28);
6302  __ Fcvt(d13, s29);
6303  __ Fcvt(d14, s30);
6304  END();
6305 
6306  RUN();
6307 
6308  ASSERT_EQUAL_FP64(1.0f, d0);
6309  ASSERT_EQUAL_FP64(1.1f, d1);
6310  ASSERT_EQUAL_FP64(1.5f, d2);
6311  ASSERT_EQUAL_FP64(1.9f, d3);
6312  ASSERT_EQUAL_FP64(2.5f, d4);
6313  ASSERT_EQUAL_FP64(-1.5f, d5);
6314  ASSERT_EQUAL_FP64(-2.5f, d6);
6315  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
6316  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
6317  ASSERT_EQUAL_FP64(0.0f, d9);
6318  ASSERT_EQUAL_FP64(-0.0f, d10);
6319  ASSERT_EQUAL_FP64(FLT_MAX, d11);
6320  ASSERT_EQUAL_FP64(FLT_MIN, d12);
6321 
6322  // Check that the NaN payload is preserved according to ARM64 conversion
6323  // rules:
6324  // - The sign bit is preserved.
6325  // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
6326  // - The remaining mantissa bits are copied until they run out.
6327  // - The low-order bits that haven't already been assigned are set to 0.
6328  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
6329  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
6330 
6331  TEARDOWN();
6332 }
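// The expected NaN bit pattern asserted above can be derived directly from
// the four widening rules in the comment; a minimal sketch assuming the
// usual IEEE-754 bit layouts (WidenNaNBits is illustrative only).
static uint64_t WidenNaNBits(uint32_t f_bits) {
  uint64_t sign = static_cast<uint64_t>(f_bits >> 31) << 63;
  // The float's 23 mantissa bits land at the top of the double's 52-bit
  // mantissa (shift by 52 - 23 = 29); the quiet bit is forced to 1 and the
  // remaining low-order bits stay 0.
  uint64_t payload = static_cast<uint64_t>(f_bits & 0x007fffff) << 29;
  return sign | 0x7ff8000000000000UL | payload;
}
// Both NaN inputs map to the asserted pattern:
//   WidenNaNBits(0x7fc12345) == WidenNaNBits(0x7f812345) == 0x7ff82468a0000000.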
6333 
6334 
6335 TEST(fcvt_sd) {
6336  INIT_V8();
6337  // There are a huge number of corner-cases to check, so this test iterates
6338  // through a list. The list is then negated and checked again (since the sign
6339  // is irrelevant in ties-to-even rounding), so the list shouldn't include any
6340  // negative values.
6341  //
6342  // Note that this test only checks ties-to-even rounding, because that is all
6343  // that the simulator supports.
6344  struct {double in; float expected;} test[] = {
6345  // Check some simple conversions.
6346  {0.0, 0.0f},
6347  {1.0, 1.0f},
6348  {1.5, 1.5f},
6349  {2.0, 2.0f},
6350  {FLT_MAX, FLT_MAX},
6351  // - The smallest normalized float.
6352  {pow(2.0, -126), powf(2, -126)},
6353  // - Normal floats that need (ties-to-even) rounding.
6354  // For normalized numbers:
6355  // bit 29 (0x0000000020000000) is the lowest-order bit which will
6356  // fit in the float's mantissa.
6357  {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
6358  {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
6359  {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
6360  {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
6361  {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
6362  {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
6363  {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
6364  {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
6365  {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
6366  {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
6367  {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
6368  {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
6369  {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
6370  // - A mantissa that overflows into the exponent during rounding.
6371  {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
6372  // - The largest double that rounds to a normal float.
6373  {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
6374 
6375  // Doubles that are too big for a float.
6376  {kFP64PositiveInfinity, kFP32PositiveInfinity},
6377  {DBL_MAX, kFP32PositiveInfinity},
6378  // - The smallest exponent that's too big for a float.
6379  {pow(2.0, 128), kFP32PositiveInfinity},
6380  // - This exponent is in range, but the value rounds to infinity.
6381  {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
6382 
6383  // Doubles that are too small for a float.
6384  // - The smallest (subnormal) double.
6385  {DBL_MIN, 0.0},
6386  // - The largest double which is too small for a subnormal float.
6387  {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
6388 
6389  // Normal doubles that become subnormal floats.
6390  // - The largest subnormal float.
6391  {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
6392  // - The smallest subnormal float.
6393  {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
6394  // - Subnormal floats that need (ties-to-even) rounding.
6395  // For these subnormals:
6396  // bit 34 (0x0000000400000000) is the lowest-order bit which will
6397  // fit in the float's mantissa.
6398  {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
6399  {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
6400  {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
6401  {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
6402  {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
6403  {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
6404  {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
6405  {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
6406  {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
6407  {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
6408  {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
6409  {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
6410  {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
6411  // - The smallest double which rounds up to become a subnormal float.
6412  {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
6413 
6414  // Check NaN payload preservation.
6415  {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
6416  {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
6417  // - Signalling NaNs become quiet NaNs.
6418  {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
6419  {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
6420  {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
6421  };
6422  int count = sizeof(test) / sizeof(test[0]);
6423 
6424  for (int i = 0; i < count; i++) {
6425  double in = test[i].in;
6426  float expected = test[i].expected;
6427 
6428  // We only expect positive input.
6429  ASSERT(std::signbit(in) == 0);
6430  ASSERT(std::signbit(expected) == 0);
6431 
6432  SETUP();
6433  START();
6434 
6435  __ Fmov(d10, in);
6436  __ Fcvt(s20, d10);
6437 
6438  __ Fmov(d11, -in);
6439  __ Fcvt(s21, d11);
6440 
6441  END();
6442  RUN();
6443  ASSERT_EQUAL_FP32(expected, s20);
6444  ASSERT_EQUAL_FP32(-expected, s21);
6445  TEARDOWN();
6446  }
6447 }
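// Each row of the table can also be cross-checked on the host, since a C++
// double-to-float cast rounds in the current FP mode (round-to-nearest,
// ties-to-even by default on typical hosts). A minimal sketch, assuming the
// rawbits helpers from utils-arm64 are available; illustrative only.
static void CrossCheckOneFcvtSdRow() {
  double in = rawbits_to_double(0x3ff0000010000001);
  float rounded = static_cast<float>(in);  // Ties-to-even rounding.
  CHECK(float_to_rawbits(rounded) == 0x3f800001);
}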
6448 
6449 
6450 TEST(fcvtas) {
6451  INIT_V8();
6452  SETUP();
6453 
6454  START();
6455  __ Fmov(s0, 1.0);
6456  __ Fmov(s1, 1.1);
6457  __ Fmov(s2, 2.5);
6458  __ Fmov(s3, -2.5);
6459  __ Fmov(s4, kFP32PositiveInfinity);
6460  __ Fmov(s5, kFP32NegativeInfinity);
6461  __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6462  __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6463  __ Fmov(d8, 1.0);
6464  __ Fmov(d9, 1.1);
6465  __ Fmov(d10, 2.5);
6466  __ Fmov(d11, -2.5);
6467  __ Fmov(d12, kFP64PositiveInfinity);
6468  __ Fmov(d13, kFP64NegativeInfinity);
6469  __ Fmov(d14, kWMaxInt - 1);
6470  __ Fmov(d15, kWMinInt + 1);
6471  __ Fmov(s17, 1.1);
6472  __ Fmov(s18, 2.5);
6473  __ Fmov(s19, -2.5);
6474  __ Fmov(s20, kFP32PositiveInfinity);
6475  __ Fmov(s21, kFP32NegativeInfinity);
6476  __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6477  __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6478  __ Fmov(d24, 1.1);
6479  __ Fmov(d25, 2.5);
6480  __ Fmov(d26, -2.5);
6481  __ Fmov(d27, kFP64PositiveInfinity);
6482  __ Fmov(d28, kFP64NegativeInfinity);
6483  __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6484  __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6485 
6486  __ Fcvtas(w0, s0);
6487  __ Fcvtas(w1, s1);
6488  __ Fcvtas(w2, s2);
6489  __ Fcvtas(w3, s3);
6490  __ Fcvtas(w4, s4);
6491  __ Fcvtas(w5, s5);
6492  __ Fcvtas(w6, s6);
6493  __ Fcvtas(w7, s7);
6494  __ Fcvtas(w8, d8);
6495  __ Fcvtas(w9, d9);
6496  __ Fcvtas(w10, d10);
6497  __ Fcvtas(w11, d11);
6498  __ Fcvtas(w12, d12);
6499  __ Fcvtas(w13, d13);
6500  __ Fcvtas(w14, d14);
6501  __ Fcvtas(w15, d15);
6502  __ Fcvtas(x17, s17);
6503  __ Fcvtas(x18, s18);
6504  __ Fcvtas(x19, s19);
6505  __ Fcvtas(x20, s20);
6506  __ Fcvtas(x21, s21);
6507  __ Fcvtas(x22, s22);
6508  __ Fcvtas(x23, s23);
6509  __ Fcvtas(x24, d24);
6510  __ Fcvtas(x25, d25);
6511  __ Fcvtas(x26, d26);
6512  __ Fcvtas(x27, d27);
6513  __ Fcvtas(x28, d28);
6514  __ Fcvtas(x29, d29);
6515  __ Fcvtas(x30, d30);
6516  END();
6517 
6518  RUN();
6519 
6520  ASSERT_EQUAL_64(1, x0);
6521  ASSERT_EQUAL_64(1, x1);
6522  ASSERT_EQUAL_64(3, x2);
6523  ASSERT_EQUAL_64(0xfffffffd, x3);
6524  ASSERT_EQUAL_64(0x7fffffff, x4);
6525  ASSERT_EQUAL_64(0x80000000, x5);
6526  ASSERT_EQUAL_64(0x7fffff80, x6);
6527  ASSERT_EQUAL_64(0x80000080, x7);
6528  ASSERT_EQUAL_64(1, x8);
6529  ASSERT_EQUAL_64(1, x9);
6530  ASSERT_EQUAL_64(3, x10);
6531  ASSERT_EQUAL_64(0xfffffffd, x11);
6532  ASSERT_EQUAL_64(0x7fffffff, x12);
6533  ASSERT_EQUAL_64(0x80000000, x13);
6534  ASSERT_EQUAL_64(0x7ffffffe, x14);
6535  ASSERT_EQUAL_64(0x80000001, x15);
6536  ASSERT_EQUAL_64(1, x17);
6537  ASSERT_EQUAL_64(3, x18);
6538  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
6539  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6540  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6541  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6542  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6543  ASSERT_EQUAL_64(1, x24);
6544  ASSERT_EQUAL_64(3, x25);
6545  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
6546  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6547  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6548  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6549  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6550 
6551  TEARDOWN();
6552 }
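// The saturation boundaries used above follow from the mantissa widths: with
// 24 effective mantissa bits the largest float below 2^31 is
// 2^31 - 2^(31 - 24) = 0x7fffff80, and with 53 bits the largest double below
// 2^63 is 2^63 - 2^(63 - 53) = 0x7ffffffffffffc00. A quick host-side check,
// assuming a C99/C++11 <cmath> (illustrative sketch only):
static void CheckSaturationBoundaries() {
  CHECK(std::nextafterf(2147483648.0f, 0.0f) == 2147483520.0f);
  CHECK(std::nextafter(9223372036854775808.0, 0.0) ==
        9223372036854774784.0);
}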
6553 
6554 
6555 TEST(fcvtau) {
6556  INIT_V8();
6557  SETUP();
6558 
6559  START();
6560  __ Fmov(s0, 1.0);
6561  __ Fmov(s1, 1.1);
6562  __ Fmov(s2, 2.5);
6563  __ Fmov(s3, -2.5);
6564  __ Fmov(s4, kFP32PositiveInfinity);
6565  __ Fmov(s5, kFP32NegativeInfinity);
6566  __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6567  __ Fmov(d8, 1.0);
6568  __ Fmov(d9, 1.1);
6569  __ Fmov(d10, 2.5);
6570  __ Fmov(d11, -2.5);
6571  __ Fmov(d12, kFP64PositiveInfinity);
6572  __ Fmov(d13, kFP64NegativeInfinity);
6573  __ Fmov(d14, 0xfffffffe);
6574  __ Fmov(s16, 1.0);
6575  __ Fmov(s17, 1.1);
6576  __ Fmov(s18, 2.5);
6577  __ Fmov(s19, -2.5);
6578  __ Fmov(s20, kFP32PositiveInfinity);
6579  __ Fmov(s21, kFP32NegativeInfinity);
6580  __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
6581  __ Fmov(d24, 1.1);
6582  __ Fmov(d25, 2.5);
6583  __ Fmov(d26, -2.5);
6584  __ Fmov(d27, kFP64PositiveInfinity);
6585  __ Fmov(d28, kFP64NegativeInfinity);
6586  __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
6587  __ Fmov(s30, 0x100000000UL);
6588 
6589  __ Fcvtau(w0, s0);
6590  __ Fcvtau(w1, s1);
6591  __ Fcvtau(w2, s2);
6592  __ Fcvtau(w3, s3);
6593  __ Fcvtau(w4, s4);
6594  __ Fcvtau(w5, s5);
6595  __ Fcvtau(w6, s6);
6596  __ Fcvtau(w8, d8);
6597  __ Fcvtau(w9, d9);
6598  __ Fcvtau(w10, d10);
6599  __ Fcvtau(w11, d11);
6600  __ Fcvtau(w12, d12);
6601  __ Fcvtau(w13, d13);
6602  __ Fcvtau(w14, d14);
6603  __ Fcvtau(w15, d15);
6604  __ Fcvtau(x16, s16);
6605  __ Fcvtau(x17, s17);
6606  __ Fcvtau(x18, s18);
6607  __ Fcvtau(x19, s19);
6608  __ Fcvtau(x20, s20);
6609  __ Fcvtau(x21, s21);
6610  __ Fcvtau(x22, s22);
6611  __ Fcvtau(x24, d24);
6612  __ Fcvtau(x25, d25);
6613  __ Fcvtau(x26, d26);
6614  __ Fcvtau(x27, d27);
6615  __ Fcvtau(x28, d28);
6616  __ Fcvtau(x29, d29);
6617  __ Fcvtau(w30, s30);
6618  END();
6619 
6620  RUN();
6621 
6622  ASSERT_EQUAL_64(1, x0);
6623  ASSERT_EQUAL_64(1, x1);
6624  ASSERT_EQUAL_64(3, x2);
6625  ASSERT_EQUAL_64(0, x3);
6626  ASSERT_EQUAL_64(0xffffffff, x4);
6627  ASSERT_EQUAL_64(0, x5);
6628  ASSERT_EQUAL_64(0xffffff00, x6);
6629  ASSERT_EQUAL_64(1, x8);
6630  ASSERT_EQUAL_64(1, x9);
6631  ASSERT_EQUAL_64(3, x10);
6632  ASSERT_EQUAL_64(0, x11);
6633  ASSERT_EQUAL_64(0xffffffff, x12);
6634  ASSERT_EQUAL_64(0, x13);
6635  ASSERT_EQUAL_64(0xfffffffe, x14);
6636  ASSERT_EQUAL_64(1, x16);
6637  ASSERT_EQUAL_64(1, x17);
6638  ASSERT_EQUAL_64(3, x18);
6639  ASSERT_EQUAL_64(0, x19);
6640  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6641  ASSERT_EQUAL_64(0, x21);
6642  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
6643  ASSERT_EQUAL_64(1, x24);
6644  ASSERT_EQUAL_64(3, x25);
6645  ASSERT_EQUAL_64(0, x26);
6646  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6647  ASSERT_EQUAL_64(0, x28);
6648  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
6649  ASSERT_EQUAL_64(0xffffffff, x30);
6650 
6651  TEARDOWN();
6652 }
6653 
6654 
6655 TEST(fcvtms) {
6656  INIT_V8();
6657  SETUP();
6658 
6659  START();
6660  __ Fmov(s0, 1.0);
6661  __ Fmov(s1, 1.1);
6662  __ Fmov(s2, 1.5);
6663  __ Fmov(s3, -1.5);
6664  __ Fmov(s4, kFP32PositiveInfinity);
6665  __ Fmov(s5, kFP32NegativeInfinity);
6666  __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6667  __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6668  __ Fmov(d8, 1.0);
6669  __ Fmov(d9, 1.1);
6670  __ Fmov(d10, 1.5);
6671  __ Fmov(d11, -1.5);
6672  __ Fmov(d12, kFP64PositiveInfinity);
6673  __ Fmov(d13, kFP64NegativeInfinity);
6674  __ Fmov(d14, kWMaxInt - 1);
6675  __ Fmov(d15, kWMinInt + 1);
6676  __ Fmov(s17, 1.1);
6677  __ Fmov(s18, 1.5);
6678  __ Fmov(s19, -1.5);
6679  __ Fmov(s20, kFP32PositiveInfinity);
6680  __ Fmov(s21, kFP32NegativeInfinity);
6681  __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6682  __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6683  __ Fmov(d24, 1.1);
6684  __ Fmov(d25, 1.5);
6685  __ Fmov(d26, -1.5);
6686  __ Fmov(d27, kFP64PositiveInfinity);
6687  __ Fmov(d28, kFP64NegativeInfinity);
6688  __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6689  __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6690 
6691  __ Fcvtms(w0, s0);
6692  __ Fcvtms(w1, s1);
6693  __ Fcvtms(w2, s2);
6694  __ Fcvtms(w3, s3);
6695  __ Fcvtms(w4, s4);
6696  __ Fcvtms(w5, s5);
6697  __ Fcvtms(w6, s6);
6698  __ Fcvtms(w7, s7);
6699  __ Fcvtms(w8, d8);
6700  __ Fcvtms(w9, d9);
6701  __ Fcvtms(w10, d10);
6702  __ Fcvtms(w11, d11);
6703  __ Fcvtms(w12, d12);
6704  __ Fcvtms(w13, d13);
6705  __ Fcvtms(w14, d14);
6706  __ Fcvtms(w15, d15);
6707  __ Fcvtms(x17, s17);
6708  __ Fcvtms(x18, s18);
6709  __ Fcvtms(x19, s19);
6710  __ Fcvtms(x20, s20);
6711  __ Fcvtms(x21, s21);
6712  __ Fcvtms(x22, s22);
6713  __ Fcvtms(x23, s23);
6714  __ Fcvtms(x24, d24);
6715  __ Fcvtms(x25, d25);
6716  __ Fcvtms(x26, d26);
6717  __ Fcvtms(x27, d27);
6718  __ Fcvtms(x28, d28);
6719  __ Fcvtms(x29, d29);
6720  __ Fcvtms(x30, d30);
6721  END();
6722 
6723  RUN();
6724 
6725  ASSERT_EQUAL_64(1, x0);
6726  ASSERT_EQUAL_64(1, x1);
6727  ASSERT_EQUAL_64(1, x2);
6728  ASSERT_EQUAL_64(0xfffffffe, x3);
6729  ASSERT_EQUAL_64(0x7fffffff, x4);
6730  ASSERT_EQUAL_64(0x80000000, x5);
6731  ASSERT_EQUAL_64(0x7fffff80, x6);
6732  ASSERT_EQUAL_64(0x80000080, x7);
6733  ASSERT_EQUAL_64(1, x8);
6734  ASSERT_EQUAL_64(1, x9);
6735  ASSERT_EQUAL_64(1, x10);
6736  ASSERT_EQUAL_64(0xfffffffe, x11);
6737  ASSERT_EQUAL_64(0x7fffffff, x12);
6738  ASSERT_EQUAL_64(0x80000000, x13);
6739  ASSERT_EQUAL_64(0x7ffffffe, x14);
6740  ASSERT_EQUAL_64(0x80000001, x15);
6741  ASSERT_EQUAL_64(1, x17);
6742  ASSERT_EQUAL_64(1, x18);
6743  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
6744  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6745  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6746  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6747  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6748  ASSERT_EQUAL_64(1, x24);
6749  ASSERT_EQUAL_64(1, x25);
6750  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
6751  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6752  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6753  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6754  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6755 
6756  TEARDOWN();
6757 }
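// Fcvtms/Fcvtmu round toward minus infinity before saturating, which is why
// -1.5 converts to -2 here while the toward-zero Fcvtzs test below produces
// -1. A tiny host analogue of the difference (illustrative sketch):
static void CheckFloorVsTrunc() {
  CHECK(std::floor(-1.5) == -2.0);  // Round toward minus infinity (Fcvtms).
  CHECK(std::trunc(-1.5) == -1.0);  // Round toward zero (Fcvtzs).
}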
6758 
6759 
6760 TEST(fcvtmu) {
6761  INIT_V8();
6762  SETUP();
6763 
6764  START();
6765  __ Fmov(s0, 1.0);
6766  __ Fmov(s1, 1.1);
6767  __ Fmov(s2, 1.5);
6768  __ Fmov(s3, -1.5);
6769  __ Fmov(s4, kFP32PositiveInfinity);
6770  __ Fmov(s5, kFP32NegativeInfinity);
6771  __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6772  __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6773  __ Fmov(d8, 1.0);
6774  __ Fmov(d9, 1.1);
6775  __ Fmov(d10, 1.5);
6776  __ Fmov(d11, -1.5);
6777  __ Fmov(d12, kFP64PositiveInfinity);
6778  __ Fmov(d13, kFP64NegativeInfinity);
6779  __ Fmov(d14, kWMaxInt - 1);
6780  __ Fmov(d15, kWMinInt + 1);
6781  __ Fmov(s17, 1.1);
6782  __ Fmov(s18, 1.5);
6783  __ Fmov(s19, -1.5);
6784  __ Fmov(s20, kFP32PositiveInfinity);
6785  __ Fmov(s21, kFP32NegativeInfinity);
6786  __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6787  __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6788  __ Fmov(d24, 1.1);
6789  __ Fmov(d25, 1.5);
6790  __ Fmov(d26, -1.5);
6791  __ Fmov(d27, kFP64PositiveInfinity);
6792  __ Fmov(d28, kFP64NegativeInfinity);
6793  __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6794  __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6795 
6796  __ Fcvtmu(w0, s0);
6797  __ Fcvtmu(w1, s1);
6798  __ Fcvtmu(w2, s2);
6799  __ Fcvtmu(w3, s3);
6800  __ Fcvtmu(w4, s4);
6801  __ Fcvtmu(w5, s5);
6802  __ Fcvtmu(w6, s6);
6803  __ Fcvtmu(w7, s7);
6804  __ Fcvtmu(w8, d8);
6805  __ Fcvtmu(w9, d9);
6806  __ Fcvtmu(w10, d10);
6807  __ Fcvtmu(w11, d11);
6808  __ Fcvtmu(w12, d12);
6809  __ Fcvtmu(w13, d13);
6810  __ Fcvtmu(w14, d14);
6811  __ Fcvtmu(x17, s17);
6812  __ Fcvtmu(x18, s18);
6813  __ Fcvtmu(x19, s19);
6814  __ Fcvtmu(x20, s20);
6815  __ Fcvtmu(x21, s21);
6816  __ Fcvtmu(x22, s22);
6817  __ Fcvtmu(x23, s23);
6818  __ Fcvtmu(x24, d24);
6819  __ Fcvtmu(x25, d25);
6820  __ Fcvtmu(x26, d26);
6821  __ Fcvtmu(x27, d27);
6822  __ Fcvtmu(x28, d28);
6823  __ Fcvtmu(x29, d29);
6824  __ Fcvtmu(x30, d30);
6825  END();
6826 
6827  RUN();
6828 
6829  ASSERT_EQUAL_64(1, x0);
6830  ASSERT_EQUAL_64(1, x1);
6831  ASSERT_EQUAL_64(1, x2);
6832  ASSERT_EQUAL_64(0, x3);
6833  ASSERT_EQUAL_64(0xffffffff, x4);
6834  ASSERT_EQUAL_64(0, x5);
6835  ASSERT_EQUAL_64(0x7fffff80, x6);
6836  ASSERT_EQUAL_64(0, x7);
6837  ASSERT_EQUAL_64(1, x8);
6838  ASSERT_EQUAL_64(1, x9);
6839  ASSERT_EQUAL_64(1, x10);
6840  ASSERT_EQUAL_64(0, x11);
6841  ASSERT_EQUAL_64(0xffffffff, x12);
6842  ASSERT_EQUAL_64(0, x13);
6843  ASSERT_EQUAL_64(0x7ffffffe, x14);
6844  ASSERT_EQUAL_64(1, x17);
6845  ASSERT_EQUAL_64(1, x18);
6846  ASSERT_EQUAL_64(0x0UL, x19);
6847  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
6848  ASSERT_EQUAL_64(0x0UL, x21);
6849  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6850  ASSERT_EQUAL_64(0x0UL, x23);
6851  ASSERT_EQUAL_64(1, x24);
6852  ASSERT_EQUAL_64(1, x25);
6853  ASSERT_EQUAL_64(0x0UL, x26);
6854  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
6855  ASSERT_EQUAL_64(0x0UL, x28);
6856  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6857  ASSERT_EQUAL_64(0x0UL, x30);
6858 
6859  TEARDOWN();
6860 }
6861 
6862 
6863 TEST(fcvtns) {
6864  INIT_V8();
6865  SETUP();
6866 
6867  START();
6868  __ Fmov(s0, 1.0);
6869  __ Fmov(s1, 1.1);
6870  __ Fmov(s2, 1.5);
6871  __ Fmov(s3, -1.5);
6872  __ Fmov(s4, kFP32PositiveInfinity);
6873  __ Fmov(s5, kFP32NegativeInfinity);
6874  __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6875  __ Fneg(s7, s6); // Smallest float > INT32_MIN.
6876  __ Fmov(d8, 1.0);
6877  __ Fmov(d9, 1.1);
6878  __ Fmov(d10, 1.5);
6879  __ Fmov(d11, -1.5);
6880  __ Fmov(d12, kFP64PositiveInfinity);
6881  __ Fmov(d13, kFP64NegativeInfinity);
6882  __ Fmov(d14, kWMaxInt - 1);
6883  __ Fmov(d15, kWMinInt + 1);
6884  __ Fmov(s17, 1.1);
6885  __ Fmov(s18, 1.5);
6886  __ Fmov(s19, -1.5);
6887  __ Fmov(s20, kFP32PositiveInfinity);
6888  __ Fmov(s21, kFP32NegativeInfinity);
6889  __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
6890  __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6891  __ Fmov(d24, 1.1);
6892  __ Fmov(d25, 1.5);
6893  __ Fmov(d26, -1.5);
6894  __ Fmov(d27, kFP64PositiveInfinity);
6895  __ Fmov(d28, kFP64NegativeInfinity);
6896  __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
6897  __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6898 
6899  __ Fcvtns(w0, s0);
6900  __ Fcvtns(w1, s1);
6901  __ Fcvtns(w2, s2);
6902  __ Fcvtns(w3, s3);
6903  __ Fcvtns(w4, s4);
6904  __ Fcvtns(w5, s5);
6905  __ Fcvtns(w6, s6);
6906  __ Fcvtns(w7, s7);
6907  __ Fcvtns(w8, d8);
6908  __ Fcvtns(w9, d9);
6909  __ Fcvtns(w10, d10);
6910  __ Fcvtns(w11, d11);
6911  __ Fcvtns(w12, d12);
6912  __ Fcvtns(w13, d13);
6913  __ Fcvtns(w14, d14);
6914  __ Fcvtns(w15, d15);
6915  __ Fcvtns(x17, s17);
6916  __ Fcvtns(x18, s18);
6917  __ Fcvtns(x19, s19);
6918  __ Fcvtns(x20, s20);
6919  __ Fcvtns(x21, s21);
6920  __ Fcvtns(x22, s22);
6921  __ Fcvtns(x23, s23);
6922  __ Fcvtns(x24, d24);
6923  __ Fcvtns(x25, d25);
6924  __ Fcvtns(x26, d26);
6925  __ Fcvtns(x27, d27);
6926 // __ Fcvtns(x28, d28);
6927  __ Fcvtns(x29, d29);
6928  __ Fcvtns(x30, d30);
6929  END();
6930 
6931  RUN();
6932 
6933  ASSERT_EQUAL_64(1, x0);
6934  ASSERT_EQUAL_64(1, x1);
6935  ASSERT_EQUAL_64(2, x2);
6936  ASSERT_EQUAL_64(0xfffffffe, x3);
6937  ASSERT_EQUAL_64(0x7fffffff, x4);
6938  ASSERT_EQUAL_64(0x80000000, x5);
6939  ASSERT_EQUAL_64(0x7fffff80, x6);
6940  ASSERT_EQUAL_64(0x80000080, x7);
6941  ASSERT_EQUAL_64(1, x8);
6942  ASSERT_EQUAL_64(1, x9);
6943  ASSERT_EQUAL_64(2, x10);
6944  ASSERT_EQUAL_64(0xfffffffe, x11);
6945  ASSERT_EQUAL_64(0x7fffffff, x12);
6946  ASSERT_EQUAL_64(0x80000000, x13);
6947  ASSERT_EQUAL_64(0x7ffffffe, x14);
6948  ASSERT_EQUAL_64(0x80000001, x15);
6949  ASSERT_EQUAL_64(1, x17);
6950  ASSERT_EQUAL_64(2, x18);
6951  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
6952  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
6953  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
6954  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
6955  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
6956  ASSERT_EQUAL_64(1, x24);
6957  ASSERT_EQUAL_64(2, x25);
6958  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
6959  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
6960 // ASSERT_EQUAL_64(0x8000000000000000UL, x28);
6961  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
6962  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
6963 
6964  TEARDOWN();
6965 }
6966 
6967 
6968 TEST(fcvtnu) {
6969  INIT_V8();
6970  SETUP();
6971 
6972  START();
6973  __ Fmov(s0, 1.0);
6974  __ Fmov(s1, 1.1);
6975  __ Fmov(s2, 1.5);
6976  __ Fmov(s3, -1.5);
6977  __ Fmov(s4, kFP32PositiveInfinity);
6978  __ Fmov(s5, kFP32NegativeInfinity);
6979  __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6980  __ Fmov(d8, 1.0);
6981  __ Fmov(d9, 1.1);
6982  __ Fmov(d10, 1.5);
6983  __ Fmov(d11, -1.5);
6984  __ Fmov(d12, kFP64PositiveInfinity);
6985  __ Fmov(d13, kFP64NegativeInfinity);
6986  __ Fmov(d14, 0xfffffffe);
6987  __ Fmov(s16, 1.0);
6988  __ Fmov(s17, 1.1);
6989  __ Fmov(s18, 1.5);
6990  __ Fmov(s19, -1.5);
6991  __ Fmov(s20, kFP32PositiveInfinity);
6992  __ Fmov(s21, kFP32NegativeInfinity);
6993  __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
6994  __ Fmov(d24, 1.1);
6995  __ Fmov(d25, 1.5);
6996  __ Fmov(d26, -1.5);
6997  __ Fmov(d27, kFP64PositiveInfinity);
6998  __ Fmov(d28, kFP64NegativeInfinity);
6999  __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
7000  __ Fmov(s30, 0x100000000UL);
7001 
7002  __ Fcvtnu(w0, s0);
7003  __ Fcvtnu(w1, s1);
7004  __ Fcvtnu(w2, s2);
7005  __ Fcvtnu(w3, s3);
7006  __ Fcvtnu(w4, s4);
7007  __ Fcvtnu(w5, s5);
7008  __ Fcvtnu(w6, s6);
7009  __ Fcvtnu(w8, d8);
7010  __ Fcvtnu(w9, d9);
7011  __ Fcvtnu(w10, d10);
7012  __ Fcvtnu(w11, d11);
7013  __ Fcvtnu(w12, d12);
7014  __ Fcvtnu(w13, d13);
7015  __ Fcvtnu(w14, d14);
7016  __ Fcvtnu(w15, d15);
7017  __ Fcvtnu(x16, s16);
7018  __ Fcvtnu(x17, s17);
7019  __ Fcvtnu(x18, s18);
7020  __ Fcvtnu(x19, s19);
7021  __ Fcvtnu(x20, s20);
7022  __ Fcvtnu(x21, s21);
7023  __ Fcvtnu(x22, s22);
7024  __ Fcvtnu(x24, d24);
7025  __ Fcvtnu(x25, d25);
7026  __ Fcvtnu(x26, d26);
7027  __ Fcvtnu(x27, d27);
7028 // __ Fcvtnu(x28, d28);
7029  __ Fcvtnu(x29, d29);
7030  __ Fcvtnu(w30, s30);
7031  END();
7032 
7033  RUN();
7034 
7035  ASSERT_EQUAL_64(1, x0);
7036  ASSERT_EQUAL_64(1, x1);
7037  ASSERT_EQUAL_64(2, x2);
7038  ASSERT_EQUAL_64(0, x3);
7039  ASSERT_EQUAL_64(0xffffffff, x4);
7040  ASSERT_EQUAL_64(0, x5);
7041  ASSERT_EQUAL_64(0xffffff00, x6);
7042  ASSERT_EQUAL_64(1, x8);
7043  ASSERT_EQUAL_64(1, x9);
7044  ASSERT_EQUAL_64(2, x10);
7045  ASSERT_EQUAL_64(0, x11);
7046  ASSERT_EQUAL_64(0xffffffff, x12);
7047  ASSERT_EQUAL_64(0, x13);
7048  ASSERT_EQUAL_64(0xfffffffe, x14);
7049  ASSERT_EQUAL_64(1, x16);
7050  ASSERT_EQUAL_64(1, x17);
7051  ASSERT_EQUAL_64(2, x18);
7052  ASSERT_EQUAL_64(0, x19);
7053  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
7054  ASSERT_EQUAL_64(0, x21);
7055  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
7056  ASSERT_EQUAL_64(1, x24);
7057  ASSERT_EQUAL_64(2, x25);
7058  ASSERT_EQUAL_64(0, x26);
7059  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
7060 // ASSERT_EQUAL_64(0, x28);
7061  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
7062  ASSERT_EQUAL_64(0xffffffff, x30);
7063 
7064  TEARDOWN();
7065 }
7066 
7067 
7068 TEST(fcvtzs) {
7069  INIT_V8();
7070  SETUP();
7071 
7072  START();
7073  __ Fmov(s0, 1.0);
7074  __ Fmov(s1, 1.1);
7075  __ Fmov(s2, 1.5);
7076  __ Fmov(s3, -1.5);
7077  __ Fmov(s4, kFP32PositiveInfinity);
7078  __ Fmov(s5, kFP32NegativeInfinity);
7079  __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7080  __ Fneg(s7, s6); // Smallest float > INT32_MIN.
7081  __ Fmov(d8, 1.0);
7082  __ Fmov(d9, 1.1);
7083  __ Fmov(d10, 1.5);
7084  __ Fmov(d11, -1.5);
7085  __ Fmov(d12, kFP64PositiveInfinity);
7086  __ Fmov(d13, kFP64NegativeInfinity);
7087  __ Fmov(d14, kWMaxInt - 1);
7088  __ Fmov(d15, kWMinInt + 1);
7089  __ Fmov(s17, 1.1);
7090  __ Fmov(s18, 1.5);
7091  __ Fmov(s19, -1.5);
7092  __ Fmov(s20, kFP32PositiveInfinity);
7093  __ Fmov(s21, kFP32NegativeInfinity);
7094  __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
7095  __ Fneg(s23, s22); // Smallest float > INT64_MIN.
7096  __ Fmov(d24, 1.1);
7097  __ Fmov(d25, 1.5);
7098  __ Fmov(d26, -1.5);
7099  __ Fmov(d27, kFP64PositiveInfinity);
7100  __ Fmov(d28, kFP64NegativeInfinity);
7101  __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
7102  __ Fneg(d30, d29); // Smallest double > INT64_MIN.
7103 
7104  __ Fcvtzs(w0, s0);
7105  __ Fcvtzs(w1, s1);
7106  __ Fcvtzs(w2, s2);
7107  __ Fcvtzs(w3, s3);
7108  __ Fcvtzs(w4, s4);
7109  __ Fcvtzs(w5, s5);
7110  __ Fcvtzs(w6, s6);
7111  __ Fcvtzs(w7, s7);
7112  __ Fcvtzs(w8, d8);
7113  __ Fcvtzs(w9, d9);
7114  __ Fcvtzs(w10, d10);
7115  __ Fcvtzs(w11, d11);
7116  __ Fcvtzs(w12, d12);
7117  __ Fcvtzs(w13, d13);
7118  __ Fcvtzs(w14, d14);
7119  __ Fcvtzs(w15, d15);
7120  __ Fcvtzs(x17, s17);
7121  __ Fcvtzs(x18, s18);
7122  __ Fcvtzs(x19, s19);
7123  __ Fcvtzs(x20, s20);
7124  __ Fcvtzs(x21, s21);
7125  __ Fcvtzs(x22, s22);
7126  __ Fcvtzs(x23, s23);
7127  __ Fcvtzs(x24, d24);
7128  __ Fcvtzs(x25, d25);
7129  __ Fcvtzs(x26, d26);
7130  __ Fcvtzs(x27, d27);
7131  __ Fcvtzs(x28, d28);
7132  __ Fcvtzs(x29, d29);
7133  __ Fcvtzs(x30, d30);
7134  END();
7135 
7136  RUN();
7137 
7138  ASSERT_EQUAL_64(1, x0);
7139  ASSERT_EQUAL_64(1, x1);
7140  ASSERT_EQUAL_64(1, x2);
7141  ASSERT_EQUAL_64(0xffffffff, x3);
7142  ASSERT_EQUAL_64(0x7fffffff, x4);
7143  ASSERT_EQUAL_64(0x80000000, x5);
7144  ASSERT_EQUAL_64(0x7fffff80, x6);
7145  ASSERT_EQUAL_64(0x80000080, x7);
7146  ASSERT_EQUAL_64(1, x8);
7147  ASSERT_EQUAL_64(1, x9);
7148  ASSERT_EQUAL_64(1, x10);
7149  ASSERT_EQUAL_64(0xffffffff, x11);
7150  ASSERT_EQUAL_64(0x7fffffff, x12);
7151  ASSERT_EQUAL_64(0x80000000, x13);
7152  ASSERT_EQUAL_64(0x7ffffffe, x14);
7153  ASSERT_EQUAL_64(0x80000001, x15);
7154  ASSERT_EQUAL_64(1, x17);
7155  ASSERT_EQUAL_64(1, x18);
7156  ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
7157  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
7158  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
7159  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
7160  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
7161  ASSERT_EQUAL_64(1, x24);
7162  ASSERT_EQUAL_64(1, x25);
7163  ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
7164  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
7165  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
7166  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
7167  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
7168 
7169  TEARDOWN();
7170 }
7171 
7172 
7173 TEST(fcvtzu) {
7174  INIT_V8();
7175  SETUP();
7176 
7177  START();
7178  __ Fmov(s0, 1.0);
7179  __ Fmov(s1, 1.1);
7180  __ Fmov(s2, 1.5);
7181  __ Fmov(s3, -1.5);
7182  __ Fmov(s4, kFP32PositiveInfinity);
7183  __ Fmov(s5, kFP32NegativeInfinity);
7184  __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7185  __ Fneg(s7, s6); // Smallest float > INT32_MIN.
7186  __ Fmov(d8, 1.0);
7187  __ Fmov(d9, 1.1);
7188  __ Fmov(d10, 1.5);
7189  __ Fmov(d11, -1.5);
7190  __ Fmov(d12, kFP64PositiveInfinity);
7191  __ Fmov(d13, kFP64NegativeInfinity);
7192  __ Fmov(d14, kWMaxInt - 1);
7193  __ Fmov(d15, kWMinInt + 1);
7194  __ Fmov(s17, 1.1);
7195  __ Fmov(s18, 1.5);
7196  __ Fmov(s19, -1.5);
7197  __ Fmov(s20, kFP32PositiveInfinity);
7198  __ Fmov(s21, kFP32NegativeInfinity);
7199  __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
7200  __ Fneg(s23, s22); // Smallest float > INT64_MIN.
7201  __ Fmov(d24, 1.1);
7202  __ Fmov(d25, 1.5);
7203  __ Fmov(d26, -1.5);
7204  __ Fmov(d27, kFP64PositiveInfinity);
7205  __ Fmov(d28, kFP64NegativeInfinity);
7206  __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
7207  __ Fneg(d30, d29); // Smallest double > INT64_MIN.
7208 
7209  __ Fcvtzu(w0, s0);
7210  __ Fcvtzu(w1, s1);
7211  __ Fcvtzu(w2, s2);
7212  __ Fcvtzu(w3, s3);
7213  __ Fcvtzu(w4, s4);
7214  __ Fcvtzu(w5, s5);
7215  __ Fcvtzu(w6, s6);
7216  __ Fcvtzu(w7, s7);
7217  __ Fcvtzu(w8, d8);
7218  __ Fcvtzu(w9, d9);
7219  __ Fcvtzu(w10, d10);
7220  __ Fcvtzu(w11, d11);
7221  __ Fcvtzu(w12, d12);
7222  __ Fcvtzu(w13, d13);
7223  __ Fcvtzu(w14, d14);
7224  __ Fcvtzu(x17, s17);
7225  __ Fcvtzu(x18, s18);
7226  __ Fcvtzu(x19, s19);
7227  __ Fcvtzu(x20, s20);
7228  __ Fcvtzu(x21, s21);
7229  __ Fcvtzu(x22, s22);
7230  __ Fcvtzu(x23, s23);
7231  __ Fcvtzu(x24, d24);
7232  __ Fcvtzu(x25, d25);
7233  __ Fcvtzu(x26, d26);
7234  __ Fcvtzu(x27, d27);
7235  __ Fcvtzu(x28, d28);
7236  __ Fcvtzu(x29, d29);
7237  __ Fcvtzu(x30, d30);
7238  END();
7239 
7240  RUN();
7241 
7242  ASSERT_EQUAL_64(1, x0);
7243  ASSERT_EQUAL_64(1, x1);
7244  ASSERT_EQUAL_64(1, x2);
7245  ASSERT_EQUAL_64(0, x3);
7246  ASSERT_EQUAL_64(0xffffffff, x4);
7247  ASSERT_EQUAL_64(0, x5);
7248  ASSERT_EQUAL_64(0x7fffff80, x6);
7249  ASSERT_EQUAL_64(0, x7);
7250  ASSERT_EQUAL_64(1, x8);
7251  ASSERT_EQUAL_64(1, x9);
7252  ASSERT_EQUAL_64(1, x10);
7253  ASSERT_EQUAL_64(0, x11);
7254  ASSERT_EQUAL_64(0xffffffff, x12);
7255  ASSERT_EQUAL_64(0, x13);
7256  ASSERT_EQUAL_64(0x7ffffffe, x14);
7257  ASSERT_EQUAL_64(1, x17);
7258  ASSERT_EQUAL_64(1, x18);
7259  ASSERT_EQUAL_64(0x0UL, x19);
7260  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
7261  ASSERT_EQUAL_64(0x0UL, x21);
7262  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
7263  ASSERT_EQUAL_64(0x0UL, x23);
7264  ASSERT_EQUAL_64(1, x24);
7265  ASSERT_EQUAL_64(1, x25);
7266  ASSERT_EQUAL_64(0x0UL, x26);
7267  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
7268  ASSERT_EQUAL_64(0x0UL, x28);
7269  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
7270  ASSERT_EQUAL_64(0x0UL, x30);
7271 
7272  TEARDOWN();
7273 }
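// A compact host-side model of the saturating conversions exercised by the
// Fcvt* tests above: apply the variant's rounding function, then clamp to
// the destination range, with NaN converting to 0. A sketch of the
// toward-zero signed case, assuming kXMaxInt/kXMinInt from
// constants-arm64.h and a C++11 std::isnan; illustrative only.
static int64_t HostFcvtzsX(double value) {
  if (std::isnan(value)) return 0;
  double rounded = std::trunc(value);  // Fcvtzs: round toward zero.
  if (rounded >= 9223372036854775808.0) return kXMaxInt;   // >= 2^63
  if (rounded < -9223372036854775808.0) return kXMinInt;   // < -2^63
  return static_cast<int64_t>(rounded);
}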
7274 
7275 
7276 // Test that scvtf and ucvtf can convert the 64-bit input into the expected
7277 // value. All possible values of 'fbits' are tested. The expected value is
7278 // modified accordingly in each case.
7279 //
7280 // The expected value is specified as the bit encoding of the expected double
7281 // produced by scvtf (expected_scvtf_bits) as well as ucvtf
7282 // (expected_ucvtf_bits).
7283 //
7284 // Where the input value is representable by int32_t or uint32_t, conversions
7285 // from W registers will also be tested.
7286 static void TestUScvtfHelper(uint64_t in,
7287  uint64_t expected_scvtf_bits,
7288  uint64_t expected_ucvtf_bits) {
7289  uint64_t u64 = in;
7290  uint32_t u32 = u64 & 0xffffffff;
7291  int64_t s64 = static_cast<int64_t>(in);
7292  int32_t s32 = s64 & 0x7fffffff;
7293 
7294  bool cvtf_s32 = (s64 == s32);
7295  bool cvtf_u32 = (u64 == u32);
7296 
7297  double results_scvtf_x[65];
7298  double results_ucvtf_x[65];
7299  double results_scvtf_w[33];
7300  double results_ucvtf_w[33];
7301 
7302  SETUP();
7303  START();
7304 
7305  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7306  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7307  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7308  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7309 
7310  __ Mov(x10, s64);
7311 
7312  // Corrupt the top word, in case it is accidentally used during W-register
7313  // conversions.
7314  __ Mov(x11, 0x5555555555555555);
7315  __ Bfi(x11, x10, 0, kWRegSizeInBits);
7316 
7317  // Test integer conversions.
7318  __ Scvtf(d0, x10);
7319  __ Ucvtf(d1, x10);
7320  __ Scvtf(d2, w11);
7321  __ Ucvtf(d3, w11);
7322  __ Str(d0, MemOperand(x0));
7323  __ Str(d1, MemOperand(x1));
7324  __ Str(d2, MemOperand(x2));
7325  __ Str(d3, MemOperand(x3));
7326 
7327  // Test all possible values of fbits.
7328  for (int fbits = 1; fbits <= 32; fbits++) {
7329  __ Scvtf(d0, x10, fbits);
7330  __ Ucvtf(d1, x10, fbits);
7331  __ Scvtf(d2, w11, fbits);
7332  __ Ucvtf(d3, w11, fbits);
7333  __ Str(d0, MemOperand(x0, fbits * kDRegSize));
7334  __ Str(d1, MemOperand(x1, fbits * kDRegSize));
7335  __ Str(d2, MemOperand(x2, fbits * kDRegSize));
7336  __ Str(d3, MemOperand(x3, fbits * kDRegSize));
7337  }
7338 
7339  // Conversions from W registers can only handle fbits values <= 32, so just
7340  // test conversions from X registers for 32 < fbits <= 64.
7341  for (int fbits = 33; fbits <= 64; fbits++) {
7342  __ Scvtf(d0, x10, fbits);
7343  __ Ucvtf(d1, x10, fbits);
7344  __ Str(d0, MemOperand(x0, fbits * kDRegSize));
7345  __ Str(d1, MemOperand(x1, fbits * kDRegSize));
7346  }
7347 
7348  END();
7349  RUN();
7350 
7351  // Check the results.
7352  double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
7353  double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
7354 
7355  for (int fbits = 0; fbits <= 32; fbits++) {
7356  double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7357  double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7358  ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7359  ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7360  if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
7361  if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
7362  }
7363  for (int fbits = 33; fbits <= 64; fbits++) {
7364  double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7365  double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7366  ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7367  ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7368  }
7369 
7370  TEARDOWN();
7371 }
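// The fbits operand makes Scvtf/Ucvtf treat the integer input as a
// fixed-point value with fbits fractional bits, so the result is simply the
// plain integer conversion divided by 2^fbits -- exactly how the expected
// values are derived in the check loops above. A host-side sketch of the
// unsigned case (illustrative only):
static double HostUcvtf(uint64_t raw, int fbits) {
  // The uint64 -> double conversion itself rounds to nearest, ties to even,
  // matching the default FPCR rounding mode.
  return static_cast<double>(raw) / pow(2.0, fbits);
}
// e.g. HostUcvtf(0x8000000000000000UL, 1) == 4611686018427387904.0 (2^62).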
7372 
7373 
7374 TEST(scvtf_ucvtf_double) {
7375  INIT_V8();
7376  // Simple conversions of positive numbers which require no rounding; the
7377  // results should not depend on the rounding mode, and ucvtf and scvtf should
7378  // produce the same result.
7379  TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
7380  TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
7381  TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
7382  TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
7383  TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
7384  // Test mantissa extremities.
7385  TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
7386  // The largest int32_t that fits in a double.
7387  TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
7388  // Values that would be negative if treated as an int32_t.
7389  TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
7390  TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
7391  TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
7392  // The largest int64_t that fits in a double.
7393  TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
7394  // Check for bit pattern reproduction.
7395  TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
7396  TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
7397 
7398  // Simple conversions of negative int64_t values. These require no rounding,
7399  // and the results should not depend on the rounding mode.
7400  TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
7401  TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
7402  TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
7403 
7404  // Conversions which require rounding.
7405  TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
7406  TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
7407  TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
7408  TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
7409  TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
7410  TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
7411  TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
7412  TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
7413  TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
7414  TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
7415  TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
7416  TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
7417  TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
7418  // Check rounding of negative int64_t values (and large uint64_t values).
7419  TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
7420  TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
7421  TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
7422  TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
7423  TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
7424  TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
7425  TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
7426  TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
7427  TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
7428  TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
7429  TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
7430  TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
7431  TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
7432  // Round up to produce a result that's too big for the input to represent.
7433  TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
7434  TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
7435  TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
7436  TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
7437 }
7438 
7439 
7440 // The same as TestUScvtfHelper, but convert to floats.
7441 static void TestUScvtf32Helper(uint64_t in,
7442  uint32_t expected_scvtf_bits,
7443  uint32_t expected_ucvtf_bits) {
7444  uint64_t u64 = in;
7445  uint32_t u32 = u64 & 0xffffffff;
7446  int64_t s64 = static_cast<int64_t>(in);
7447  int32_t s32 = s64 & 0x7fffffff;
7448 
7449  bool cvtf_s32 = (s64 == s32);
7450  bool cvtf_u32 = (u64 == u32);
7451 
7452  float results_scvtf_x[65];
7453  float results_ucvtf_x[65];
7454  float results_scvtf_w[33];
7455  float results_ucvtf_w[33];
7456 
7457  SETUP();
7458  START();
7459 
7460  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7461  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7462  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7463  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7464 
7465  __ Mov(x10, s64);
7466 
7467  // Corrupt the top word, in case it is accidentally used during W-register
7468  // conversions.
7469  __ Mov(x11, 0x5555555555555555);
7470  __ Bfi(x11, x10, 0, kWRegSizeInBits);
7471 
7472  // Test integer conversions.
7473  __ Scvtf(s0, x10);
7474  __ Ucvtf(s1, x10);
7475  __ Scvtf(s2, w11);
7476  __ Ucvtf(s3, w11);
7477  __ Str(s0, MemOperand(x0));
7478  __ Str(s1, MemOperand(x1));
7479  __ Str(s2, MemOperand(x2));
7480  __ Str(s3, MemOperand(x3));
7481 
7482  // Test all possible values of fbits.
7483  for (int fbits = 1; fbits <= 32; fbits++) {
7484  __ Scvtf(s0, x10, fbits);
7485  __ Ucvtf(s1, x10, fbits);
7486  __ Scvtf(s2, w11, fbits);
7487  __ Ucvtf(s3, w11, fbits);
7488  __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7489  __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7490  __ Str(s2, MemOperand(x2, fbits * kSRegSize));
7491  __ Str(s3, MemOperand(x3, fbits * kSRegSize));
7492  }
7493 
7494  // Conversions from W registers can only handle fbits values <= 32, so just
7495  // test conversions from X registers for 32 < fbits <= 64.
7496  for (int fbits = 33; fbits <= 64; fbits++) {
7497  __ Scvtf(s0, x10, fbits);
7498  __ Ucvtf(s1, x10, fbits);
7499  __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7500  __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7501  }
7502 
7503  END();
7504  RUN();
7505 
7506  // Check the results.
7507  float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7508  float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
7509 
7510  for (int fbits = 0; fbits <= 32; fbits++) {
7511  float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7512  float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7513  ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7514  ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7515  if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
7516  if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
7518  }
7519  for (int fbits = 33; fbits <= 64; fbits++) {
7521  float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7522  float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7523  ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7524  ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7525  }
7526 
7527  TEARDOWN();
7528 }
7529 
7530 
7531 TEST(scvtf_ucvtf_float) {
7532  INIT_V8();
7533  // Simple conversions of positive numbers which require no rounding; the
7534  // results should not depend on the rounding mode, and ucvtf and scvtf should
7535  // produce the same result.
7536  TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
7537  TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
7538  TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
7539  TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
7540  TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
7541  // Test mantissa extremities.
7542  TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
7543  TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
7544  // The largest int32_t that fits in a float.
7545  TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
7546  // Values that would be negative if treated as an int32_t.
7547  TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
7548  TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
7549  TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
7550  // The largest int64_t that fits in a float.
7551  TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
7552  // Check for bit pattern reproduction.
7553  TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
7554 
7555  // Simple conversions of negative int64_t values. These require no rounding,
7556  // and the results should not depend on the rounding mode.
7557  TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
7558  TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
7559 
7560  // Conversions which require rounding.
7561  TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
7562  TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
7563  TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
7564  TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
7565  TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
7566  TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
7567  TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
7568  TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
7569  TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
7570  TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
7571  TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
7572  TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
7573  TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
7574  // Check rounding of negative int64_t values (and large uint64_t values).
7575  TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
7576  TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
7577  TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
7578  TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
7579  TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
7580  TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
7581  TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
7582  TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
7583  TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
7584  TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
7585  TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
7586  TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
7587  TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
7588  // Round up to produce a result that's too big for the input to represent.
7589  TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
7590  TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
7591  TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
7592  TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
7593  TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
7594  TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
7595  TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
7596  TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
7597 }
7598 
7599 
7600 TEST(system_mrs) {
7601  INIT_V8();
7602  SETUP();
7603 
7604  START();
7605  __ Mov(w0, 0);
7606  __ Mov(w1, 1);
7607  __ Mov(w2, 0x80000000);
7608 
7609  // Set the Z and C flags.
7610  __ Cmp(w0, w0);
7611  __ Mrs(x3, NZCV);
7612 
7613  // Set the N flag.
7614  __ Cmp(w0, w1);
7615  __ Mrs(x4, NZCV);
7616 
7617  // Set the Z, C and V flags.
7618  __ Adds(w0, w2, w2);
7619  __ Mrs(x5, NZCV);
7620 
7621  // Read the default FPCR.
7622  __ Mrs(x6, FPCR);
7623  END();
7624 
7625  RUN();
7626 
7627  // NZCV
7628  ASSERT_EQUAL_32(ZCFlag, w3);
7629  ASSERT_EQUAL_32(NFlag, w4);
7630  ASSERT_EQUAL_32(ZCVFlag, w5);
7631 
7632  // FPCR
7633  // The default FPCR on Linux-based platforms is 0.
7634  ASSERT_EQUAL_32(0, w6);
7635 
7636  TEARDOWN();
7637 }
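// The NZCV values read back through Mrs live in bits 31-28 of the system
// register, which is what the *Flag constants from constants-arm64.h encode:
//   N = 1 << 31, Z = 1 << 30, C = 1 << 29, V = 1 << 28
// so the asserted ZCFlag is 0x60000000 and NFlag is 0x80000000. A quick
// arithmetic check: (1 << 30) | (1 << 29) == 0x60000000.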
7638 
7639 
7640 TEST(system_msr) {
7641  INIT_V8();
7642  // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
7643  const uint64_t fpcr_core = 0x07c00000;
7644 
7645  // All FPCR fields (including fields which may be read-as-zero):
7646  // Stride, Len
7647  // IDE, IXE, UFE, OFE, DZE, IOE
7648  const uint64_t fpcr_all = fpcr_core | 0x00379f00;
7649 
7650  SETUP();
7651 
7652  START();
7653  __ Mov(w0, 0);
7654  __ Mov(w1, 0x7fffffff);
7655 
7656  __ Mov(x7, 0);
7657 
7658  __ Mov(x10, NVFlag);
7659  __ Cmp(w0, w0); // Set Z and C.
7660  __ Msr(NZCV, x10); // Set N and V.
7661  // The Msr should have overwritten every flag set by the Cmp.
7662  __ Cinc(x7, x7, mi); // N
7663  __ Cinc(x7, x7, ne); // !Z
7664  __ Cinc(x7, x7, lo); // !C
7665  __ Cinc(x7, x7, vs); // V
7666 
7667  __ Mov(x10, ZCFlag);
7668  __ Cmn(w1, w1); // Set N and V.
7669  __ Msr(NZCV, x10); // Set Z and C.
7670  // The Msr should have overwritten every flag set by the Cmn.
7671  __ Cinc(x7, x7, pl); // !N
7672  __ Cinc(x7, x7, eq); // Z
7673  __ Cinc(x7, x7, hs); // C
7674  __ Cinc(x7, x7, vc); // !V
7675 
7676  // All core FPCR fields must be writable.
7677  __ Mov(x8, fpcr_core);
7678  __ Msr(FPCR, x8);
7679  __ Mrs(x8, FPCR);
7680 
7681  // All FPCR fields, including optional ones. This part of the test doesn't
7682  // achieve much other than ensuring that supported fields can be cleared by
7683  // the next test.
7684  __ Mov(x9, fpcr_all);
7685  __ Msr(FPCR, x9);
7686  __ Mrs(x9, FPCR);
7687  __ And(x9, x9, fpcr_core);
7688 
7689  // The undefined bits must ignore writes.
7690  // It's conceivable that a future version of the architecture could use these
7691  // fields (making this test fail), but in the meantime this is a useful test
7692  // for the simulator.
7693  __ Mov(x10, ~fpcr_all);
7694  __ Msr(FPCR, x10);
7695  __ Mrs(x10, FPCR);
7696 
7697  END();
7698 
7699  RUN();
7700 
7701  // We should have incremented x7 (from 0) exactly 8 times.
7702  ASSERT_EQUAL_64(8, x7);
7703 
7704  ASSERT_EQUAL_64(fpcr_core, x8);
7705  ASSERT_EQUAL_64(fpcr_core, x9);
7706  ASSERT_EQUAL_64(0, x10);
7707 
7708  TEARDOWN();
7709 }
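// How the two masks above decompose, following the ARMv8 FPCR bit layout:
//   fpcr_core = 0x07c00000 = AHP(bit 26) | DN(25) | FZ(24) | RMode(23:22)
//             = (1 << 26) | (1 << 25) | (1 << 24) | (3 << 22)
//   fpcr_all  = fpcr_core | 0x00379f00
//             = fpcr_core | Stride(21:20) | Len(18:16)
//                         | IDE(15) | IXE(12) | UFE(11) | OFE(10)
//                         | DZE(9) | IOE(8)
// i.e. (3 << 20) | (7 << 16) | 0x9f00 == 0x00379f00.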
7710 
7711 
7712 TEST(system_nop) {
7713  INIT_V8();
7714  SETUP();
7715  RegisterDump before;
7716 
7717  START();
7718  before.Dump(&masm);
7719  __ Nop();
7720  END();
7721 
7722  RUN();
7723 
7724  ASSERT_EQUAL_REGISTERS(before);
7725  ASSERT_EQUAL_NZCV(before.flags_nzcv());
7726 
7727  TEARDOWN();
7728 }
7729 
7730 
7731 TEST(zero_dest) {
7732  INIT_V8();
7733  SETUP();
7734  RegisterDump before;
7735 
7736  START();
7737  // Preserve the system stack pointer, in case we clobber it.
7738  __ Mov(x30, csp);
7739  // Initialize the other registers used in this test.
7740  uint64_t literal_base = 0x0100001000100101UL;
7741  __ Mov(x0, 0);
7742  __ Mov(x1, literal_base);
7743  for (unsigned i = 2; i < x30.code(); i++) {
7744  __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
7745  }
7746  before.Dump(&masm);
7747 
7748  // All of these instructions should be NOPs in these forms, but have
7749  // alternate forms which can write into the stack pointer.
7750  __ add(xzr, x0, x1);
7751  __ add(xzr, x1, xzr);
7752  __ add(xzr, xzr, x1);
7753 
7754  __ and_(xzr, x0, x2);
7755  __ and_(xzr, x2, xzr);
7756  __ and_(xzr, xzr, x2);
7757 
7758  __ bic(xzr, x0, x3);
7759  __ bic(xzr, x3, xzr);
7760  __ bic(xzr, xzr, x3);
7761 
7762  __ eon(xzr, x0, x4);
7763  __ eon(xzr, x4, xzr);
7764  __ eon(xzr, xzr, x4);
7765 
7766  __ eor(xzr, x0, x5);
7767  __ eor(xzr, x5, xzr);
7768  __ eor(xzr, xzr, x5);
7769 
7770  __ orr(xzr, x0, x6);
7771  __ orr(xzr, x6, xzr);
7772  __ orr(xzr, xzr, x6);
7773 
7774  __ sub(xzr, x0, x7);
7775  __ sub(xzr, x7, xzr);
7776  __ sub(xzr, xzr, x7);
7777 
7778  // Swap the saved system stack pointer with the real one. If csp was written
7779  // during the test, it will show up in x30. This is done because the test
7780  // framework assumes that csp will be valid at the end of the test.
7781  __ Mov(x29, x30);
7782  __ Mov(x30, csp);
7783  __ Mov(csp, x29);
7784  // We used x29 as a scratch register, so reset it to make sure it doesn't
7785  // trigger a test failure.
7786  __ Add(x29, x28, x1);
7787  END();
7788 
7789  RUN();
7790 
7791  ASSERT_EQUAL_REGISTERS(before);
7792  ASSERT_EQUAL_NZCV(before.flags_nzcv());
7793 
7794  TEARDOWN();
7795 }
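// The "alternate forms" mentioned above exist because register code 31 is
// context-sensitive in A64: in the shifted-register forms used here it names
// the zero register (xzr), while in the extended-register and immediate
// forms it names the stack pointer. An illustrative contrast (sketch only):
//   __ add(xzr, x0, x1);                 // Rd = 31 means xzr: result discarded.
//   __ add(csp, x0, Operand(x1, UXTX));  // Rd = 31 means csp: writes the SP.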
7796 
7797 
7798 TEST(zero_dest_setflags) {
7799  INIT_V8();
7800  SETUP();
7801  RegisterDump before;
7802 
7803  START();
7804  // Preserve the system stack pointer, in case we clobber it.
7805  __ Mov(x30, csp);
7806  // Initialize the other registers used in this test.
7807  uint64_t literal_base = 0x0100001000100101UL;
7808  __ Mov(x0, 0);
7809  __ Mov(x1, literal_base);
7810  for (int i = 2; i < 30; i++) {
7811  __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
7812  }
7813  before.Dump(&masm);
7814 
7815  // All of these instructions should only write to the flags in these forms,
7816  // but have alternate forms which can write into the stack pointer.
7817  __ adds(xzr, x0, Operand(x1, UXTX));
7818  __ adds(xzr, x1, Operand(xzr, UXTX));
7819  __ adds(xzr, x1, 1234);
7820  __ adds(xzr, x0, x1);
7821  __ adds(xzr, x1, xzr);
7822  __ adds(xzr, xzr, x1);
7823 
7824  __ ands(xzr, x2, ~0xf);
7825  __ ands(xzr, xzr, ~0xf);
7826  __ ands(xzr, x0, x2);
7827  __ ands(xzr, x2, xzr);
7828  __ ands(xzr, xzr, x2);
7829 
7830  __ bics(xzr, x3, ~0xf);
7831  __ bics(xzr, xzr, ~0xf);
7832  __ bics(xzr, x0, x3);
7833  __ bics(xzr, x3, xzr);
7834  __ bics(xzr, xzr, x3);
7835 
7836  __ subs(xzr, x0, Operand(x3, UXTX));
7837  __ subs(xzr, x3, Operand(xzr, UXTX));
7838  __ subs(xzr, x3, 1234);
7839  __ subs(xzr, x0, x3);
7840  __ subs(xzr, x3, xzr);
7841  __ subs(xzr, xzr, x3);
7842 
7843  // Swap the saved system stack pointer with the real one. If csp was written
7844  // during the test, it will show up in x30. This is done because the test
7845  // framework assumes that csp will be valid at the end of the test.
7846  __ Mov(x29, x30);
7847  __ Mov(x30, csp);
7848  __ Mov(csp, x29);
7849  // We used x29 as a scratch register, so reset it to make sure it doesn't
7850  // trigger a test failure.
7851  __ Add(x29, x28, x1);
7852  END();
7853 
7854  RUN();
7855 
7856  ASSERT_EQUAL_REGISTERS(before);
7857 
7858  TEARDOWN();
7859 }
7860 
7861 
7862 TEST(register_bit) {
7863  // No code generation takes place in this test, so no need to setup and
7864  // teardown.
7865 
7866  // Simple tests.
7867  CHECK(x0.Bit() == (1UL << 0));
7868  CHECK(x1.Bit() == (1UL << 1));
7869  CHECK(x10.Bit() == (1UL << 10));
7870 
7871  // AAPCS64 definitions.
7872  CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
7873  CHECK(lr.Bit() == (1UL << kLinkRegCode));
7874 
7875  // Fixed (hardware) definitions.
7876  CHECK(xzr.Bit() == (1UL << kZeroRegCode));
7877 
7878  // Internal ABI definitions.
7879  CHECK(jssp.Bit() == (1UL << kJSSPCode));
7880  CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
7881  CHECK(csp.Bit() != xzr.Bit());
7882 
7883  // xn.Bit() == wn.Bit() at all times, for the same n.
7884  CHECK(x0.Bit() == w0.Bit());
7885  CHECK(x1.Bit() == w1.Bit());
7886  CHECK(x10.Bit() == w10.Bit());
7887  CHECK(jssp.Bit() == wjssp.Bit());
7888  CHECK(xzr.Bit() == wzr.Bit());
7889  CHECK(csp.Bit() == wcsp.Bit());
7890 }
7891 
7892 
7893 TEST(stack_pointer_override) {
7894  // This test generates some stack maintenance code, but the test only checks
7895  // the reported state.
7896  INIT_V8();
7897  SETUP();
7898  START();
7899 
7900  // The default stack pointer in V8 is jssp, but the test framework sets it
7901  // to csp before calling the test.
7902  CHECK(csp.Is(__ StackPointer()));
7903  __ SetStackPointer(x0);
7904  CHECK(x0.Is(__ StackPointer()));
7905  __ SetStackPointer(jssp);
7906  CHECK(jssp.Is(__ StackPointer()));
7907  __ SetStackPointer(csp);
7908  CHECK(csp.Is(__ StackPointer()));
7909 
7910  END();
7911  RUN();
7912  TEARDOWN();
7913 }
7914 
7915 
7916 TEST(peek_poke_simple) {
7917  INIT_V8();
7918  SETUP();
7919  START();
7920 
7921  static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
7922  static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
7923  x12.Bit() | x13.Bit();
7924 
7925  // The literal base is chosen to have two useful properties:
7926  // * When multiplied by small values (such as a register index), this value
7927  // is clearly readable in the result.
7928  // * The value is not formed from repeating fixed-size smaller values, so it
7929  // can be used to detect endianness-related errors.
7930  uint64_t literal_base = 0x0100001000100101UL;
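  // For example, every populated nibble of the base is 1, so small multiples
  // scale digit-by-digit without carries:
  //   0x0100001000100101 * 2 == 0x0200002000200202
  //   0x0100001000100101 * 3 == 0x0300003000300303
  // A register holding literal_base * n therefore reads as 'n' in each
  // populated digit of a register dump.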
7931 
7932  // Initialize the registers.
7933  __ Mov(x0, literal_base);
7934  __ Add(x1, x0, x0);
7935  __ Add(x2, x1, x0);
7936  __ Add(x3, x2, x0);
7937 
7938  __ Claim(4);
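  // The default Claim unit is an X-sized (8-byte) slot, as the pokes below at
  // byte offsets 0, 8, 16 and 24 show: Claim(4) reserves exactly the 32 bytes
  // they need, and the matching Drop(4) at the end releases them.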
7939 
7940  // Simple exchange.
7941  // After this test:
7942  // x0-x3 should be unchanged.
7943  // w10-w13 should contain the lower words of x0-x3.
7944  __ Poke(x0, 0);
7945  __ Poke(x1, 8);
7946  __ Poke(x2, 16);
7947  __ Poke(x3, 24);
7948  Clobber(&masm, x0_to_x3);
7949  __ Peek(x0, 0);
7950  __ Peek(x1, 8);
7951  __ Peek(x2, 16);
7952  __ Peek(x3, 24);
7953 
7954  __ Poke(w0, 0);
7955  __ Poke(w1, 4);
7956  __ Poke(w2, 8);
7957  __ Poke(w3, 12);
7958  Clobber(&masm, x10_to_x13);
7959  __ Peek(w10, 0);
7960  __ Peek(w11, 4);
7961  __ Peek(w12, 8);
7962  __ Peek(w13, 12);
7963 
7964  __ Drop(4);
7965 
7966  END();
7967  RUN();
7968 
7969  ASSERT_EQUAL_64(literal_base * 1, x0);
7970  ASSERT_EQUAL_64(literal_base * 2, x1);
7971  ASSERT_EQUAL_64(literal_base * 3, x2);
7972  ASSERT_EQUAL_64(literal_base * 4, x3);
7973 
7974  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
7975  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
7976  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
7977  ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
7978 
7979  TEARDOWN();
7980 }
7981 
7982 
7983 TEST(peek_poke_unaligned) {
7984  INIT_V8();
7985  SETUP();
7986  START();
7987 
7988  // The literal base is chosen to have two useful properties:
7989  // * When multiplied by small values (such as a register index), this value
7990  // is clearly readable in the result.
7991  // * The value is not formed from repeating fixed-size smaller values, so it
7992  // can be used to detect endianness-related errors.
7993  uint64_t literal_base = 0x0100001000100101UL;
7994 
7995  // Initialize the registers.
7996  __ Mov(x0, literal_base);
7997  __ Add(x1, x0, x0);
7998  __ Add(x2, x1, x0);
7999  __ Add(x3, x2, x0);
8000  __ Add(x4, x3, x0);
8001  __ Add(x5, x4, x0);
8002  __ Add(x6, x5, x0);
8003 
8004  __ Claim(4);
8005 
8006  // Unaligned exchanges.
8007  // After this test:
8008  // x0-x6 should be unchanged.
8009  // w10-w12 should contain the lower words of x0-x2.
8010  __ Poke(x0, 1);
8011  Clobber(&masm, x0.Bit());
8012  __ Peek(x0, 1);
8013  __ Poke(x1, 2);
8014  Clobber(&masm, x1.Bit());
8015  __ Peek(x1, 2);
8016  __ Poke(x2, 3);
8017  Clobber(&masm, x2.Bit());
8018  __ Peek(x2, 3);
8019  __ Poke(x3, 4);
8020  Clobber(&masm, x3.Bit());
8021  __ Peek(x3, 4);
8022  __ Poke(x4, 5);
8023  Clobber(&masm, x4.Bit());
8024  __ Peek(x4, 5);
8025  __ Poke(x5, 6);
8026  Clobber(&masm, x5.Bit());
8027  __ Peek(x5, 6);
8028  __ Poke(x6, 7);
8029  Clobber(&masm, x6.Bit());
8030  __ Peek(x6, 7);
8031 
8032  __ Poke(w0, 1);
8033  Clobber(&masm, w10.Bit());
8034  __ Peek(w10, 1);
8035  __ Poke(w1, 2);
8036  Clobber(&masm, w11.Bit());
8037  __ Peek(w11, 2);
8038  __ Poke(w2, 3);
8039  Clobber(&masm, w12.Bit());
8040  __ Peek(w12, 3);
8041 
8042  __ Drop(4);
8043 
8044  END();
8045  RUN();
8046 
8047  ASSERT_EQUAL_64(literal_base * 1, x0);
8048  ASSERT_EQUAL_64(literal_base * 2, x1);
8049  ASSERT_EQUAL_64(literal_base * 3, x2);
8050  ASSERT_EQUAL_64(literal_base * 4, x3);
8051  ASSERT_EQUAL_64(literal_base * 5, x4);
8052  ASSERT_EQUAL_64(literal_base * 6, x5);
8053  ASSERT_EQUAL_64(literal_base * 7, x6);
8054 
8055  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8056  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8057  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8058 
8059  TEARDOWN();
8060 }
8061 
8062 
8063 TEST(peek_poke_endianness) {
8064  INIT_V8();
8065  SETUP();
8066  START();
8067 
8068  // The literal base is chosen to have two useful properties:
8069  // * When multiplied by small values (such as a register index), this value
8070  // is clearly readable in the result.
8071  // * The value is not formed from repeating fixed-size smaller values, so it
8072  // can be used to detect endianness-related errors.
8073  uint64_t literal_base = 0x0100001000100101UL;
8074 
8075  // Initialize the registers.
8076  __ Mov(x0, literal_base);
8077  __ Add(x1, x0, x0);
8078 
8079  __ Claim(4);
8080 
8081  // Endianness tests.
8082  // After this section:
8083  // x4 should match x0[31:0]:x0[63:32]
8084  // w5 should match w1[15:0]:w1[31:16]
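  // Why this holds: the stack now contains two little-endian copies of x0 at
  // byte offsets 0 and 8, so an X-sized peek at offset 4 reads x0[63:32] into
  // its low word (bytes 4-7) and x0[31:0] into its high word (bytes 8-11),
  // i.e. x0 rotated by 32 bits. The W-sized peek at offset 2 rotates w1 by
  // 16 bits in the same way.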
8085  __ Poke(x0, 0);
8086  __ Poke(x0, 8);
8087  __ Peek(x4, 4);
8088 
8089  __ Poke(w1, 0);
8090  __ Poke(w1, 4);
8091  __ Peek(w5, 2);
8092 
8093  __ Drop(4);
8094 
8095  END();
8096  RUN();
8097 
8098  uint64_t x0_expected = literal_base * 1;
8099  uint64_t x1_expected = literal_base * 2;
8100  uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
8101  uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
8102  ((x1_expected >> 16) & 0x0000ffff);
8103 
8104  ASSERT_EQUAL_64(x0_expected, x0);
8105  ASSERT_EQUAL_64(x1_expected, x1);
8106  ASSERT_EQUAL_64(x4_expected, x4);
8107  ASSERT_EQUAL_64(x5_expected, x5);
8108 
8109  TEARDOWN();
8110 }
8111 
8112 
8113 TEST(peek_poke_mixed) {
8114  INIT_V8();
8115  SETUP();
8116  START();
8117 
8118  // The literal base is chosen to have two useful properties:
8119  // * When multiplied by small values (such as a register index), this value
8120  // is clearly readable in the result.
8121  // * The value is not formed from repeating fixed-size smaller values, so it
8122  // can be used to detect endianness-related errors.
8123  uint64_t literal_base = 0x0100001000100101UL;
8124 
8125  // Initialize the registers.
8126  __ Mov(x0, literal_base);
8127  __ Add(x1, x0, x0);
8128  __ Add(x2, x1, x0);
8129  __ Add(x3, x2, x0);
8130 
8131  __ Claim(4);
8132 
8133  // Mix with other stack operations.
8134  // After this section:
8135  // x0-x3 should be unchanged.
8136  // x6 should match x1[31:0]:x0[63:32]
8137  // w7 should match x1[15:0]:x0[63:48]
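  // The straddled reads come from the intervening Drop(1, kWRegSize) and
  // Claim(1) below: they shift the stack pointer by half an X slot relative
  // to the poked data, so the x6 and w7 peeks each span the boundary between
  // the x0 and x1 slots.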
8138  __ Poke(x1, 8);
8139  __ Poke(x0, 0);
8140  {
8141  ASSERT(__ StackPointer().Is(csp));
8142  __ Mov(x4, __ StackPointer());
8143  __ SetStackPointer(x4);
8144 
8145  __ Poke(wzr, 0); // Clobber the space we're about to drop.
8146  __ Drop(1, kWRegSize);
8147  __ Peek(x6, 0);
8148  __ Claim(1);
8149  __ Peek(w7, 10);
8150  __ Poke(x3, 28);
8151  __ Poke(xzr, 0); // Clobber the space we're about to drop.
8152  __ Drop(1);
8153  __ Poke(x2, 12);
8154  __ Push(w0);
8155 
8156  __ Mov(csp, __ StackPointer());
8157  __ SetStackPointer(csp);
8158  }
8159 
8160  __ Pop(x0, x1, x2, x3);
8161 
8162  END();
8163  RUN();
8164 
8165  uint64_t x0_expected = literal_base * 1;
8166  uint64_t x1_expected = literal_base * 2;
8167  uint64_t x2_expected = literal_base * 3;
8168  uint64_t x3_expected = literal_base * 4;
8169  uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
8170  uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
8171  ((x0_expected >> 48) & 0x0000ffff);
8172 
8173  ASSERT_EQUAL_64(x0_expected, x0);
8174  ASSERT_EQUAL_64(x1_expected, x1);
8175  ASSERT_EQUAL_64(x2_expected, x2);
8176  ASSERT_EQUAL_64(x3_expected, x3);
8177  ASSERT_EQUAL_64(x6_expected, x6);
8178  ASSERT_EQUAL_64(x7_expected, x7);
8179 
8180  TEARDOWN();
8181 }
8182 
8183 
8184 // This enum is used only as an argument to the push-pop test helpers.
8185 enum PushPopMethod {
8186  // Push or Pop using the Push and Pop methods, with blocks of up to four
8187  // registers. (Smaller blocks will be used if necessary.)
8188  PushPopByFour,
8189 
8190  // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
8191  PushPopRegList
8192 };
8193 
8194 
8195 // The maximum number of registers that can be used by the PushPopJssp* tests,
8196 // where a reg_count field is provided.
8197 static int const kPushPopJsspMaxRegCount = -1;
8198 
8199 // Test a simple push-pop pattern:
8200 // * Claim <claim> bytes to set the stack alignment.
8201 // * Push <reg_count> registers with size <reg_size>.
8202 // * Clobber the register contents.
8203 // * Pop <reg_count> registers to restore the original contents.
8204 // * Drop <claim> bytes to restore the original stack pointer.
8205 //
8206 // Different push and pop methods can be specified independently to test for
8207 // proper word-endian behaviour.
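// The TEST functions below invoke this helper with every (push, pop)
// combination of the two PushPopMethod values, so each transfer style is
// exercised against the memory layout produced by the other.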
8208 static void PushPopJsspSimpleHelper(int reg_count,
8209  int claim,
8210  int reg_size,
8211  PushPopMethod push_method,
8212  PushPopMethod pop_method) {
8213  SETUP();
8214 
8215  START();
8216 
8217  // Registers x8 and x9 are used by the macro assembler for debug code (for
8218  // example in 'Pop'), so we can't use them here. We can't use jssp because it
8219  // will be the stack pointer for this test.
8220  static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
8221  if (reg_count == kPushPopJsspMaxRegCount) {
8222  reg_count = CountSetBits(allowed, kNumberOfRegisters);
8223  }
8224  // Work out which registers to use, based on reg_size.
8225  Register r[kNumberOfRegisters];
8226  Register x[kNumberOfRegisters];
8227  RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
8228  allowed);
8229 
8230  // The literal base is chosen to have two useful properties:
8231  // * When multiplied by small values (such as a register index), this value
8232  // is clearly readable in the result.
8233  // * The value is not formed from repeating fixed-size smaller values, so it
8234  // can be used to detect endianness-related errors.
8235  uint64_t literal_base = 0x0100001000100101UL;
8236 
8237  {
8238  ASSERT(__ StackPointer().Is(csp));
8239  __ Mov(jssp, __ StackPointer());
8240  __ SetStackPointer(jssp);
8241 
8242  int i;
8243 
8244  // Initialize the registers.
8245  for (i = 0; i < reg_count; i++) {
8246  // Always write into the X register, to ensure that the upper word is
8247  // properly ignored by Push when testing W registers.
8248  if (!x[i].IsZero()) {
8249  __ Mov(x[i], literal_base * i);
8250  }
8251  }
8252 
8253  // Claim memory first, as requested.
8254  __ Claim(claim, kByteSizeInBytes);
8255 
8256  switch (push_method) {
8257  case PushPopByFour:
8258  // Push high-numbered registers first (to the highest addresses).
8259  for (i = reg_count; i >= 4; i -= 4) {
8260  __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
8261  }
8262  // Finish off the leftovers.
8263  switch (i) {
8264  case 3: __ Push(r[2], r[1], r[0]); break;
8265  case 2: __ Push(r[1], r[0]); break;
8266  case 1: __ Push(r[0]); break;
8267  default: ASSERT(i == 0); break;
8268  }
8269  break;
8270  case PushPopRegList:
8271  __ PushSizeRegList(list, reg_size);
8272  break;
8273  }
8274 
8275  // Clobber all the registers, to ensure that they get repopulated by Pop.
8276  Clobber(&masm, list);
8277 
8278  switch (pop_method) {
8279  case PushPopByFour:
8280  // Pop low-numbered registers first (from the lowest addresses).
8281  for (i = 0; i <= (reg_count-4); i += 4) {
8282  __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
8283  }
8284  // Finish off the leftovers.
8285  switch (reg_count - i) {
8286  case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
8287  case 2: __ Pop(r[i], r[i+1]); break;
8288  case 1: __ Pop(r[i]); break;
8289  default: ASSERT(i == reg_count); break;
8290  }
8291  break;
8292  case PushPopRegList:
8293  __ PopSizeRegList(list, reg_size);
8294  break;
8295  }
8296 
8297  // Drop memory to restore jssp.
8298  __ Drop(claim, kByteSizeInBytes);
8299 
8300  __ Mov(csp, __ StackPointer());
8301  __ SetStackPointer(csp);
8302  }
8303 
8304  END();
8305 
8306  RUN();
8307 
8308  // Check that the register contents were preserved.
8309  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8310  // that the upper word was properly cleared by Pop.
8311  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
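  // For W-register runs (reg_size == 32) this truncates the base to
  // 0x00100101, so the expected value for register i is 0x00100101 * i:
  // the W value that Push stored and that Pop must have zero-extended.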
8312  for (int i = 0; i < reg_count; i++) {
8313  if (x[i].IsZero()) {
8314  ASSERT_EQUAL_64(0, x[i]);
8315  } else {
8316  ASSERT_EQUAL_64(literal_base * i, x[i]);
8317  }
8318  }
8319 
8320  TEARDOWN();
8321 }
8322 
8323 
8324 TEST(push_pop_jssp_simple_32) {
8325  INIT_V8();
8326  for (int claim = 0; claim <= 8; claim++) {
8327  for (int count = 0; count <= 8; count++) {
8328  PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8329  PushPopByFour, PushPopByFour);
8330  PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8331  PushPopByFour, PushPopRegList);
8332  PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8333  PushPopRegList, PushPopByFour);
8334  PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8335  PushPopRegList, PushPopRegList);
8336  }
8337  // Test with the maximum number of registers.
8338  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8339  PushPopByFour, PushPopByFour);
8340  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8341  PushPopByFour, PushPopRegList);
8342  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8343  PushPopRegList, PushPopByFour);
8344  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8345  PushPopRegList, PushPopRegList);
8346  }
8347 }
8348 
8349 
8350 TEST(push_pop_jssp_simple_64) {
8351  INIT_V8();
8352  for (int claim = 0; claim <= 8; claim++) {
8353  for (int count = 0; count <= 8; count++) {
8354  PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8355  PushPopByFour, PushPopByFour);
8356  PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8357  PushPopByFour, PushPopRegList);
8358  PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8359  PushPopRegList, PushPopByFour);
8360  PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8361  PushPopRegList, PushPopRegList);
8362  }
8363  // Test with the maximum number of registers.
8364  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8365  PushPopByFour, PushPopByFour);
8366  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8367  PushPopByFour, PushPopRegList);
8368  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8369  PushPopRegList, PushPopByFour);
8370  PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8371  PushPopRegList, PushPopRegList);
8372  }
8373 }
8374 
8375 
8376 // The maximum number of registers that can be used by the PushPopFPJssp* tests,
8377 // where a reg_count field is provided.
8378 static int const kPushPopFPJsspMaxRegCount = -1;
8379 
8380 // Test a simple push-pop pattern:
8381 // * Claim <claim> bytes to set the stack alignment.
8382 // * Push <reg_count> FP registers with size <reg_size>.
8383 // * Clobber the register contents.
8384 // * Pop <reg_count> FP registers to restore the original contents.
8385 // * Drop <claim> bytes to restore the original stack pointer.
8386 //
8387 // Different push and pop methods can be specified independently to test for
8388 // proper word-endian behaviour.
8389 static void PushPopFPJsspSimpleHelper(int reg_count,
8390  int claim,
8391  int reg_size,
8392  PushPopMethod push_method,
8393  PushPopMethod pop_method) {
8394  SETUP();
8395 
8396  START();
8397 
8398  // We can use any floating-point register. None of them are reserved for
8399  // debug code, for example.
8400  static RegList const allowed = ~0;
8401  if (reg_count == kPushPopFPJsspMaxRegCount) {
8402  reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
8403  }
8404  // Work out which registers to use, based on reg_size.
8405  FPRegister v[kNumberOfFPRegisters];
8406  FPRegister d[kNumberOfFPRegisters];
8407  RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
8408  allowed);
8409 
8410  // The literal base is chosen to have two useful properties:
8411  // * When multiplied (using an integer) by small values (such as a register
8412  // index), this value is clearly readable in the result.
8413  // * The value is not formed from repeating fixed-size smaller values, so it
8414  // can be used to detect endianness-related errors.
8415  // * It is never a floating-point NaN, and will therefore always compare
8416  // equal to itself.
8417  uint64_t literal_base = 0x0100001000100101UL;
8418 
8419  {
8420  ASSERT(__ StackPointer().Is(csp));
8421  __ Mov(jssp, __ StackPointer());
8422  __ SetStackPointer(jssp);
8423 
8424  int i;
8425 
8426  // Initialize the registers, using X registers to load the literal.
8427  __ Mov(x0, 0);
8428  __ Mov(x1, literal_base);
8429  for (i = 0; i < reg_count; i++) {
8430  // Always write into the D register, to ensure that the upper word is
8431  // properly ignored by Push when testing S registers.
8432  __ Fmov(d[i], x0);
8433  // Calculate the next literal.
8434  __ Add(x0, x0, x1);
8435  }
8436 
8437  // Claim memory first, as requested.
8438  __ Claim(claim, kByteSizeInBytes);
8439 
8440  switch (push_method) {
8441  case PushPopByFour:
8442  // Push high-numbered registers first (to the highest addresses).
8443  for (i = reg_count; i >= 4; i -= 4) {
8444  __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
8445  }
8446  // Finish off the leftovers.
8447  switch (i) {
8448  case 3: __ Push(v[2], v[1], v[0]); break;
8449  case 2: __ Push(v[1], v[0]); break;
8450  case 1: __ Push(v[0]); break;
8451  default: ASSERT(i == 0); break;
8452  }
8453  break;
8454  case PushPopRegList:
8455  __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
8456  break;
8457  }
8458 
8459  // Clobber all the registers, to ensure that they get repopulated by Pop.
8460  ClobberFP(&masm, list);
8461 
8462  switch (pop_method) {
8463  case PushPopByFour:
8464  // Pop low-numbered registers first (from the lowest addresses).
8465  for (i = 0; i <= (reg_count-4); i += 4) {
8466  __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
8467  }
8468  // Finish off the leftovers.
8469  switch (reg_count - i) {
8470  case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
8471  case 2: __ Pop(v[i], v[i+1]); break;
8472  case 1: __ Pop(v[i]); break;
8473  default: ASSERT(i == reg_count); break;
8474  }
8475  break;
8476  case PushPopRegList:
8477  __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
8478  break;
8479  }
8480 
8481  // Drop memory to restore jssp.
8482  __ Drop(claim, kByteSizeInBytes);
8483 
8484  __ Mov(csp, __ StackPointer());
8485  __ SetStackPointer(csp);
8486  }
8487 
8488  END();
8489 
8490  RUN();
8491 
8492  // Check that the register contents were preserved.
8493  // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
8494  // test that the upper word was properly cleared by Pop.
8495  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8496  for (int i = 0; i < reg_count; i++) {
8497  uint64_t literal = literal_base * i;
8498  double expected;
8499  memcpy(&expected, &literal, sizeof(expected));
8500  ASSERT_EQUAL_FP64(expected, d[i]);
8501  }
8502 
8503  TEARDOWN();
8504 }
8505 
8506 
8507 TEST(push_pop_fp_jssp_simple_32) {
8508  INIT_V8();
8509  for (int claim = 0; claim <= 8; claim++) {
8510  for (int count = 0; count <= 8; count++) {
8511  PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8512  PushPopByFour, PushPopByFour);
8513  PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8514  PushPopByFour, PushPopRegList);
8515  PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8516  PushPopRegList, PushPopByFour);
8517  PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8518  PushPopRegList, PushPopRegList);
8519  }
8520  // Test with the maximum number of registers.
8521  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8522  PushPopByFour, PushPopByFour);
8523  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8524  PushPopByFour, PushPopRegList);
8525  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8526  PushPopRegList, PushPopByFour);
8527  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8528  PushPopRegList, PushPopRegList);
8529  }
8530 }
8531 
8532 
8533 TEST(push_pop_fp_jssp_simple_64) {
8534  INIT_V8();
8535  for (int claim = 0; claim <= 8; claim++) {
8536  for (int count = 0; count <= 8; count++) {
8537  PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8538  PushPopByFour, PushPopByFour);
8539  PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8540  PushPopByFour, PushPopRegList);
8541  PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8542  PushPopRegList, PushPopByFour);
8543  PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8544  PushPopRegList, PushPopRegList);
8545  }
8546  // Test with the maximum number of registers.
8547  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8548  PushPopByFour, PushPopByFour);
8549  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8550  PushPopByFour, PushPopRegList);
8551  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8552  PushPopRegList, PushPopByFour);
8553  PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8554  PushPopRegList, PushPopRegList);
8555  }
8556 }
8557 
8558 
8559 // Push and pop data using an overlapping combination of Push/Pop and
8560 // RegList-based methods.
8561 static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
8562  SETUP();
8563 
8564  // Registers x8 and x9 are used by the macro assembler for debug code (for
8565  // example in 'Pop'), so we can't use them here. We can't use jssp because it
8566  // will be the stack pointer for this test.
8567  static RegList const allowed =
8568  ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
8569  // Work out which registers to use, based on reg_size.
8570  Register r[10];
8571  Register x[10];
8572  PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
8573 
8574  // Calculate some handy register lists.
8575  RegList r0_to_r3 = 0;
8576  for (int i = 0; i <= 3; i++) {
8577  r0_to_r3 |= x[i].Bit();
8578  }
8579  RegList r4_to_r5 = 0;
8580  for (int i = 4; i <= 5; i++) {
8581  r4_to_r5 |= x[i].Bit();
8582  }
8583  RegList r6_to_r9 = 0;
8584  for (int i = 6; i <= 9; i++) {
8585  r6_to_r9 |= x[i].Bit();
8586  }
8587 
8588  // The literal base is chosen to have two useful properties:
8589  // * When multiplied by small values (such as a register index), this value
8590  // is clearly readable in the result.
8591  // * The value is not formed from repeating fixed-size smaller values, so it
8592  // can be used to detect endianness-related errors.
8593  uint64_t literal_base = 0x0100001000100101UL;
8594 
8595  START();
8596  {
8597  ASSERT(__ StackPointer().Is(csp));
8598  __ Mov(jssp, __ StackPointer());
8599  __ SetStackPointer(jssp);
8600 
8601  // Claim memory first, as requested.
8602  __ Claim(claim, kByteSizeInBytes);
8603 
8604  __ Mov(x[3], literal_base * 3);
8605  __ Mov(x[2], literal_base * 2);
8606  __ Mov(x[1], literal_base * 1);
8607  __ Mov(x[0], literal_base * 0);
8608 
8609  __ PushSizeRegList(r0_to_r3, reg_size);
8610  __ Push(r[3], r[2]);
8611 
8612  Clobber(&masm, r0_to_r3);
8613  __ PopSizeRegList(r0_to_r3, reg_size);
8614 
8615  __ Push(r[2], r[1], r[3], r[0]);
8616 
8617  Clobber(&masm, r4_to_r5);
8618  __ Pop(r[4], r[5]);
8619  Clobber(&masm, r6_to_r9);
8620  __ Pop(r[6], r[7], r[8], r[9]);
8621 
8622  // Drop memory to restore jssp.
8623  __ Drop(claim, kByteSizeInBytes);
8624 
8625  __ Mov(csp, __ StackPointer());
8626  __ SetStackPointer(csp);
8627  }
8628 
8629  END();
8630 
8631  RUN();
8632 
8633  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8634  // that the upper word was properly cleared by Pop.
8635  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8636 
8637  ASSERT_EQUAL_64(literal_base * 3, x[9]);
8638  ASSERT_EQUAL_64(literal_base * 2, x[8]);
8639  ASSERT_EQUAL_64(literal_base * 0, x[7]);
8640  ASSERT_EQUAL_64(literal_base * 3, x[6]);
8641  ASSERT_EQUAL_64(literal_base * 1, x[5]);
8642  ASSERT_EQUAL_64(literal_base * 2, x[4]);
8643 
8644  TEARDOWN();
8645 }
8646 
8647 
8648 TEST(push_pop_jssp_mixed_methods_64) {
8649  INIT_V8();
8650  for (int claim = 0; claim <= 8; claim++) {
8651  PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
8652  }
8653 }
8654 
8655 
8656 TEST(push_pop_jssp_mixed_methods_32) {
8657  INIT_V8();
8658  for (int claim = 0; claim <= 8; claim++) {
8659  PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
8660  }
8661 }
8662 
8663 
8664 // Push and pop data using overlapping X- and W-sized quantities.
8665 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
8666  // This test emits rather a lot of code.
8667  SETUP_SIZE(BUF_SIZE * 2);
8668 
8669  // Work out which registers to use, based on reg_size.
8670  Register tmp = x8;
8671  static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
8672  if (reg_count == kPushPopJsspMaxRegCount) {
8673  reg_count = CountSetBits(allowed, kNumberOfRegisters);
8674  }
8675  Register w[kNumberOfRegisters];
8676  Register x[kNumberOfRegisters];
8677  RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
8678 
8679  // The number of W-sized slots we expect to pop. When we pop, we alternate
8680  // between W and X registers, so we need reg_count*1.5 W-sized slots.
8681  int const requested_w_slots = reg_count + reg_count / 2;
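  // Note the integer division: reg_count == 8 needs 8 + 4 == 12 W slots
  // (six X slots), while reg_count == 7 needs 7 + 3 == 10.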
8682 
8683  // Track what _should_ be on the stack, using W-sized slots.
8684  static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
8685  uint32_t stack[kMaxWSlots];
8686  for (int i = 0; i < kMaxWSlots; i++) {
8687  stack[i] = 0xdeadbeef;
8688  }
8689 
8690  // The literal base is chosen to have two useful properties:
8691  // * When multiplied by small values (such as a register index), this value
8692  // is clearly readable in the result.
8693  // * The value is not formed from repeating fixed-size smaller values, so it
8694  // can be used to detect endianness-related errors.
8695  static uint64_t const literal_base = 0x0100001000100101UL;
8696  static uint64_t const literal_base_hi = literal_base >> 32;
8697  static uint64_t const literal_base_lo = literal_base & 0xffffffff;
8698  static uint64_t const literal_base_w = literal_base & 0xffffffff;
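  // literal_base_lo and literal_base_w are numerically equal; the separate
  // names mark their roles below: _hi/_lo model the two W slots written by
  // an X push, while _w models the value written by a W push.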
8699 
8700  START();
8701  {
8702  ASSERT(__ StackPointer().Is(csp));
8703  __ Mov(jssp, __ StackPointer());
8704  __ SetStackPointer(jssp);
8705 
8706  // Initialize the registers.
8707  for (int i = 0; i < reg_count; i++) {
8708  // Always write into the X register, to ensure that the upper word is
8709  // properly ignored by Push when testing W registers.
8710  if (!x[i].IsZero()) {
8711  __ Mov(x[i], literal_base * i);
8712  }
8713  }
8714 
8715  // Claim memory first, as requested.
8716  __ Claim(claim, kByteSizeInBytes);
8717 
8718  // The push-pop pattern is as follows:
8719  // Push: Pop:
8720  // x[0](hi) -> w[0]
8721  // x[0](lo) -> x[1](hi)
8722  // w[1] -> x[1](lo)
8723  // w[1] -> w[2]
8724  // x[2](hi) -> x[2](hi)
8725  // x[2](lo) -> x[2](lo)
8726  // x[2](hi) -> w[3]
8727  // x[2](lo) -> x[4](hi)
8728  // x[2](hi) -> x[4](lo)
8729  // x[2](lo) -> w[5]
8730  // w[3] -> x[5](hi)
8731  // w[3] -> x[6](lo)
8732  // w[3] -> w[7]
8733  // w[3] -> x[8](hi)
8734  // x[4](hi) -> x[8](lo)
8735  // x[4](lo) -> w[9]
8736  // ... pattern continues ...
8737  //
8738  // That is, registers are pushed starting with the lower numbers,
8739  // alternating between x and w registers, and pushing i%4+1 copies of each,
8740  // where i is the register number.
8741  // Registers are popped starting with the higher numbers one-by-one,
8742  // alternating between x and w registers, but only popping one at a time.
8743  //
8744  // This pattern provides a wide variety of alignment effects and overlaps.
8745 
8746  // ---- Push ----
8747 
8748  int active_w_slots = 0;
8749  for (int i = 0; active_w_slots < requested_w_slots; i++) {
8750  ASSERT(i < reg_count);
8751  // In order to test various arguments to PushMultipleTimes, and to try to
8752  // exercise different alignment and overlap effects, we push each
8753  // register a different number of times.
8754  int times = i % 4 + 1;
8755  if (i & 1) {
8756  // Push odd-numbered registers as W registers.
8757  if (i & 2) {
8758  __ PushMultipleTimes(w[i], times);
8759  } else {
8760  // Use a register to specify the count.
8761  __ Mov(tmp.W(), times);
8762  __ PushMultipleTimes(w[i], tmp.W());
8763  }
8764  // Fill in the expected stack slots.
8765  for (int j = 0; j < times; j++) {
8766  if (w[i].Is(wzr)) {
8767  // The zero register always writes zeroes.
8768  stack[active_w_slots++] = 0;
8769  } else {
8770  stack[active_w_slots++] = literal_base_w * i;
8771  }
8772  }
8773  } else {
8774  // Push even-numbered registers as X registers.
8775  if (i & 2) {
8776  __ PushMultipleTimes(x[i], times);
8777  } else {
8778  // Use a register to specify the count.
8779  __ Mov(tmp, times);
8780  __ PushMultipleTimes(x[i], tmp);
8781  }
8782  // Fill in the expected stack slots.
8783  for (int j = 0; j < times; j++) {
8784  if (x[i].IsZero()) {
8785  // The zero register always writes zeroes.
8786  stack[active_w_slots++] = 0;
8787  stack[active_w_slots++] = 0;
8788  } else {
8789  stack[active_w_slots++] = literal_base_hi * i;
8790  stack[active_w_slots++] = literal_base_lo * i;
8791  }
8792  }
8793  }
8794  }
8795  // Because we were pushing several registers at a time, we probably pushed
8796  // more than we needed to.
8797  if (active_w_slots > requested_w_slots) {
8798  __ Drop(active_w_slots - requested_w_slots, kWRegSize);
8799  // Bump the number of active W-sized slots back to where it should be,
8800  // and fill the empty space with a dummy value.
8801  do {
8802  stack[active_w_slots--] = 0xdeadbeef;
8803  } while (active_w_slots > requested_w_slots);
8804  }
8805 
8806  // ---- Pop ----
8807 
8808  Clobber(&masm, list);
8809 
8810  // If popping an even number of registers, the first one will be X-sized.
8811  // Otherwise, the first one will be W-sized.
8812  bool next_is_64 = !(reg_count & 1);
8813  for (int i = reg_count-1; i >= 0; i--) {
8814  if (next_is_64) {
8815  __ Pop(x[i]);
8816  active_w_slots -= 2;
8817  } else {
8818  __ Pop(w[i]);
8819  active_w_slots -= 1;
8820  }
8821  next_is_64 = !next_is_64;
8822  }
8823  ASSERT(active_w_slots == 0);
8824 
8825  // Drop memory to restore jssp.
8826  __ Drop(claim, kByteSizeInBytes);
8827 
8828  __ Mov(csp, __ StackPointer());
8829  __ SetStackPointer(csp);
8830  }
8831 
8832  END();
8833 
8834  RUN();
8835 
8836  int slot = 0;
8837  for (int i = 0; i < reg_count; i++) {
8838  // Even-numbered registers were written as W registers.
8839  // Odd-numbered registers were written as X registers.
8840  bool expect_64 = (i & 1);
8841  uint64_t expected;
8842 
8843  if (expect_64) {
8844  uint64_t hi = stack[slot++];
8845  uint64_t lo = stack[slot++];
8846  expected = (hi << 32) | lo;
8847  } else {
8848  expected = stack[slot++];
8849  }
8850 
8851  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
8852  // test that the upper word was properly cleared by Pop.
8853  if (x[i].IsZero()) {
8854  ASSERT_EQUAL_64(0, x[i]);
8855  } else {
8856  ASSERT_EQUAL_64(expected, x[i]);
8857  }
8858  }
8859  ASSERT(slot == requested_w_slots);
8860 
8861  TEARDOWN();
8862 }
8863 
8864 
8865 TEST(push_pop_jssp_wx_overlap) {
8866  INIT_V8();
8867  for (int claim = 0; claim <= 8; claim++) {
8868  for (int count = 1; count <= 8; count++) {
8869  PushPopJsspWXOverlapHelper(count, claim);
8870  PushPopJsspWXOverlapHelper(count, claim);
8871  PushPopJsspWXOverlapHelper(count, claim);
8872  PushPopJsspWXOverlapHelper(count, claim);
8873  }
8874  // Test with the maximum number of registers.
8875  PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8876  PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8877  PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8878  PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
8879  }
8880 }
8881 
8882 
8883 TEST(push_pop_csp) {
8884  INIT_V8();
8885  SETUP();
8886 
8887  START();
8888 
8889  ASSERT(csp.Is(__ StackPointer()));
8890 
8891  __ Mov(x3, 0x3333333333333333UL);
8892  __ Mov(x2, 0x2222222222222222UL);
8893  __ Mov(x1, 0x1111111111111111UL);
8894  __ Mov(x0, 0x0000000000000000UL);
8895  __ Claim(2);
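  // csp must remain 16-byte aligned while it is the stack pointer, so this
  // test always claims and drops stack space in pairs of X-sized slots.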
8896  __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
8897  __ Push(x3, x2);
8898  __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
8899  __ Push(x2, x1, x3, x0);
8900  __ Pop(x4, x5);
8901  __ Pop(x6, x7, x8, x9);
8902 
8903  __ Claim(2);
8904  __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
8905  __ Push(w3, w1, w2, w0);
8906  __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
8907  __ Pop(w14, w15, w16, w17);
8908 
8909  __ Claim(2);
8910  __ Push(w2, w2, w1, w1);
8911  __ Push(x3, x3);
8912  __ Pop(w18, w19, w20, w21);
8913  __ Pop(x22, x23);
8914 
8915  __ Claim(2);
8916  __ PushXRegList(x1.Bit() | x22.Bit());
8917  __ PopXRegList(x24.Bit() | x26.Bit());
8918 
8919  __ Claim(2);
8920  __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
8921  __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
8922 
8923  __ Claim(2);
8924  __ PushXRegList(0);
8925  __ PopXRegList(0);
8926  __ PushXRegList(0xffffffff);
8927  __ PopXRegList(0xffffffff);
8928  __ Drop(12);
8929 
8930  END();
8931 
8932  RUN();
8933 
8934  ASSERT_EQUAL_64(0x1111111111111111UL, x3);
8935  ASSERT_EQUAL_64(0x0000000000000000UL, x2);
8936  ASSERT_EQUAL_64(0x3333333333333333UL, x1);
8937  ASSERT_EQUAL_64(0x2222222222222222UL, x0);
8938  ASSERT_EQUAL_64(0x3333333333333333UL, x9);
8939  ASSERT_EQUAL_64(0x2222222222222222UL, x8);
8940  ASSERT_EQUAL_64(0x0000000000000000UL, x7);
8941  ASSERT_EQUAL_64(0x3333333333333333UL, x6);
8942  ASSERT_EQUAL_64(0x1111111111111111UL, x5);
8943  ASSERT_EQUAL_64(0x2222222222222222UL, x4);
8944 
8945  ASSERT_EQUAL_32(0x11111111U, w13);
8946  ASSERT_EQUAL_32(0x33333333U, w12);
8947  ASSERT_EQUAL_32(0x00000000U, w11);
8948  ASSERT_EQUAL_32(0x22222222U, w10);
8949  ASSERT_EQUAL_32(0x11111111U, w17);
8950  ASSERT_EQUAL_32(0x00000000U, w16);
8951  ASSERT_EQUAL_32(0x33333333U, w15);
8952  ASSERT_EQUAL_32(0x22222222U, w14);
8953 
8954  ASSERT_EQUAL_32(0x11111111U, w18);
8955  ASSERT_EQUAL_32(0x11111111U, w19);
8956  ASSERT_EQUAL_32(0x11111111U, w20);
8957  ASSERT_EQUAL_32(0x11111111U, w21);
8958  ASSERT_EQUAL_64(0x3333333333333333UL, x22);
8959  ASSERT_EQUAL_64(0x0000000000000000UL, x23);
8960 
8961  ASSERT_EQUAL_64(0x3333333333333333UL, x24);
8962  ASSERT_EQUAL_64(0x3333333333333333UL, x26);
8963 
8964  ASSERT_EQUAL_32(0x33333333U, w25);
8965  ASSERT_EQUAL_32(0x00000000U, w27);
8966  ASSERT_EQUAL_32(0x22222222U, w28);
8967  ASSERT_EQUAL_32(0x33333333U, w29);
8968  TEARDOWN();
8969 }
8970 
8971 
8972 TEST(push_queued) {
8973  INIT_V8();
8974  SETUP();
8975 
8976  START();
8977 
8978  ASSERT(__ StackPointer().Is(csp));
8979  __ Mov(jssp, __ StackPointer());
8980  __ SetStackPointer(jssp);
8981 
8982  MacroAssembler::PushPopQueue queue(&masm);
8983 
8984  // Queue up registers.
8985  queue.Queue(x0);
8986  queue.Queue(x1);
8987  queue.Queue(x2);
8988  queue.Queue(x3);
8989 
8990  queue.Queue(w4);
8991  queue.Queue(w5);
8992  queue.Queue(w6);
8993 
8994  queue.Queue(d0);
8995  queue.Queue(d1);
8996 
8997  queue.Queue(s2);
8998 
8999  __ Mov(x0, 0x1234000000000000);
9000  __ Mov(x1, 0x1234000100010001);
9001  __ Mov(x2, 0x1234000200020002);
9002  __ Mov(x3, 0x1234000300030003);
9003  __ Mov(w4, 0x12340004);
9004  __ Mov(w5, 0x12340005);
9005  __ Mov(w6, 0x12340006);
9006  __ Fmov(d0, 123400.0);
9007  __ Fmov(d1, 123401.0);
9008  __ Fmov(s2, 123402.0);
9009 
9010  // Actually push them.
9011  queue.PushQueued();
9012 
9013  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
9014  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
9015 
9016  // Pop them conventionally.
9017  __ Pop(s2);
9018  __ Pop(d1, d0);
9019  __ Pop(w6, w5, w4);
9020  __ Pop(x3, x2, x1, x0);
9021 
9022  __ Mov(csp, __ StackPointer());
9023  __ SetStackPointer(csp);
9024 
9025  END();
9026 
9027  RUN();
9028 
9029  ASSERT_EQUAL_64(0x1234000000000000, x0);
9030  ASSERT_EQUAL_64(0x1234000100010001, x1);
9031  ASSERT_EQUAL_64(0x1234000200020002, x2);
9032  ASSERT_EQUAL_64(0x1234000300030003, x3);
9033 
9034  ASSERT_EQUAL_32(0x12340004, w4);
9035  ASSERT_EQUAL_32(0x12340005, w5);
9036  ASSERT_EQUAL_32(0x12340006, w6);
9037 
9038  ASSERT_EQUAL_FP64(123400.0, d0);
9039  ASSERT_EQUAL_FP64(123401.0, d1);
9040 
9041  ASSERT_EQUAL_FP32(123402.0, s2);
9042 
9043  TEARDOWN();
9044 }
9045 
9046 
9047 TEST(pop_queued) {
9048  INIT_V8();
9049  SETUP();
9050 
9051  START();
9052 
9053  ASSERT(__ StackPointer().Is(csp));
9054  __ Mov(jssp, __ StackPointer());
9055  __ SetStackPointer(jssp);
9056 
9057  MacroAssembler::PushPopQueue queue(&masm);
9058 
9059  __ Mov(x0, 0x1234000000000000);
9060  __ Mov(x1, 0x1234000100010001);
9061  __ Mov(x2, 0x1234000200020002);
9062  __ Mov(x3, 0x1234000300030003);
9063  __ Mov(w4, 0x12340004);
9064  __ Mov(w5, 0x12340005);
9065  __ Mov(w6, 0x12340006);
9066  __ Fmov(d0, 123400.0);
9067  __ Fmov(d1, 123401.0);
9068  __ Fmov(s2, 123402.0);
9069 
9070  // Push registers conventionally.
9071  __ Push(x0, x1, x2, x3);
9072  __ Push(w4, w5, w6);
9073  __ Push(d0, d1);
9074  __ Push(s2);
9075 
9076  // Queue up a pop.
9077  queue.Queue(s2);
9078 
9079  queue.Queue(d1);
9080  queue.Queue(d0);
9081 
9082  queue.Queue(w6);
9083  queue.Queue(w5);
9084  queue.Queue(w4);
9085 
9086  queue.Queue(x3);
9087  queue.Queue(x2);
9088  queue.Queue(x1);
9089  queue.Queue(x0);
9090 
9091  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
9092  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
9093 
9094  // Actually pop them.
9095  queue.PopQueued();
9096 
9097  __ Mov(csp, __ StackPointer());
9098  __ SetStackPointer(csp);
9099 
9100  END();
9101 
9102  RUN();
9103 
9104  ASSERT_EQUAL_64(0x1234000000000000, x0);
9105  ASSERT_EQUAL_64(0x1234000100010001, x1);
9106  ASSERT_EQUAL_64(0x1234000200020002, x2);
9107  ASSERT_EQUAL_64(0x1234000300030003, x3);
9108 
9109  ASSERT_EQUAL_64(0x0000000012340004, x4);
9110  ASSERT_EQUAL_64(0x0000000012340005, x5);
9111  ASSERT_EQUAL_64(0x0000000012340006, x6);
9112 
9113  ASSERT_EQUAL_FP64(123400.0, d0);
9114  ASSERT_EQUAL_FP64(123401.0, d1);
9115 
9116  ASSERT_EQUAL_FP32(123402.0, s2);
9117 
9118  TEARDOWN();
9119 }
9120 
9121 
9122 TEST(jump_both_smi) {
9123  INIT_V8();
9124  SETUP();
9125 
9126  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
9127  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
9128  Label return1, return2, return3, done;
9129 
9130  START();
9131 
9132  __ Mov(x0, 0x5555555500000001UL); // A pointer.
9133  __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
9134  __ Mov(x2, 0x1234567800000000UL); // A smi.
9135  __ Mov(x3, 0x8765432100000000UL); // A smi.
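  // Under V8's 64-bit smi scheme, a smi carries its 32-bit payload in the
  // upper word and has a zero lower word, while heap object pointers have
  // bit 0 set (the heap object tag). x0 and x1 are therefore rejected by
  // the smi checks below, and x2 and x3 are accepted.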
9136  __ Mov(x4, 0xdead);
9137  __ Mov(x5, 0xdead);
9138  __ Mov(x6, 0xdead);
9139  __ Mov(x7, 0xdead);
9140 
9141  __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
9142  __ Bind(&return1);
9143  __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
9144  __ Bind(&return2);
9145  __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
9146  __ Bind(&return3);
9147  __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
9148 
9149  __ Bind(&cond_fail_00);
9150  __ Mov(x4, 0);
9151  __ B(&return1);
9152  __ Bind(&cond_pass_00);
9153  __ Mov(x4, 1);
9154  __ B(&return1);
9155 
9156  __ Bind(&cond_fail_01);
9157  __ Mov(x5, 0);
9158  __ B(&return2);
9159  __ Bind(&cond_pass_01);
9160  __ Mov(x5, 1);
9161  __ B(&return2);
9162 
9163  __ Bind(&cond_fail_10);
9164  __ Mov(x6, 0);
9165  __ B(&return3);
9166  __ Bind(&cond_pass_10);
9167  __ Mov(x6, 1);
9168  __ B(&return3);
9169 
9170  __ Bind(&cond_fail_11);
9171  __ Mov(x7, 0);
9172  __ B(&done);
9173  __ Bind(&cond_pass_11);
9174  __ Mov(x7, 1);
9175 
9176  __ Bind(&done);
9177 
9178  END();
9179 
9180  RUN();
9181 
9182  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
9183  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
9184  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
9185  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
9186  ASSERT_EQUAL_64(0, x4);
9187  ASSERT_EQUAL_64(0, x5);
9188  ASSERT_EQUAL_64(0, x6);
9189  ASSERT_EQUAL_64(1, x7);
9190 
9191  TEARDOWN();
9192 }
9193 
9194 
9195 TEST(jump_either_smi) {
9196  INIT_V8();
9197  SETUP();
9198 
9199  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
9200  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
9201  Label return1, return2, return3, done;
9202 
9203  START();
9204 
9205  __ Mov(x0, 0x5555555500000001UL); // A pointer.
9206  __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
9207  __ Mov(x2, 0x1234567800000000UL); // A smi.
9208  __ Mov(x3, 0x8765432100000000UL); // A smi.
9209  __ Mov(x4, 0xdead);
9210  __ Mov(x5, 0xdead);
9211  __ Mov(x6, 0xdead);
9212  __ Mov(x7, 0xdead);
9213 
9214  __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
9215  __ Bind(&return1);
9216  __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
9217  __ Bind(&return2);
9218  __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
9219  __ Bind(&return3);
9220  __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
9221 
9222  __ Bind(&cond_fail_00);
9223  __ Mov(x4, 0);
9224  __ B(&return1);
9225  __ Bind(&cond_pass_00);
9226  __ Mov(x4, 1);
9227  __ B(&return1);
9228 
9229  __ Bind(&cond_fail_01);
9230  __ Mov(x5, 0);
9231  __ B(&return2);
9232  __ Bind(&cond_pass_01);
9233  __ Mov(x5, 1);
9234  __ B(&return2);
9235 
9236  __ Bind(&cond_fail_10);
9237  __ Mov(x6, 0);
9238  __ B(&return3);
9239  __ Bind(&cond_pass_10);
9240  __ Mov(x6, 1);
9241  __ B(&return3);
9242 
9243  __ Bind(&cond_fail_11);
9244  __ Mov(x7, 0);
9245  __ B(&done);
9246  __ Bind(&cond_pass_11);
9247  __ Mov(x7, 1);
9248 
9249  __ Bind(&done);
9250 
9251  END();
9252 
9253  RUN();
9254 
9255  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
9256  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
9257  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
9258  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
9259  ASSERT_EQUAL_64(0, x4);
9260  ASSERT_EQUAL_64(1, x5);
9261  ASSERT_EQUAL_64(1, x6);
9262  ASSERT_EQUAL_64(1, x7);
9263 
9264  TEARDOWN();
9265 }
9266 
9267 
9268 TEST(noreg) {
9269  // This test doesn't generate any code, but it verifies some invariants
9270  // related to NoReg.
9271  CHECK(NoReg.Is(NoFPReg));
9272  CHECK(NoFPReg.Is(NoReg));
9273  CHECK(NoReg.Is(NoCPUReg));
9274  CHECK(NoCPUReg.Is(NoReg));
9275  CHECK(NoFPReg.Is(NoCPUReg));
9276  CHECK(NoCPUReg.Is(NoFPReg));
9277 
9278  CHECK(NoReg.IsNone());
9279  CHECK(NoFPReg.IsNone());
9280  CHECK(NoCPUReg.IsNone());
9281 }
9282 
9283 
9284 TEST(isvalid) {
9285  // This test doesn't generate any code, but it verifies some invariants
9286  // related to IsValid().
9287  CHECK(!NoReg.IsValid());
9288  CHECK(!NoFPReg.IsValid());
9289  CHECK(!NoCPUReg.IsValid());
9290 
9291  CHECK(x0.IsValid());
9292  CHECK(w0.IsValid());
9293  CHECK(x30.IsValid());
9294  CHECK(w30.IsValid());
9295  CHECK(xzr.IsValid());
9296  CHECK(wzr.IsValid());
9297 
9298  CHECK(csp.IsValid());
9299  CHECK(wcsp.IsValid());
9300 
9301  CHECK(d0.IsValid());
9302  CHECK(s0.IsValid());
9303  CHECK(d31.IsValid());
9304  CHECK(s31.IsValid());
9305 
9306  CHECK(x0.IsValidRegister());
9307  CHECK(w0.IsValidRegister());
9308  CHECK(xzr.IsValidRegister());
9309  CHECK(wzr.IsValidRegister());
9310  CHECK(csp.IsValidRegister());
9311  CHECK(wcsp.IsValidRegister());
9312  CHECK(!x0.IsValidFPRegister());
9313  CHECK(!w0.IsValidFPRegister());
9314  CHECK(!xzr.IsValidFPRegister());
9315  CHECK(!wzr.IsValidFPRegister());
9316  CHECK(!csp.IsValidFPRegister());
9317  CHECK(!wcsp.IsValidFPRegister());
9318 
9319  CHECK(d0.IsValidFPRegister());
9320  CHECK(s0.IsValidFPRegister());
9321  CHECK(!d0.IsValidRegister());
9322  CHECK(!s0.IsValidRegister());
9323 
9324  // Test the same as before, but using CPURegister types. This shouldn't make
9325  // any difference.
9326  CHECK(static_cast<CPURegister>(x0).IsValid());
9327  CHECK(static_cast<CPURegister>(w0).IsValid());
9328  CHECK(static_cast<CPURegister>(x30).IsValid());
9329  CHECK(static_cast<CPURegister>(w30).IsValid());
9330  CHECK(static_cast<CPURegister>(xzr).IsValid());
9331  CHECK(static_cast<CPURegister>(wzr).IsValid());
9332 
9333  CHECK(static_cast<CPURegister>(csp).IsValid());
9334  CHECK(static_cast<CPURegister>(wcsp).IsValid());
9335 
9336  CHECK(static_cast<CPURegister>(d0).IsValid());
9337  CHECK(static_cast<CPURegister>(s0).IsValid());
9338  CHECK(static_cast<CPURegister>(d31).IsValid());
9339  CHECK(static_cast<CPURegister>(s31).IsValid());
9340 
9341  CHECK(static_cast<CPURegister>(x0).IsValidRegister());
9342  CHECK(static_cast<CPURegister>(w0).IsValidRegister());
9343  CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
9344  CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
9345  CHECK(static_cast<CPURegister>(csp).IsValidRegister());
9346  CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
9347  CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
9348  CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
9349  CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
9350  CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
9351  CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
9352  CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
9353 
9354  CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
9355  CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
9356  CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
9357  CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
9358 }
9359 
9360 
9361 TEST(cpureglist_utils_x) {
9362  // This test doesn't generate any code, but it verifies the behaviour of
9363  // the CPURegList utility methods.
9364 
9365  // Test a list of X registers.
9366  CPURegList test(x0, x1, x2, x3);
9367 
9368  CHECK(test.IncludesAliasOf(x0));
9369  CHECK(test.IncludesAliasOf(x1));
9370  CHECK(test.IncludesAliasOf(x2));
9371  CHECK(test.IncludesAliasOf(x3));
9372  CHECK(test.IncludesAliasOf(w0));
9373  CHECK(test.IncludesAliasOf(w1));
9374  CHECK(test.IncludesAliasOf(w2));
9375  CHECK(test.IncludesAliasOf(w3));
9376 
9377  CHECK(!test.IncludesAliasOf(x4));
9378  CHECK(!test.IncludesAliasOf(x30));
9379  CHECK(!test.IncludesAliasOf(xzr));
9380  CHECK(!test.IncludesAliasOf(csp));
9381  CHECK(!test.IncludesAliasOf(w4));
9382  CHECK(!test.IncludesAliasOf(w30));
9383  CHECK(!test.IncludesAliasOf(wzr));
9384  CHECK(!test.IncludesAliasOf(wcsp));
9385 
9386  CHECK(!test.IncludesAliasOf(d0));
9387  CHECK(!test.IncludesAliasOf(d1));
9388  CHECK(!test.IncludesAliasOf(d2));
9389  CHECK(!test.IncludesAliasOf(d3));
9390  CHECK(!test.IncludesAliasOf(s0));
9391  CHECK(!test.IncludesAliasOf(s1));
9392  CHECK(!test.IncludesAliasOf(s2));
9393  CHECK(!test.IncludesAliasOf(s3));
9394 
9395  CHECK(!test.IsEmpty());
9396 
9397  CHECK(test.type() == x0.type());
9398 
9399  CHECK(test.PopHighestIndex().Is(x3));
9400  CHECK(test.PopLowestIndex().Is(x0));
9401 
9402  CHECK(test.IncludesAliasOf(x1));
9403  CHECK(test.IncludesAliasOf(x2));
9404  CHECK(test.IncludesAliasOf(w1));
9405  CHECK(test.IncludesAliasOf(w2));
9406  CHECK(!test.IncludesAliasOf(x0));
9407  CHECK(!test.IncludesAliasOf(x3));
9408  CHECK(!test.IncludesAliasOf(w0));
9409  CHECK(!test.IncludesAliasOf(w3));
9410 
9411  CHECK(test.PopHighestIndex().Is(x2));
9412  CHECK(test.PopLowestIndex().Is(x1));
9413 
9414  CHECK(!test.IncludesAliasOf(x1));
9415  CHECK(!test.IncludesAliasOf(x2));
9416  CHECK(!test.IncludesAliasOf(w1));
9417  CHECK(!test.IncludesAliasOf(w2));
9418 
9419  CHECK(test.IsEmpty());
9420 }
9421 
9422 
9423 TEST(cpureglist_utils_w) {
9424  // This test doesn't generate any code, but it verifies the behaviour of
9425  // the CPURegList utility methods.
9426 
9427  // Test a list of W registers.
9428  CPURegList test(w10, w11, w12, w13);
9429 
9430  CHECK(test.IncludesAliasOf(x10));
9431  CHECK(test.IncludesAliasOf(x11));
9432  CHECK(test.IncludesAliasOf(x12));
9433  CHECK(test.IncludesAliasOf(x13));
9434  CHECK(test.IncludesAliasOf(w10));
9435  CHECK(test.IncludesAliasOf(w11));
9436  CHECK(test.IncludesAliasOf(w12));
9437  CHECK(test.IncludesAliasOf(w13));
9438 
9439  CHECK(!test.IncludesAliasOf(x0));
9440  CHECK(!test.IncludesAliasOf(x9));
9441  CHECK(!test.IncludesAliasOf(x14));
9442  CHECK(!test.IncludesAliasOf(x30));
9443  CHECK(!test.IncludesAliasOf(xzr));
9444  CHECK(!test.IncludesAliasOf(csp));
9445  CHECK(!test.IncludesAliasOf(w0));
9446  CHECK(!test.IncludesAliasOf(w9));
9447  CHECK(!test.IncludesAliasOf(w14));
9448  CHECK(!test.IncludesAliasOf(w30));
9449  CHECK(!test.IncludesAliasOf(wzr));
9450  CHECK(!test.IncludesAliasOf(wcsp));
9451 
9452  CHECK(!test.IncludesAliasOf(d10));
9453  CHECK(!test.IncludesAliasOf(d11));
9454  CHECK(!test.IncludesAliasOf(d12));
9455  CHECK(!test.IncludesAliasOf(d13));
9456  CHECK(!test.IncludesAliasOf(s10));
9457  CHECK(!test.IncludesAliasOf(s11));
9458  CHECK(!test.IncludesAliasOf(s12));
9459  CHECK(!test.IncludesAliasOf(s13));
9460 
9461  CHECK(!test.IsEmpty());
9462 
9463  CHECK(test.type() == w10.type());
9464 
9465  CHECK(test.PopHighestIndex().Is(w13));
9466  CHECK(test.PopLowestIndex().Is(w10));
9467 
9468  CHECK(test.IncludesAliasOf(x11));
9469  CHECK(test.IncludesAliasOf(x12));
9470  CHECK(test.IncludesAliasOf(w11));
9471  CHECK(test.IncludesAliasOf(w12));
9472  CHECK(!test.IncludesAliasOf(x10));
9473  CHECK(!test.IncludesAliasOf(x13));
9474  CHECK(!test.IncludesAliasOf(w10));
9475  CHECK(!test.IncludesAliasOf(w13));
9476 
9477  CHECK(test.PopHighestIndex().Is(w12));
9478  CHECK(test.PopLowestIndex().Is(w11));
9479 
9480  CHECK(!test.IncludesAliasOf(x11));
9481  CHECK(!test.IncludesAliasOf(x12));
9482  CHECK(!test.IncludesAliasOf(w11));
9483  CHECK(!test.IncludesAliasOf(w12));
9484 
9485  CHECK(test.IsEmpty());
9486 }
9487 
9488 
9489 TEST(cpureglist_utils_d) {
9490  // This test doesn't generate any code, but it verifies the behaviour of
9491  // the CPURegList utility methods.
9492 
9493  // Test a list of D registers.
9494  CPURegList test(d20, d21, d22, d23);
9495 
9496  CHECK(test.IncludesAliasOf(d20));
9497  CHECK(test.IncludesAliasOf(d21));
9498  CHECK(test.IncludesAliasOf(d22));
9499  CHECK(test.IncludesAliasOf(d23));
9500  CHECK(test.IncludesAliasOf(s20));
9501  CHECK(test.IncludesAliasOf(s21));
9502  CHECK(test.IncludesAliasOf(s22));
9503  CHECK(test.IncludesAliasOf(s23));
9504 
9505  CHECK(!test.IncludesAliasOf(d0));
9506  CHECK(!test.IncludesAliasOf(d19));
9507  CHECK(!test.IncludesAliasOf(d24));
9508  CHECK(!test.IncludesAliasOf(d31));
9509  CHECK(!test.IncludesAliasOf(s0));
9510  CHECK(!test.IncludesAliasOf(s19));
9511  CHECK(!test.IncludesAliasOf(s24));
9512  CHECK(!test.IncludesAliasOf(s31));
9513 
9514  CHECK(!test.IncludesAliasOf(x20));
9515  CHECK(!test.IncludesAliasOf(x21));
9516  CHECK(!test.IncludesAliasOf(x22));
9517  CHECK(!test.IncludesAliasOf(x23));
9518  CHECK(!test.IncludesAliasOf(w20));
9519  CHECK(!test.IncludesAliasOf(w21));
9520  CHECK(!test.IncludesAliasOf(w22));
9521  CHECK(!test.IncludesAliasOf(w23));
9522 
9523  CHECK(!test.IncludesAliasOf(xzr));
9524  CHECK(!test.IncludesAliasOf(wzr));
9525  CHECK(!test.IncludesAliasOf(csp));
9526  CHECK(!test.IncludesAliasOf(wcsp));
9527 
9528  CHECK(!test.IsEmpty());
9529 
9530  CHECK(test.type() == d20.type());
9531 
9532  CHECK(test.PopHighestIndex().Is(d23));
9533  CHECK(test.PopLowestIndex().Is(d20));
9534 
9535  CHECK(test.IncludesAliasOf(d21));
9536  CHECK(test.IncludesAliasOf(d22));
9537  CHECK(test.IncludesAliasOf(s21));
9538  CHECK(test.IncludesAliasOf(s22));
9539  CHECK(!test.IncludesAliasOf(d20));
9540  CHECK(!test.IncludesAliasOf(d23));
9541  CHECK(!test.IncludesAliasOf(s20));
9542  CHECK(!test.IncludesAliasOf(s23));
9543 
9544  CHECK(test.PopHighestIndex().Is(d22));
9545  CHECK(test.PopLowestIndex().Is(d21));
9546 
9547  CHECK(!test.IncludesAliasOf(d21));
9548  CHECK(!test.IncludesAliasOf(d22));
9549  CHECK(!test.IncludesAliasOf(s21));
9550  CHECK(!test.IncludesAliasOf(s22));
9551 
9552  CHECK(test.IsEmpty());
9553 }
9554 
9555 
9556 TEST(cpureglist_utils_s) {
9557  // This test doesn't generate any code, but it verifies the behaviour of
9558  // the CPURegList utility methods.
9559 
9560  // Test a list of S registers.
9561  CPURegList test(s20, s21, s22, s23);
9562 
9563  // The type and size mechanisms are already covered, so here we just test
9564  // that lists of S registers alias individual D registers.
9565 
9566  CHECK(test.IncludesAliasOf(d20));
9567  CHECK(test.IncludesAliasOf(d21));
9568  CHECK(test.IncludesAliasOf(d22));
9569  CHECK(test.IncludesAliasOf(d23));
9570  CHECK(test.IncludesAliasOf(s20));
9571  CHECK(test.IncludesAliasOf(s21));
9572  CHECK(test.IncludesAliasOf(s22));
9573  CHECK(test.IncludesAliasOf(s23));
9574 }
9575 
9576 
9577 TEST(cpureglist_utils_empty) {
9578  // This test doesn't generate any code, but it verifies the behaviour of
9579  // the CPURegList utility methods.
9580 
9581  // Test an empty list.
9582  // Empty lists can have type and size properties. Check that we can create
9583  // them, and that they are empty.
9584  CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
9585  CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
9586  CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
9587  CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
9588 
9589  CHECK(reg32.IsEmpty());
9590  CHECK(reg64.IsEmpty());
9591  CHECK(fpreg32.IsEmpty());
9592  CHECK(fpreg64.IsEmpty());
9593 
9594  CHECK(reg32.PopLowestIndex().IsNone());
9595  CHECK(reg64.PopLowestIndex().IsNone());
9596  CHECK(fpreg32.PopLowestIndex().IsNone());
9597  CHECK(fpreg64.PopLowestIndex().IsNone());
9598 
9599  CHECK(reg32.PopHighestIndex().IsNone());
9600  CHECK(reg64.PopHighestIndex().IsNone());
9601  CHECK(fpreg32.PopHighestIndex().IsNone());
9602  CHECK(fpreg64.PopHighestIndex().IsNone());
9603 
9604  CHECK(reg32.IsEmpty());
9605  CHECK(reg64.IsEmpty());
9606  CHECK(fpreg32.IsEmpty());
9607  CHECK(fpreg64.IsEmpty());
9608 }
9609 
9610 
9611 TEST(printf) {
9612  INIT_V8();
9613  SETUP();
9614  START();
9615 
9616  char const * test_plain_string = "Printf with no arguments.\n";
9617  char const * test_substring = "'This is a substring.'";
9618  RegisterDump before;
9619 
9620  // Initialize x29 to the value of the stack pointer. We will use x29 as a
9621  // temporary stack pointer later, and initializing it in this way allows the
9622  // RegisterDump check to pass.
9623  __ Mov(x29, __ StackPointer());
9624 
9625  // Test simple integer arguments.
9626  __ Mov(x0, 1234);
9627  __ Mov(x1, 0x1234);
9628 
9629  // Test simple floating-point arguments.
9630  __ Fmov(d0, 1.234);
9631 
9632  // Test pointer (string) arguments.
9633  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9634 
9635  // Test the maximum number of arguments, and sign extension.
9636  __ Mov(w3, 0xffffffff);
9637  __ Mov(w4, 0xffffffff);
9638  __ Mov(x5, 0xffffffffffffffff);
9639  __ Mov(x6, 0xffffffffffffffff);
9640  __ Fmov(s1, 1.234);
9641  __ Fmov(s2, 2.345);
9642  __ Fmov(d3, 3.456);
9643  __ Fmov(d4, 4.567);
9644 
9645  // Test printing callee-saved registers.
9646  __ Mov(x28, 0x123456789abcdef);
9647  __ Fmov(d10, 42.0);
9648 
9649  // Test with three arguments.
9650  __ Mov(x10, 3);
9651  __ Mov(x11, 40);
9652  __ Mov(x12, 500);
9653 
9654  // x8 and x9 are used by debug code in part of the macro assembler. However,
9655  // Printf guarantees to preserve them (so we can use Printf in debug code),
9656  // and we need to test that they are properly preserved. The above code
9657  // shouldn't need to use them, but we initialize x8 and x9 last to be on the
9658  // safe side. This test still assumes that none of the code from
9659  // before.Dump() to the end of the test can clobber x8 or x9, so where
9660  // possible we use the Assembler directly to be safe.
9661  __ orr(x8, xzr, 0x8888888888888888);
9662  __ orr(x9, xzr, 0x9999999999999999);
9663 
9664  // Check that we don't clobber any registers, except those that we explicitly
9665  // write results into.
9666  before.Dump(&masm);
9667 
9668  __ Printf(test_plain_string); // NOLINT(runtime/printf)
9669  __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
9670  __ Printf("d0: %f\n", d0);
9671  __ Printf("Test %%s: %s\n", x2);
9672  __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9673  "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9674  w3, w4, x5, x6);
9675  __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9676  __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
9677  __ Printf("%g\n", d10);
9678 
9679  // Test with a different stack pointer.
9680  const Register old_stack_pointer = __ StackPointer();
9681  __ mov(x29, old_stack_pointer);
9682  __ SetStackPointer(x29);
9683  __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
9684  __ mov(old_stack_pointer, __ StackPointer());
9685  __ SetStackPointer(old_stack_pointer);
9686 
9687  __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
9688 
9689  END();
9690  RUN();
9691 
9692  // We cannot easily test the output of the Printf sequences, and because
9693  // Printf preserves all registers by default, we can't look at the number of
9694  // bytes that were printed. The printf_no_preserve test checks that; here we
9695  // just verify that we didn't clobber any registers.
9696  ASSERT_EQUAL_REGISTERS(before);
9697 
9698  TEARDOWN();
9699 }
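
// Editor's note (hypothetical usage): because Printf preserves all registers,
// it can be dropped into generated code for debugging without disturbing the
// surrounding computation, e.g.:
//
//   __ Printf("x0 is now 0x%016" PRIx64 "\n", x0);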
9700 
9701 
9702 TEST(printf_no_preserve) {
9703  INIT_V8();
9704  SETUP();
9705  START();
9706 
9707  char const * test_plain_string = "Printf with no arguments.\n";
9708  char const * test_substring = "'This is a substring.'";
9709 
9710  __ PrintfNoPreserve(test_plain_string); // NOLINT(runtime/printf)
9711  __ Mov(x19, x0);
9712 
9713  // Test simple integer arguments.
9714  __ Mov(x0, 1234);
9715  __ Mov(x1, 0x1234);
9716  __ PrintfNoPreserve("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
9717  __ Mov(x20, x0);
9718 
9719  // Test simple floating-point arguments.
9720  __ Fmov(d0, 1.234);
9721  __ PrintfNoPreserve("d0: %f\n", d0);
9722  __ Mov(x21, x0);
9723 
9724  // Test pointer (string) arguments.
9725  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9726  __ PrintfNoPreserve("Test %%s: %s\n", x2);
9727  __ Mov(x22, x0);
9728 
9729  // Test the maximum number of arguments, and sign extension.
9730  __ Mov(w3, 0xffffffff);
9731  __ Mov(w4, 0xffffffff);
9732  __ Mov(x5, 0xffffffffffffffff);
9733  __ Mov(x6, 0xffffffffffffffff);
9734  __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9735  "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9736  w3, w4, x5, x6);
9737  __ Mov(x23, x0);
9738 
9739  __ Fmov(s1, 1.234);
9740  __ Fmov(s2, 2.345);
9741  __ Fmov(d3, 3.456);
9742  __ Fmov(d4, 4.567);
9743  __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9744  __ Mov(x24, x0);
9745 
9746  // Test printing callee-saved registers.
9747  __ Mov(x28, 0x123456789abcdef);
9748  __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
9749  __ Mov(x25, x0);
9750 
9751  __ Fmov(d10, 42.0);
9752  __ PrintfNoPreserve("%g\n", d10);
9753  __ Mov(x26, x0);
9754 
9755  // Test with a different stack pointer.
9756  const Register old_stack_pointer = __ StackPointer();
9757  __ Mov(x29, old_stack_pointer);
9758  __ SetStackPointer(x29);
9759 
9760  __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
9761  old_stack_pointer);
9762  __ Mov(x27, x0);
9763 
9764  __ Mov(old_stack_pointer, __ StackPointer());
9765  __ SetStackPointer(old_stack_pointer);
9766 
9767  // Test with three arguments.
9768  __ Mov(x3, 3);
9769  __ Mov(x4, 40);
9770  __ Mov(x5, 500);
9771  __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
9772  __ Mov(x28, x0);
9773 
9774  END();
9775  RUN();
9776 
9777  // We cannot easily test the exact output of the Printf sequences, but we can
9778  // use the return code to check that the string length was correct.
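 // Each expected value below counts every character written, including the
 // trailing '\n'.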
9779 
9780  // Printf with no arguments.
9781  ASSERT_EQUAL_64(strlen(test_plain_string), x19);
9782  // x0: 1234, x1: 0x00001234
9783  ASSERT_EQUAL_64(25, x20);
9784  // d0: 1.234000
9785  ASSERT_EQUAL_64(13, x21);
9786  // Test %s: 'This is a substring.'
9787  ASSERT_EQUAL_64(32, x22);
9788  // w3(uint32): 4294967295
9789  // w4(int32): -1
9790  // x5(uint64): 18446744073709551615
9791  // x6(int64): -1
9792  ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
9793  // %f: 1.234000
9794  // %g: 2.345
9795  // %e: 3.456000e+00
9796  // %E: 4.567000E+00
9797  ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
9798  // 0x89abcdef, 0x0123456789abcdef
9799  ASSERT_EQUAL_64(31, x25);
9800  // 42
9801  ASSERT_EQUAL_64(3, x26);
9802  // old_stack_pointer: 0x00007fb037ae2370
9803  // Note: This is an example value, but the field width is fixed here so the
9804  // string length is still predictable.
9805  ASSERT_EQUAL_64(38, x27);
9806  // 3=3, 4=40, 5=500
9807  ASSERT_EQUAL_64(17, x28);
9808 
9809  TEARDOWN();
9810 }
9811 
9812 
9813 // This is a V8-specific test.
9814 static void CopyFieldsHelper(CPURegList temps) {
9815  static const uint64_t kLiteralBase = 0x0100001000100101UL;
9816  static const uint64_t src[] = {kLiteralBase * 1,
9817  kLiteralBase * 2,
9818  kLiteralBase * 3,
9819  kLiteralBase * 4,
9820  kLiteralBase * 5,
9821  kLiteralBase * 6,
9822  kLiteralBase * 7,
9823  kLiteralBase * 8,
9824  kLiteralBase * 9,
9825  kLiteralBase * 10,
9826  kLiteralBase * 11};
9827  static const uint64_t src_tagged =
9828  reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
9829 
9830  static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
9831  uint64_t* dst[kTestCount];
9832  uint64_t dst_tagged[kTestCount];
9833 
9834  // The first test will be to copy 0 fields. The destination (and source)
9835  // should not be accessed in any way.
9836  dst[0] = NULL;
9837  dst_tagged[0] = kHeapObjectTag;
9838 
9839  // Allocate memory for each of the other tests. Each test <n> will have <n>
9840  // fields, exercising as many CopyFields paths as possible (sketch below).
9841  for (unsigned i = 1; i < kTestCount; i++) {
9842  dst[i] = new uint64_t[i];
9843  memset(dst[i], 0, i * sizeof(kLiteralBase));
9844  dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
9845  }
9846 
9847  SETUP();
9848  START();
9849 
9850  __ Mov(x0, dst_tagged[0]);
9851  __ Mov(x1, 0);
9852  __ CopyFields(x0, x1, temps, 0);
9853  for (unsigned i = 1; i < kTestCount; i++) {
9854  __ Mov(x0, dst_tagged[i]);
9855  __ Mov(x1, src_tagged);
9856  __ CopyFields(x0, x1, temps, i);
9857  }
9858 
9859  END();
9860  RUN();
9861  TEARDOWN();
9862 
9863  for (unsigned i = 1; i < kTestCount; i++) {
9864  for (unsigned j = 0; j < i; j++) {
9865  CHECK(src[j] == dst[i][j]);
9866  }
9867  delete [] dst[i];
9868  }
9869 }
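
// Editor's sketch: given the checks above, CopyFields(x0, x1, temps, count)
// behaves like the following reference loop over pointer-sized fields (a
// hypothetical helper, not part of the test):
//
//   void CopyFieldsReference(uint64_t* dst, const uint64_t* src,
//                            unsigned count) {
//     for (unsigned k = 0; k < count; k++) dst[k] = src[k];
//   }
//
// The kHeapObjectTag added to each address mimics V8's tagged heap-object
// pointers, which is the form the test passes in x0 and x1.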
9870 
9871 
9872 // This is a V8-specific test.
9873 TEST(copyfields) {
9874  INIT_V8();
9875  CopyFieldsHelper(CPURegList(x10));
9876  CopyFieldsHelper(CPURegList(x10, x11));
9877  CopyFieldsHelper(CPURegList(x10, x11, x12));
9878  CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
9879 }
9880 
9881 
9882 static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
9883  SETUP();
9884 
9885  START();
9886  Label end, slow;
9887  __ Mov(x2, 0xc001c0de);
9888  __ Mov(x1, value);
9889  __ SmiTag(x1);
9890  __ SmiAbs(x1, &slow);
9891  __ SmiUntag(x1);
9892  __ B(&end);
9893 
9894  __ Bind(&slow);
9895  __ Mov(x2, 0xbad);
9896 
9897  __ Bind(&end);
9898  END();
9899 
9900  RUN();
9901 
9902  if (must_fail) {
9903  // We tested an invalid conversion. The code must have jumped to slow.
9904  ASSERT_EQUAL_64(0xbad, x2);
9905  } else {
9906  // The conversion is valid; check the result.
9907  int32_t result = (value >= 0) ? value : -value;
9908  ASSERT_EQUAL_64(result, x1);
9909 
9910  // Check that we didn't jump on slow.
9911  ASSERT_EQUAL_64(0xc001c0de, x2);
9912  }
9913 
9914  TEARDOWN();
9915 }
9916 
9917 
9918 TEST(smi_abs) {
9919  INIT_V8();
9920  // Simple and edge cases.
9921  DoSmiAbsTest(0);
9922  DoSmiAbsTest(0x12345);
9923  DoSmiAbsTest(0x40000000);
9924  DoSmiAbsTest(0x7fffffff);
9925  DoSmiAbsTest(-1);
9926  DoSmiAbsTest(-12345);
9927  DoSmiAbsTest(0x80000001);
9928 
9929  // Check that the most negative SMI is detected.
9930  DoSmiAbsTest(0x80000000, true);
9931 }
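
// Editor's note: 0x80000000 is kWMinInt; its absolute value (2^31) does not
// fit in a 32-bit SMI payload, so SmiAbs must take the slow path. Every other
// input above has an absolute value of at most kWMaxInt.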
9932 
9933 
9934 TEST(blr_lr) {
9935  // A simple test to check that the simulator correctly handles "blr lr".
9936  INIT_V8();
9937  SETUP();
9938 
9939  START();
9940  Label target;
9941  Label end;
9942 
9943  __ Mov(x0, 0x0);
9944  __ Adr(lr, &target);
9945 
9946  __ Blr(lr);
9947  __ Mov(x0, 0xdeadbeef);
9948  __ B(&end);
9949 
9950  __ Bind(&target);
9951  __ Mov(x0, 0xc001c0de);
9952 
9953  __ Bind(&end);
9954  END();
9955 
9956  RUN();
9957 
9958  ASSERT_EQUAL_64(0xc001c0de, x0);
9959 
9960  TEARDOWN();
9961 }
9962 
9963 
9964 TEST(barriers) {
9965  // Generate all supported barriers. This is just a smoke test.
9966  INIT_V8();
9967  SETUP();
9968 
9969  START();
9970 
9971  // DMB
9972  __ Dmb(FullSystem, BarrierAll);
9973  __ Dmb(FullSystem, BarrierReads);
9974  __ Dmb(FullSystem, BarrierWrites);
9975  __ Dmb(FullSystem, BarrierOther);
9976 
9977  __ Dmb(InnerShareable, BarrierAll);
9978  __ Dmb(InnerShareable, BarrierReads);
9979  __ Dmb(InnerShareable, BarrierWrites);
9980  __ Dmb(InnerShareable, BarrierOther);
9981 
9982  __ Dmb(NonShareable, BarrierAll);
9983  __ Dmb(NonShareable, BarrierReads);
9984  __ Dmb(NonShareable, BarrierWrites);
9985  __ Dmb(NonShareable, BarrierOther);
9986 
9987  __ Dmb(OuterShareable, BarrierAll);
9988  __ Dmb(OuterShareable, BarrierReads);
9989  __ Dmb(OuterShareable, BarrierWrites);
9990  __ Dmb(OuterShareable, BarrierOther);
9991 
9992  // DSB
9993  __ Dsb(FullSystem, BarrierAll);
9994  __ Dsb(FullSystem, BarrierReads);
9995  __ Dsb(FullSystem, BarrierWrites);
9996  __ Dsb(FullSystem, BarrierOther);
9997 
9998  __ Dsb(InnerShareable, BarrierAll);
9999  __ Dsb(InnerShareable, BarrierReads);
10000  __ Dsb(InnerShareable, BarrierWrites);
10001  __ Dsb(InnerShareable, BarrierOther);
10002 
10003  __ Dsb(NonShareable, BarrierAll);
10004  __ Dsb(NonShareable, BarrierReads);
10005  __ Dsb(NonShareable, BarrierWrites);
10006  __ Dsb(NonShareable, BarrierOther);
10007 
10008  __ Dsb(OuterShareable, BarrierAll);
10009  __ Dsb(OuterShareable, BarrierReads);
10010  __ Dsb(OuterShareable, BarrierWrites);
10011  __ Dsb(OuterShareable, BarrierOther);
10012 
10013  // ISB
10014  __ Isb();
10015 
10016  END();
10017 
10018  RUN();
10019 
10020  TEARDOWN();
10021 }
10022 
10023 
10024 TEST(process_nan_double) {
10025  INIT_V8();
10026  // Make sure that NaN propagation works correctly.
10027  double sn = rawbits_to_double(0x7ff5555511111111);
10028  double qn = rawbits_to_double(0x7ffaaaaa11111111);
10029  ASSERT(IsSignallingNaN(sn));
10030  ASSERT(IsQuietNaN(qn));
10031 
10032  // The input NaNs after passing through ProcessNaN.
10033  double sn_proc = rawbits_to_double(0x7ffd555511111111);
10034  double qn_proc = qn;
10035  ASSERT(IsQuietNaN(sn_proc));
10036  ASSERT(IsQuietNaN(qn_proc));
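 // (ProcessNaN quiets a signalling NaN by setting the top bit of the
 // mantissa: 0x7ff5555511111111 | 0x0008000000000000 == 0x7ffd555511111111.)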
10037 
10038  SETUP();
10039  START();
10040 
10041  // Execute a number of instructions which all use ProcessNaN, and check that
10042  // they all handle the NaN correctly.
10043  __ Fmov(d0, sn);
10044  __ Fmov(d10, qn);
10045 
10046  // Operations that always propagate NaNs unchanged, even signalling NaNs.
10047  // - Signalling NaN
10048  __ Fmov(d1, d0);
10049  __ Fabs(d2, d0);
10050  __ Fneg(d3, d0);
10051  // - Quiet NaN
10052  __ Fmov(d11, d10);
10053  __ Fabs(d12, d10);
10054  __ Fneg(d13, d10);
10055 
10056  // Operations that use ProcessNaN.
10057  // - Signalling NaN
10058  __ Fsqrt(d4, d0);
10059  __ Frinta(d5, d0);
10060  __ Frintn(d6, d0);
10061  __ Frintz(d7, d0);
10062  // - Quiet NaN
10063  __ Fsqrt(d14, d10);
10064  __ Frinta(d15, d10);
10065  __ Frintn(d16, d10);
10066  __ Frintz(d17, d10);
10067 
10068  // The behaviour of fcvt is checked in TEST(fcvt_sd).
10069 
10070  END();
10071  RUN();
10072 
10073  uint64_t qn_raw = double_to_rawbits(qn);
10074  uint64_t sn_raw = double_to_rawbits(sn);
10075 
10076  // - Signalling NaN
10077  ASSERT_EQUAL_FP64(sn, d1);
10078  ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
10079  ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
10080  // - Quiet NaN
10081  ASSERT_EQUAL_FP64(qn, d11);
10082  ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
10083  ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
10084 
10085  // - Signalling NaN
10086  ASSERT_EQUAL_FP64(sn_proc, d4);
10087  ASSERT_EQUAL_FP64(sn_proc, d5);
10088  ASSERT_EQUAL_FP64(sn_proc, d6);
10089  ASSERT_EQUAL_FP64(sn_proc, d7);
10090  // - Quiet NaN
10091  ASSERT_EQUAL_FP64(qn_proc, d14);
10092  ASSERT_EQUAL_FP64(qn_proc, d15);
10093  ASSERT_EQUAL_FP64(qn_proc, d16);
10094  ASSERT_EQUAL_FP64(qn_proc, d17);
10095 
10096  TEARDOWN();
10097 }
10098 
10099 
10100 TEST(process_nan_float) {
10101  INIT_V8();
10102  // Make sure that NaN propagation works correctly.
10103  float sn = rawbits_to_float(0x7f951111);
10104  float qn = rawbits_to_float(0x7fea1111);
10105  ASSERT(IsSignallingNaN(sn));
10106  ASSERT(IsQuietNaN(qn));
10107 
10108  // The input NaNs after passing through ProcessNaN.
10109  float sn_proc = rawbits_to_float(0x7fd51111);
10110  float qn_proc = qn;
10111  ASSERT(IsQuietNaN(sn_proc));
10112  ASSERT(IsQuietNaN(qn_proc));
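 // (As in the double case, ProcessNaN sets the top mantissa bit:
 // 0x7f951111 | 0x00400000 == 0x7fd51111.)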
10113 
10114  SETUP();
10115  START();
10116 
10117  // Execute a number of instructions which all use ProcessNaN, and check that
10118  // they all handle the NaN correctly.
10119  __ Fmov(s0, sn);
10120  __ Fmov(s10, qn);
10121 
10122  // Operations that always propagate NaNs unchanged, even signalling NaNs.
10123  // - Signalling NaN
10124  __ Fmov(s1, s0);
10125  __ Fabs(s2, s0);
10126  __ Fneg(s3, s0);
10127  // - Quiet NaN
10128  __ Fmov(s11, s10);
10129  __ Fabs(s12, s10);
10130  __ Fneg(s13, s10);
10131 
10132  // Operations that use ProcessNaN.
10133  // - Signalling NaN
10134  __ Fsqrt(s4, s0);
10135  __ Frinta(s5, s0);
10136  __ Frintn(s6, s0);
10137  __ Frintz(s7, s0);
10138  // - Quiet NaN
10139  __ Fsqrt(s14, s10);
10140  __ Frinta(s15, s10);
10141  __ Frintn(s16, s10);
10142  __ Frintz(s17, s10);
10143 
10144  // The behaviour of fcvt is checked in TEST(fcvt_sd).
10145 
10146  END();
10147  RUN();
10148 
10149  uint32_t qn_raw = float_to_rawbits(qn);
10150  uint32_t sn_raw = float_to_rawbits(sn);
10151 
10152  // - Signalling NaN
10153  ASSERT_EQUAL_FP32(sn, s1);
10154  ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
10155  ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
10156  // - Quiet NaN
10157  ASSERT_EQUAL_FP32(qn, s11);
10158  ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
10159  ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
10160 
10161  // - Signalling NaN
10162  ASSERT_EQUAL_FP32(sn_proc, s4);
10163  ASSERT_EQUAL_FP32(sn_proc, s5);
10164  ASSERT_EQUAL_FP32(sn_proc, s6);
10165  ASSERT_EQUAL_FP32(sn_proc, s7);
10166  // - Quiet NaN
10167  ASSERT_EQUAL_FP32(qn_proc, s14);
10168  ASSERT_EQUAL_FP32(qn_proc, s15);
10169  ASSERT_EQUAL_FP32(qn_proc, s16);
10170  ASSERT_EQUAL_FP32(qn_proc, s17);
10171 
10172  TEARDOWN();
10173 }
10174 
10175 
10176 static void ProcessNaNsHelper(double n, double m, double expected) {
10177  ASSERT(std::isnan(n) || std::isnan(m));
10178  ASSERT(isnan(expected));
10179 
10180  SETUP();
10181  START();
10182 
10183  // Execute a number of instructions which all use ProcessNaNs, and check that
10184  // they all propagate NaNs correctly.
10185  __ Fmov(d0, n);
10186  __ Fmov(d1, m);
10187 
10188  __ Fadd(d2, d0, d1);
10189  __ Fsub(d3, d0, d1);
10190  __ Fmul(d4, d0, d1);
10191  __ Fdiv(d5, d0, d1);
10192  __ Fmax(d6, d0, d1);
10193  __ Fmin(d7, d0, d1);
10194 
10195  END();
10196  RUN();
10197 
10198  ASSERT_EQUAL_FP64(expected, d2);
10199  ASSERT_EQUAL_FP64(expected, d3);
10200  ASSERT_EQUAL_FP64(expected, d4);
10201  ASSERT_EQUAL_FP64(expected, d5);
10202  ASSERT_EQUAL_FP64(expected, d6);
10203  ASSERT_EQUAL_FP64(expected, d7);
10204 
10205  TEARDOWN();
10206 }
10207 
10208 
10209 TEST(process_nans_double) {
10210  INIT_V8();
10211  // Make sure that NaN propagation works correctly.
10212  double sn = rawbits_to_double(0x7ff5555511111111);
10213  double sm = rawbits_to_double(0x7ff5555522222222);
10214  double qn = rawbits_to_double(0x7ffaaaaa11111111);
10215  double qm = rawbits_to_double(0x7ffaaaaa22222222);
10216  ASSERT(IsSignallingNaN(sn));
10217  ASSERT(IsSignallingNaN(sm));
10218  ASSERT(IsQuietNaN(qn));
10219  ASSERT(IsQuietNaN(qm));
10220 
10221  // The input NaNs after passing through ProcessNaN.
10222  double sn_proc = rawbits_to_double(0x7ffd555511111111);
10223  double sm_proc = rawbits_to_double(0x7ffd555522222222);
10224  double qn_proc = qn;
10225  double qm_proc = qm;
10226  ASSERT(IsQuietNaN(sn_proc));
10227  ASSERT(IsQuietNaN(sm_proc));
10228  ASSERT(IsQuietNaN(qn_proc));
10229  ASSERT(IsQuietNaN(qm_proc));
10230 
10231  // Quiet NaNs are propagated.
10232  ProcessNaNsHelper(qn, 0, qn_proc);
10233  ProcessNaNsHelper(0, qm, qm_proc);
10234  ProcessNaNsHelper(qn, qm, qn_proc);
10235 
10236  // Signalling NaNs are propagated, and made quiet.
10237  ProcessNaNsHelper(sn, 0, sn_proc);
10238  ProcessNaNsHelper(0, sm, sm_proc);
10239  ProcessNaNsHelper(sn, sm, sn_proc);
10240 
10241  // Signalling NaNs take precedence over quiet NaNs.
10242  ProcessNaNsHelper(sn, qm, sn_proc);
10243  ProcessNaNsHelper(qn, sm, sm_proc);
10244  ProcessNaNsHelper(sn, sm, sn_proc);
10245 }
10246 
10247 
10248 static void ProcessNaNsHelper(float n, float m, float expected) {
10249  ASSERT(std::isnan(n) || std::isnan(m));
10250  ASSERT(isnan(expected));
10251 
10252  SETUP();
10253  START();
10254 
10255  // Execute a number of instructions which all use ProcessNaNs, and check that
10256  // they all propagate NaNs correctly.
10257  __ Fmov(s0, n);
10258  __ Fmov(s1, m);
10259 
10260  __ Fadd(s2, s0, s1);
10261  __ Fsub(s3, s0, s1);
10262  __ Fmul(s4, s0, s1);
10263  __ Fdiv(s5, s0, s1);
10264  __ Fmax(s6, s0, s1);
10265  __ Fmin(s7, s0, s1);
10266 
10267  END();
10268  RUN();
10269 
10270  ASSERT_EQUAL_FP32(expected, s2);
10271  ASSERT_EQUAL_FP32(expected, s3);
10272  ASSERT_EQUAL_FP32(expected, s4);
10273  ASSERT_EQUAL_FP32(expected, s5);
10274  ASSERT_EQUAL_FP32(expected, s6);
10275  ASSERT_EQUAL_FP32(expected, s7);
10276 
10277  TEARDOWN();
10278 }
10279 
10280 
10281 TEST(process_nans_float) {
10282  INIT_V8();
10283  // Make sure that NaN propagation works correctly.
10284  float sn = rawbits_to_float(0x7f951111);
10285  float sm = rawbits_to_float(0x7f952222);
10286  float qn = rawbits_to_float(0x7fea1111);
10287  float qm = rawbits_to_float(0x7fea2222);
10288  ASSERT(IsSignallingNaN(sn));
10289  ASSERT(IsSignallingNaN(sm));
10290  ASSERT(IsQuietNaN(qn));
10291  ASSERT(IsQuietNaN(qm));
10292 
10293  // The input NaNs after passing through ProcessNaN.
10294  float sn_proc = rawbits_to_float(0x7fd51111);
10295  float sm_proc = rawbits_to_float(0x7fd52222);
10296  float qn_proc = qn;
10297  float qm_proc = qm;
10298  ASSERT(IsQuietNaN(sn_proc));
10299  ASSERT(IsQuietNaN(sm_proc));
10300  ASSERT(IsQuietNaN(qn_proc));
10301  ASSERT(IsQuietNaN(qm_proc));
10302 
10303  // Quiet NaNs are propagated.
10304  ProcessNaNsHelper(qn, 0, qn_proc);
10305  ProcessNaNsHelper(0, qm, qm_proc);
10306  ProcessNaNsHelper(qn, qm, qn_proc);
10307 
10308  // Signalling NaNs are propagated, and made quiet.
10309  ProcessNaNsHelper(sn, 0, sn_proc);
10310  ProcessNaNsHelper(0, sm, sm_proc);
10311  ProcessNaNsHelper(sn, sm, sn_proc);
10312 
10313  // Signalling NaNs take precedence over quiet NaNs.
10314  ProcessNaNsHelper(sn, qm, sn_proc);
10315  ProcessNaNsHelper(qn, sm, sm_proc);
10316  ProcessNaNsHelper(sn, sm, sn_proc);
10317 }
10318 
10319 
10320 static void DefaultNaNHelper(float n, float m, float a) {
10321  ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
10322 
10323  bool test_1op = std::isnan(n);
10324  bool test_2op = std::isnan(n) || std::isnan(m);
10325 
10326  SETUP();
10327  START();
10328 
10329  // Enable Default-NaN mode in the FPCR.
10330  __ Mrs(x0, FPCR);
10331  __ Orr(x1, x0, DN_mask);
10332  __ Msr(FPCR, x1);
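 // With FPCR.DN set, any operation that would propagate a NaN produces the
 // default quiet NaN (kFP32DefaultNaN / kFP64DefaultNaN) instead.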
10333 
10334  // Execute a number of instructions which all use ProcessNaNs, and check that
10335  // they all produce the default NaN.
10336  __ Fmov(s0, n);
10337  __ Fmov(s1, m);
10338  __ Fmov(s2, a);
10339 
10340  if (test_1op) {
10341  // Operations that always propagate NaNs unchanged, even signalling NaNs.
10342  __ Fmov(s10, s0);
10343  __ Fabs(s11, s0);
10344  __ Fneg(s12, s0);
10345 
10346  // Operations that use ProcessNaN.
10347  __ Fsqrt(s13, s0);
10348  __ Frinta(s14, s0);
10349  __ Frintn(s15, s0);
10350  __ Frintz(s16, s0);
10351 
10352  // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10353  __ Fcvt(d17, s0);
10354  }
10355 
10356  if (test_2op) {
10357  __ Fadd(s18, s0, s1);
10358  __ Fsub(s19, s0, s1);
10359  __ Fmul(s20, s0, s1);
10360  __ Fdiv(s21, s0, s1);
10361  __ Fmax(s22, s0, s1);
10362  __ Fmin(s23, s0, s1);
10363  }
10364 
10365  __ Fmadd(s24, s0, s1, s2);
10366  __ Fmsub(s25, s0, s1, s2);
10367  __ Fnmadd(s26, s0, s1, s2);
10368  __ Fnmsub(s27, s0, s1, s2);
10369 
10370  // Restore FPCR.
10371  __ Msr(FPCR, x0);
10372 
10373  END();
10374  RUN();
10375 
10376  if (test_1op) {
10377  uint32_t n_raw = float_to_rawbits(n);
10378  ASSERT_EQUAL_FP32(n, s10);
10379  ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
10380  ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
10381  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
10382  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
10383  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
10384  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
10385  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
10386  }
10387 
10388  if (test_2op) {
10389  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
10390  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
10391  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
10392  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
10393  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
10394  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
10395  }
10396 
10397  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
10398  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
10399  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
10400  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
10401 
10402  TEARDOWN();
10403 }
10404 
10405 
10406 TEST(default_nan_float) {
10407  INIT_V8();
10408  float sn = rawbits_to_float(0x7f951111);
10409  float sm = rawbits_to_float(0x7f952222);
10410  float sa = rawbits_to_float(0x7f95aaaa);
10411  float qn = rawbits_to_float(0x7fea1111);
10412  float qm = rawbits_to_float(0x7fea2222);
10413  float qa = rawbits_to_float(0x7feaaaaa);
10414  ASSERT(IsSignallingNaN(sn));
10415  ASSERT(IsSignallingNaN(sm));
10416  ASSERT(IsSignallingNaN(sa));
10417  ASSERT(IsQuietNaN(qn));
10418  ASSERT(IsQuietNaN(qm));
10419  ASSERT(IsQuietNaN(qa));
10420 
10421  // - Signalling NaNs
10422  DefaultNaNHelper(sn, 0.0f, 0.0f);
10423  DefaultNaNHelper(0.0f, sm, 0.0f);
10424  DefaultNaNHelper(0.0f, 0.0f, sa);
10425  DefaultNaNHelper(sn, sm, 0.0f);
10426  DefaultNaNHelper(0.0f, sm, sa);
10427  DefaultNaNHelper(sn, 0.0f, sa);
10428  DefaultNaNHelper(sn, sm, sa);
10429  // - Quiet NaNs
10430  DefaultNaNHelper(qn, 0.0f, 0.0f);
10431  DefaultNaNHelper(0.0f, qm, 0.0f);
10432  DefaultNaNHelper(0.0f, 0.0f, qa);
10433  DefaultNaNHelper(qn, qm, 0.0f);
10434  DefaultNaNHelper(0.0f, qm, qa);
10435  DefaultNaNHelper(qn, 0.0f, qa);
10436  DefaultNaNHelper(qn, qm, qa);
10437  // - Mixed NaNs
10438  DefaultNaNHelper(qn, sm, sa);
10439  DefaultNaNHelper(sn, qm, sa);
10440  DefaultNaNHelper(sn, sm, qa);
10441  DefaultNaNHelper(qn, qm, sa);
10442  DefaultNaNHelper(sn, qm, qa);
10443  DefaultNaNHelper(qn, sm, qa);
10444  DefaultNaNHelper(qn, qm, qa);
10445 }
10446 
10447 
10448 static void DefaultNaNHelper(double n, double m, double a) {
10449  ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
10450 
10451  bool test_1op = std::isnan(n);
10452  bool test_2op = std::isnan(n) || std::isnan(m);
10453 
10454  SETUP();
10455  START();
10456 
10457  // Enable Default-NaN mode in the FPCR.
10458  __ Mrs(x0, FPCR);
10459  __ Orr(x1, x0, DN_mask);
10460  __ Msr(FPCR, x1);
10461 
10462  // Execute a number of instructions which all use ProcessNaNs, and check that
10463  // they all produce the default NaN.
10464  __ Fmov(d0, n);
10465  __ Fmov(d1, m);
10466  __ Fmov(d2, a);
10467 
10468  if (test_1op) {
10469  // Operations that always propagate NaNs unchanged, even signalling NaNs.
10470  __ Fmov(d10, d0);
10471  __ Fabs(d11, d0);
10472  __ Fneg(d12, d0);
10473 
10474  // Operations that use ProcessNaN.
10475  __ Fsqrt(d13, d0);
10476  __ Frinta(d14, d0);
10477  __ Frintn(d15, d0);
10478  __ Frintz(d16, d0);
10479 
10480  // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10481  __ Fcvt(s17, d0);
10482  }
10483 
10484  if (test_2op) {
10485  __ Fadd(d18, d0, d1);
10486  __ Fsub(d19, d0, d1);
10487  __ Fmul(d20, d0, d1);
10488  __ Fdiv(d21, d0, d1);
10489  __ Fmax(d22, d0, d1);
10490  __ Fmin(d23, d0, d1);
10491  }
10492 
10493  __ Fmadd(d24, d0, d1, d2);
10494  __ Fmsub(d25, d0, d1, d2);
10495  __ Fnmadd(d26, d0, d1, d2);
10496  __ Fnmsub(d27, d0, d1, d2);
10497 
10498  // Restore FPCR.
10499  __ Msr(FPCR, x0);
10500 
10501  END();
10502  RUN();
10503 
10504  if (test_1op) {
10505  uint64_t n_raw = double_to_rawbits(n);
10506  ASSERT_EQUAL_FP64(n, d10);
10507  ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
10508  ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
10509  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
10510  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
10511  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
10512  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
10513  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
10514  }
10515 
10516  if (test_2op) {
10517  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
10518  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
10519  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
10520  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
10521  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
10522  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
10523  }
10524 
10525  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
10526  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
10527  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
10528  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
10529 
10530  TEARDOWN();
10531 }
10532 
10533 
10534 TEST(default_nan_double) {
10535  INIT_V8();
10536  double sn = rawbits_to_double(0x7ff5555511111111);
10537  double sm = rawbits_to_double(0x7ff5555522222222);
10538  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
10539  double qn = rawbits_to_double(0x7ffaaaaa11111111);
10540  double qm = rawbits_to_double(0x7ffaaaaa22222222);
10541  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
10542  ASSERT(IsSignallingNaN(sn));
10543  ASSERT(IsSignallingNaN(sm));
10544  ASSERT(IsSignallingNaN(sa));
10545  ASSERT(IsQuietNaN(qn));
10546  ASSERT(IsQuietNaN(qm));
10547  ASSERT(IsQuietNaN(qa));
10548 
10549  // - Signalling NaNs
10550  DefaultNaNHelper(sn, 0.0, 0.0);
10551  DefaultNaNHelper(0.0, sm, 0.0);
10552  DefaultNaNHelper(0.0, 0.0, sa);
10553  DefaultNaNHelper(sn, sm, 0.0);
10554  DefaultNaNHelper(0.0, sm, sa);
10555  DefaultNaNHelper(sn, 0.0, sa);
10556  DefaultNaNHelper(sn, sm, sa);
10557  // - Quiet NaNs
10558  DefaultNaNHelper(qn, 0.0, 0.0);
10559  DefaultNaNHelper(0.0, qm, 0.0);
10560  DefaultNaNHelper(0.0, 0.0, qa);
10561  DefaultNaNHelper(qn, qm, 0.0);
10562  DefaultNaNHelper(0.0, qm, qa);
10563  DefaultNaNHelper(qn, 0.0, qa);
10564  DefaultNaNHelper(qn, qm, qa);
10565  // - Mixed NaNs
10566  DefaultNaNHelper(qn, sm, sa);
10567  DefaultNaNHelper(sn, qm, sa);
10568  DefaultNaNHelper(sn, sm, qa);
10569  DefaultNaNHelper(qn, qm, sa);
10570  DefaultNaNHelper(sn, qm, qa);
10571  DefaultNaNHelper(qn, sm, qa);
10572  DefaultNaNHelper(qn, qm, qa);
10573 }
10574 
10575 
10576 TEST(call_no_relocation) {
10577  Address call_start;
10578  Address return_address;
10579 
10580  INIT_V8();
10581  SETUP();
10582 
10583  START();
10584 
10585  Label function;
10586  Label test;
10587 
10588  __ B(&test);
10589 
10590  __ Bind(&function);
10591  __ Mov(x0, 0x1);
10592  __ Ret();
10593 
10594  __ Bind(&test);
10595  __ Mov(x0, 0x0);
10596  __ Push(lr, xzr);
10597  {
10598  Assembler::BlockConstPoolScope scope(&masm);
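 // The scope prevents the assembler from emitting a constant pool between
 // call_start and return_address, so the call sequence length stays
 // predictable.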
10599  call_start = buf + __ pc_offset();
10600  __ Call(buf + function.pos(), RelocInfo::NONE64);
10601  return_address = buf + __ pc_offset();
10602  }
10603  __ Pop(xzr, lr);
10604  END();
10605 
10606  RUN();
10607 
10608  ASSERT_EQUAL_64(1, x0);
10609 
10610  // The return_address_from_call_start function doesn't currently encounter any
10611  // non-relocatable sequences, so we check it here to make sure it works.
10612  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
10613  // non-relocatable calls at all.
10614  CHECK(return_address ==
10615  Assembler::return_address_from_call_start(call_start));
10616 
10617  TEARDOWN();
10618 }
10619 
10620 
10621 static void AbsHelperX(int64_t value) {
10622  int64_t expected;
10623 
10624  SETUP();
10625  START();
10626 
10627  Label fail;
10628  Label done;
10629 
10630  __ Mov(x0, 0);
10631  __ Mov(x1, value);
10632 
10633  if (value != kXMinInt) {
10634  expected = labs(value);
10635 
10636  Label next;
10637  // The result is representable.
10638  __ Abs(x10, x1);
10639  __ Abs(x11, x1, &fail);
10640  __ Abs(x12, x1, &fail, &next);
10641  __ Bind(&next);
10642  __ Abs(x13, x1, NULL, &done);
10643  } else {
10644  // labs is undefined for kXMinInt but our implementation in the
10645  // MacroAssembler will return kXMinInt in such a case.
10646  expected = kXMinInt;
10647 
10648  Label next;
10649  // The result is not representable.
10650  __ Abs(x10, x1);
10651  __ Abs(x11, x1, NULL, &fail);
10652  __ Abs(x12, x1, &next, &fail);
10653  __ Bind(&next);
10654  __ Abs(x13, x1, &done);
10655  }
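 // Editor's note: Abs(rd, rm, is_not_representable, is_representable)
 // computes the absolute value of rm into rd, branching to the first label
 // when the negation overflows (rm == kXMinInt here) and to the second when
 // the result is exact; either label may be NULL, as exercised above.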
10656 
10657  __ Bind(&fail);
10658  __ Mov(x0, -1);
10659 
10660  __ Bind(&done);
10661 
10662  END();
10663  RUN();
10664 
10665  ASSERT_EQUAL_64(0, x0);
10666  ASSERT_EQUAL_64(value, x1);
10667  ASSERT_EQUAL_64(expected, x10);
10668  ASSERT_EQUAL_64(expected, x11);
10669  ASSERT_EQUAL_64(expected, x12);
10670  ASSERT_EQUAL_64(expected, x13);
10671 
10672  TEARDOWN();
10673 }
10674 
10675 
10676 static void AbsHelperW(int32_t value) {
10677  int32_t expected;
10678 
10679  SETUP();
10680  START();
10681 
10682  Label fail;
10683  Label done;
10684 
10685  __ Mov(w0, 0);
10686  // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
10687  // Once it is fixed, we should remove the cast.
10688  __ Mov(w1, static_cast<uint32_t>(value));
10689 
10690  if (value != kWMinInt) {
10691  expected = abs(value);
10692 
10693  Label next;
10694  // The result is representable.
10695  __ Abs(w10, w1);
10696  __ Abs(w11, w1, &fail);
10697  __ Abs(w12, w1, &fail, &next);
10698  __ Bind(&next);
10699  __ Abs(w13, w1, NULL, &done);
10700  } else {
10701  // abs is undefined for kWMinInt but our implementation in the
10702  // MacroAssembler will return kWMinInt in such a case.
10703  expected = kWMinInt;
10704 
10705  Label next;
10706  // The result is not representable.
10707  __ Abs(w10, w1);
10708  __ Abs(w11, w1, NULL, &fail);
10709  __ Abs(w12, w1, &next, &fail);
10710  __ Bind(&next);
10711  __ Abs(w13, w1, &done);
10712  }
10713 
10714  __ Bind(&fail);
10715  __ Mov(w0, -1);
10716 
10717  __ Bind(&done);
10718 
10719  END();
10720  RUN();
10721 
10722  ASSERT_EQUAL_32(0, w0);
10723  ASSERT_EQUAL_32(value, w1);
10724  ASSERT_EQUAL_32(expected, w10);
10725  ASSERT_EQUAL_32(expected, w11);
10726  ASSERT_EQUAL_32(expected, w12);
10727  ASSERT_EQUAL_32(expected, w13);
10728 
10729  TEARDOWN();
10730 }
10731 
10732 
10733 TEST(abs) {
10734  INIT_V8();
10735  AbsHelperX(0);
10736  AbsHelperX(42);
10737  AbsHelperX(-42);
10738  AbsHelperX(kXMinInt);
10739  AbsHelperX(kXMaxInt);
10740 
10741  AbsHelperW(0);
10742  AbsHelperW(42);
10743  AbsHelperW(-42);
10744  AbsHelperW(kWMinInt);
10745  AbsHelperW(kWMaxInt);
10746 }
10747 
10748 
10749 TEST(pool_size) {
10750  INIT_V8();
10751  SETUP();
10752 
10753  // This test does not execute any code. It only tests that the size of the
10754  // pools is read correctly from the RelocInfo.
10755 
10756  Label exit;
10757  __ b(&exit);
10758 
10759  const unsigned constant_pool_size = 312;
10760  const unsigned veneer_pool_size = 184;
10761 
10762  __ RecordConstPool(constant_pool_size);
10763  for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
10764  __ dc32(0);
10765  }
10766 
10767  __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
10768  for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
10769  __ nop();
10770  }
10771 
10772  __ bind(&exit);
10773 
10774  Heap* heap = isolate->heap();
10775  CodeDesc desc;
10776  Object* code_object = NULL;
10777  Code* code;
10778  masm.GetCode(&desc);
10779  MaybeObject* maybe_code = heap->CreateCode(desc, 0, masm.CodeObject());
10780  maybe_code->ToObject(&code_object);
10781  code = Code::cast(code_object);
10782 
10783  unsigned pool_count = 0;
10784  int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
10785  RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
10786  for (RelocIterator it(code, pool_mask); !it.done(); it.next()) {
10787  RelocInfo* info = it.rinfo();
10788  if (RelocInfo::IsConstPool(info->rmode())) {
10789  ASSERT(info->data() == constant_pool_size);
10790  ++pool_count;
10791  }
10792  if (RelocInfo::IsVeneerPool(info->rmode())) {
10793  ASSERT(info->data() == veneer_pool_size);
10794  ++pool_count;
10795  }
10796  }
10797 
10798  ASSERT(pool_count == 2);
10799 
10800  TEARDOWN();
10801 }
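
// Editor's note: RecordConstPool and RecordVeneerPool store the pool size in
// the RelocInfo data() payload; that payload is what the loop above compares
// against the recorded sizes.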