v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
macro-assembler-arm64-inl.h
Go to the documentation of this file.
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
29 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
30 
31 #include <ctype.h>
32 
33 #include "v8globals.h"
34 #include "globals.h"
35 
36 #include "arm64/assembler-arm64.h"
39 #include "arm64/instrument-arm64.h"
40 
41 
42 namespace v8 {
43 namespace internal {
44 
45 
46 MemOperand FieldMemOperand(Register object, int offset) {
47  return MemOperand(object, offset - kHeapObjectTag);
48 }
49 
50 
52  return UntagSmiMemOperand(object, offset - kHeapObjectTag);
53 }
54 
55 
56 MemOperand UntagSmiMemOperand(Register object, int offset) {
57  // Assumes that Smis are shifted by 32 bits and little endianness.
58  STATIC_ASSERT(kSmiShift == 32);
59  return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
60 }
61 
62 
64  ASSERT(!code_object_.is_null());
65  return code_object_;
66 }
67 
68 
70  const Register& rn,
71  const Operand& operand) {
72  ASSERT(allow_macro_instructions_);
73  ASSERT(!rd.IsZero());
74  LogicalMacro(rd, rn, operand, AND);
75 }
76 
77 
79  const Register& rn,
80  const Operand& operand) {
81  ASSERT(allow_macro_instructions_);
82  ASSERT(!rd.IsZero());
83  LogicalMacro(rd, rn, operand, ANDS);
84 }
85 
86 
88  const Operand& operand) {
89  ASSERT(allow_macro_instructions_);
90  LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
91 }
92 
93 
95  const Register& rn,
96  const Operand& operand) {
97  ASSERT(allow_macro_instructions_);
98  ASSERT(!rd.IsZero());
99  LogicalMacro(rd, rn, operand, BIC);
100 }
101 
102 
104  const Register& rn,
105  const Operand& operand) {
106  ASSERT(allow_macro_instructions_);
107  ASSERT(!rd.IsZero());
108  LogicalMacro(rd, rn, operand, BICS);
109 }
110 
111 
113  const Register& rn,
114  const Operand& operand) {
115  ASSERT(allow_macro_instructions_);
116  ASSERT(!rd.IsZero());
117  LogicalMacro(rd, rn, operand, ORR);
118 }
119 
120 
122  const Register& rn,
123  const Operand& operand) {
124  ASSERT(allow_macro_instructions_);
125  ASSERT(!rd.IsZero());
126  LogicalMacro(rd, rn, operand, ORN);
127 }
128 
129 
131  const Register& rn,
132  const Operand& operand) {
133  ASSERT(allow_macro_instructions_);
134  ASSERT(!rd.IsZero());
135  LogicalMacro(rd, rn, operand, EOR);
136 }
137 
138 
140  const Register& rn,
141  const Operand& operand) {
142  ASSERT(allow_macro_instructions_);
143  ASSERT(!rd.IsZero());
144  LogicalMacro(rd, rn, operand, EON);
145 }
146 
147 
149  const Operand& operand,
150  StatusFlags nzcv,
151  Condition cond) {
152  ASSERT(allow_macro_instructions_);
153  if (operand.IsImmediate() && (operand.immediate() < 0)) {
154  ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
155  } else {
156  ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
157  }
158 }
159 
160 
162  const Operand& operand,
163  StatusFlags nzcv,
164  Condition cond) {
165  ASSERT(allow_macro_instructions_);
166  if (operand.IsImmediate() && (operand.immediate() < 0)) {
167  ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
168  } else {
169  ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
170  }
171 }
172 
173 
175  const Register& rn,
176  const Operand& operand) {
177  ASSERT(allow_macro_instructions_);
178  if (operand.IsImmediate() && (operand.immediate() < 0)) {
179  AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
180  } else {
181  AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
182  }
183 }
184 
186  const Register& rn,
187  const Operand& operand) {
188  ASSERT(allow_macro_instructions_);
189  if (operand.IsImmediate() && (operand.immediate() < 0)) {
190  AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
191  } else {
192  AddSubMacro(rd, rn, operand, SetFlags, ADD);
193  }
194 }
195 
196 
198  const Register& rn,
199  const Operand& operand) {
200  ASSERT(allow_macro_instructions_);
201  if (operand.IsImmediate() && (operand.immediate() < 0)) {
202  AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
203  } else {
204  AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
205  }
206 }
207 
208 
210  const Register& rn,
211  const Operand& operand) {
212  ASSERT(allow_macro_instructions_);
213  if (operand.IsImmediate() && (operand.immediate() < 0)) {
214  AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
215  } else {
216  AddSubMacro(rd, rn, operand, SetFlags, SUB);
217  }
218 }
219 
220 
221 void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
222  ASSERT(allow_macro_instructions_);
223  Adds(AppropriateZeroRegFor(rn), rn, operand);
224 }
225 
226 
227 void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
228  ASSERT(allow_macro_instructions_);
229  Subs(AppropriateZeroRegFor(rn), rn, operand);
230 }
231 
232 
234  const Operand& operand) {
235  ASSERT(allow_macro_instructions_);
236  ASSERT(!rd.IsZero());
237  if (operand.IsImmediate()) {
238  Mov(rd, -operand.immediate());
239  } else {
240  Sub(rd, AppropriateZeroRegFor(rd), operand);
241  }
242 }
243 
244 
246  const Operand& operand) {
247  ASSERT(allow_macro_instructions_);
248  Subs(rd, AppropriateZeroRegFor(rd), operand);
249 }
250 
251 
253  const Register& rn,
254  const Operand& operand) {
255  ASSERT(allow_macro_instructions_);
256  ASSERT(!rd.IsZero());
257  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
258 }
259 
260 
262  const Register& rn,
263  const Operand& operand) {
264  ASSERT(allow_macro_instructions_);
265  ASSERT(!rd.IsZero());
266  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
267 }
268 
269 
271  const Register& rn,
272  const Operand& operand) {
273  ASSERT(allow_macro_instructions_);
274  ASSERT(!rd.IsZero());
275  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
276 }
277 
278 
280  const Register& rn,
281  const Operand& operand) {
282  ASSERT(allow_macro_instructions_);
283  ASSERT(!rd.IsZero());
284  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
285 }
286 
287 
289  const Operand& operand) {
290  ASSERT(allow_macro_instructions_);
291  ASSERT(!rd.IsZero());
293  Sbc(rd, zr, operand);
294 }
295 
296 
298  const Operand& operand) {
299  ASSERT(allow_macro_instructions_);
300  ASSERT(!rd.IsZero());
302  Sbcs(rd, zr, operand);
303 }
304 
305 
306 void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
307  ASSERT(allow_macro_instructions_);
308  ASSERT(!rd.IsZero());
309  Mov(rd, ~imm);
310 }
311 
312 
// Stamps out one load/store macro instruction (Ldr, Str, Ldrb, ...). Each
// generated wrapper checks that macro instructions are allowed, then defers
// to LoadStoreMacro, which handles operands the raw encoding cannot express.
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                           \
  void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) {  \
    ASSERT(allow_macro_instructions_);                                  \
    LoadStoreMacro(REG, addr, OP);                                      \
  }
319 #undef DEFINE_FUNCTION
320 
321 
322 void MacroAssembler::Adr(const Register& rd, Label* label) {
323  ASSERT(allow_macro_instructions_);
324  ASSERT(!rd.IsZero());
325  adr(rd, label);
326 }
327 
328 
330  const Register& rn,
331  unsigned shift) {
332  ASSERT(allow_macro_instructions_);
333  ASSERT(!rd.IsZero());
334  asr(rd, rn, shift);
335 }
336 
337 
339  const Register& rn,
340  const Register& rm) {
341  ASSERT(allow_macro_instructions_);
342  ASSERT(!rd.IsZero());
343  asrv(rd, rn, rm);
344 }
345 
346 
347 void MacroAssembler::B(Label* label) {
348  b(label);
349  CheckVeneerPool(false, false);
350 }
351 
352 
353 void MacroAssembler::B(Condition cond, Label* label) {
354  ASSERT(allow_macro_instructions_);
355  B(label, cond);
356 }
357 
358 
360  const Register& rn,
361  unsigned lsb,
362  unsigned width) {
363  ASSERT(allow_macro_instructions_);
364  ASSERT(!rd.IsZero());
365  bfi(rd, rn, lsb, width);
366 }
367 
368 
370  const Register& rn,
371  unsigned lsb,
372  unsigned width) {
373  ASSERT(allow_macro_instructions_);
374  ASSERT(!rd.IsZero());
375  bfxil(rd, rn, lsb, width);
376 }
377 
378 
379 void MacroAssembler::Bind(Label* label) {
380  ASSERT(allow_macro_instructions_);
381  bind(label);
382 }
383 
384 
385 void MacroAssembler::Bl(Label* label) {
386  ASSERT(allow_macro_instructions_);
387  bl(label);
388 }
389 
390 
391 void MacroAssembler::Blr(const Register& xn) {
392  ASSERT(allow_macro_instructions_);
393  ASSERT(!xn.IsZero());
394  blr(xn);
395 }
396 
397 
398 void MacroAssembler::Br(const Register& xn) {
399  ASSERT(allow_macro_instructions_);
400  ASSERT(!xn.IsZero());
401  br(xn);
402 }
403 
404 
406  ASSERT(allow_macro_instructions_);
407  brk(code);
408 }
409 
410 
412  const Register& rn,
413  Condition cond) {
414  ASSERT(allow_macro_instructions_);
415  ASSERT(!rd.IsZero());
416  ASSERT((cond != al) && (cond != nv));
417  cinc(rd, rn, cond);
418 }
419 
420 
422  const Register& rn,
423  Condition cond) {
424  ASSERT(allow_macro_instructions_);
425  ASSERT(!rd.IsZero());
426  ASSERT((cond != al) && (cond != nv));
427  cinv(rd, rn, cond);
428 }
429 
430 
431 void MacroAssembler::Cls(const Register& rd, const Register& rn) {
432  ASSERT(allow_macro_instructions_);
433  ASSERT(!rd.IsZero());
434  cls(rd, rn);
435 }
436 
437 
438 void MacroAssembler::Clz(const Register& rd, const Register& rn) {
439  ASSERT(allow_macro_instructions_);
440  ASSERT(!rd.IsZero());
441  clz(rd, rn);
442 }
443 
444 
446  const Register& rn,
447  Condition cond) {
448  ASSERT(allow_macro_instructions_);
449  ASSERT(!rd.IsZero());
450  ASSERT((cond != al) && (cond != nv));
451  cneg(rd, rn, cond);
452 }
453 
454 
455 // Conditionally zero the destination register. Only X registers are supported
456 // due to the truncation side-effect when used on W registers.
458  Condition cond) {
459  ASSERT(allow_macro_instructions_);
460  ASSERT(!rd.IsSP() && rd.Is64Bits());
461  ASSERT((cond != al) && (cond != nv));
462  csel(rd, xzr, rd, cond);
463 }
464 
465 
466 // Conditionally move a value into the destination register. Only X registers
467 // are supported due to the truncation side-effect when used on W registers.
469  const Register& rn,
470  Condition cond) {
471  ASSERT(allow_macro_instructions_);
472  ASSERT(!rd.IsSP());
473  ASSERT(rd.Is64Bits() && rn.Is64Bits());
474  ASSERT((cond != al) && (cond != nv));
475  if (!rd.is(rn)) {
476  csel(rd, rn, rd, cond);
477  }
478 }
479 
480 
481 void MacroAssembler::Cset(const Register& rd, Condition cond) {
482  ASSERT(allow_macro_instructions_);
483  ASSERT(!rd.IsZero());
484  ASSERT((cond != al) && (cond != nv));
485  cset(rd, cond);
486 }
487 
488 
489 void MacroAssembler::Csetm(const Register& rd, Condition cond) {
490  ASSERT(allow_macro_instructions_);
491  ASSERT(!rd.IsZero());
492  ASSERT((cond != al) && (cond != nv));
493  csetm(rd, cond);
494 }
495 
496 
498  const Register& rn,
499  const Register& rm,
500  Condition cond) {
501  ASSERT(allow_macro_instructions_);
502  ASSERT(!rd.IsZero());
503  ASSERT((cond != al) && (cond != nv));
504  csinc(rd, rn, rm, cond);
505 }
506 
507 
509  const Register& rn,
510  const Register& rm,
511  Condition cond) {
512  ASSERT(allow_macro_instructions_);
513  ASSERT(!rd.IsZero());
514  ASSERT((cond != al) && (cond != nv));
515  csinv(rd, rn, rm, cond);
516 }
517 
518 
520  const Register& rn,
521  const Register& rm,
522  Condition cond) {
523  ASSERT(allow_macro_instructions_);
524  ASSERT(!rd.IsZero());
525  ASSERT((cond != al) && (cond != nv));
526  csneg(rd, rn, rm, cond);
527 }
528 
529 
531  ASSERT(allow_macro_instructions_);
532  dmb(domain, type);
533 }
534 
535 
537  ASSERT(allow_macro_instructions_);
538  dsb(domain, type);
539 }
540 
541 
542 void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
543  ASSERT(allow_macro_instructions_);
544  debug(message, code, params);
545 }
546 
547 
549  const Register& rn,
550  const Register& rm,
551  unsigned lsb) {
552  ASSERT(allow_macro_instructions_);
553  ASSERT(!rd.IsZero());
554  extr(rd, rn, rm, lsb);
555 }
556 
557 
558 void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
559  ASSERT(allow_macro_instructions_);
560  fabs(fd, fn);
561 }
562 
563 
565  const FPRegister& fn,
566  const FPRegister& fm) {
567  ASSERT(allow_macro_instructions_);
568  fadd(fd, fn, fm);
569 }
570 
571 
573  const FPRegister& fm,
574  StatusFlags nzcv,
575  Condition cond) {
576  ASSERT(allow_macro_instructions_);
577  ASSERT((cond != al) && (cond != nv));
578  fccmp(fn, fm, nzcv, cond);
579 }
580 
581 
582 void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
583  ASSERT(allow_macro_instructions_);
584  fcmp(fn, fm);
585 }
586 
587 
588 void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
589  ASSERT(allow_macro_instructions_);
590  if (value != 0.0) {
591  UseScratchRegisterScope temps(this);
592  FPRegister tmp = temps.AcquireSameSizeAs(fn);
593  Fmov(tmp, value);
594  fcmp(fn, tmp);
595  } else {
596  fcmp(fn, value);
597  }
598 }
599 
600 
602  const FPRegister& fn,
603  const FPRegister& fm,
604  Condition cond) {
605  ASSERT(allow_macro_instructions_);
606  ASSERT((cond != al) && (cond != nv));
607  fcsel(fd, fn, fm, cond);
608 }
609 
610 
611 void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
612  ASSERT(allow_macro_instructions_);
613  fcvt(fd, fn);
614 }
615 
616 
617 void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
618  ASSERT(allow_macro_instructions_);
619  ASSERT(!rd.IsZero());
620  fcvtas(rd, fn);
621 }
622 
623 
624 void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
625  ASSERT(allow_macro_instructions_);
626  ASSERT(!rd.IsZero());
627  fcvtau(rd, fn);
628 }
629 
630 
631 void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
632  ASSERT(allow_macro_instructions_);
633  ASSERT(!rd.IsZero());
634  fcvtms(rd, fn);
635 }
636 
637 
638 void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
639  ASSERT(allow_macro_instructions_);
640  ASSERT(!rd.IsZero());
641  fcvtmu(rd, fn);
642 }
643 
644 
645 void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
646  ASSERT(allow_macro_instructions_);
647  ASSERT(!rd.IsZero());
648  fcvtns(rd, fn);
649 }
650 
651 
652 void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
653  ASSERT(allow_macro_instructions_);
654  ASSERT(!rd.IsZero());
655  fcvtnu(rd, fn);
656 }
657 
658 
659 void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
660  ASSERT(allow_macro_instructions_);
661  ASSERT(!rd.IsZero());
662  fcvtzs(rd, fn);
663 }
664 void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
665  ASSERT(allow_macro_instructions_);
666  ASSERT(!rd.IsZero());
667  fcvtzu(rd, fn);
668 }
669 
670 
672  const FPRegister& fn,
673  const FPRegister& fm) {
674  ASSERT(allow_macro_instructions_);
675  fdiv(fd, fn, fm);
676 }
677 
678 
680  const FPRegister& fn,
681  const FPRegister& fm,
682  const FPRegister& fa) {
683  ASSERT(allow_macro_instructions_);
684  fmadd(fd, fn, fm, fa);
685 }
686 
687 
689  const FPRegister& fn,
690  const FPRegister& fm) {
691  ASSERT(allow_macro_instructions_);
692  fmax(fd, fn, fm);
693 }
694 
695 
697  const FPRegister& fn,
698  const FPRegister& fm) {
699  ASSERT(allow_macro_instructions_);
700  fmaxnm(fd, fn, fm);
701 }
702 
703 
705  const FPRegister& fn,
706  const FPRegister& fm) {
707  ASSERT(allow_macro_instructions_);
708  fmin(fd, fn, fm);
709 }
710 
711 
713  const FPRegister& fn,
714  const FPRegister& fm) {
715  ASSERT(allow_macro_instructions_);
716  fminnm(fd, fn, fm);
717 }
718 
719 
721  ASSERT(allow_macro_instructions_);
722  // Only emit an instruction if fd and fn are different, and they are both D
723  // registers. fmov(s0, s0) is not a no-op because it clears the top word of
724  // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
725  // top of q0, but FPRegister does not currently support Q registers.
726  if (!fd.Is(fn) || !fd.Is64Bits()) {
727  fmov(fd, fn);
728  }
729 }
730 
731 
733  ASSERT(allow_macro_instructions_);
734  fmov(fd, rn);
735 }
736 
737 
738 void MacroAssembler::Fmov(FPRegister fd, double imm) {
739  ASSERT(allow_macro_instructions_);
740  if (fd.Is32Bits()) {
741  Fmov(fd, static_cast<float>(imm));
742  return;
743  }
744 
745  ASSERT(fd.Is64Bits());
746  if (IsImmFP64(imm)) {
747  fmov(fd, imm);
748  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
749  fmov(fd, xzr);
750  } else {
751  UseScratchRegisterScope temps(this);
752  Register tmp = temps.AcquireX();
753  // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
754  Mov(tmp, double_to_rawbits(imm));
755  Fmov(fd, tmp);
756  }
757 }
758 
759 
760 void MacroAssembler::Fmov(FPRegister fd, float imm) {
761  ASSERT(allow_macro_instructions_);
762  if (fd.Is64Bits()) {
763  Fmov(fd, static_cast<double>(imm));
764  return;
765  }
766 
767  ASSERT(fd.Is32Bits());
768  if (IsImmFP32(imm)) {
769  fmov(fd, imm);
770  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
771  fmov(fd, wzr);
772  } else {
773  UseScratchRegisterScope temps(this);
774  Register tmp = temps.AcquireW();
775  // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
776  Mov(tmp, float_to_rawbits(imm));
777  Fmov(fd, tmp);
778  }
779 }
780 
781 
783  ASSERT(allow_macro_instructions_);
784  ASSERT(!rd.IsZero());
785  fmov(rd, fn);
786 }
787 
788 
790  const FPRegister& fn,
791  const FPRegister& fm,
792  const FPRegister& fa) {
793  ASSERT(allow_macro_instructions_);
794  fmsub(fd, fn, fm, fa);
795 }
796 
797 
799  const FPRegister& fn,
800  const FPRegister& fm) {
801  ASSERT(allow_macro_instructions_);
802  fmul(fd, fn, fm);
803 }
804 
805 
806 void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
807  ASSERT(allow_macro_instructions_);
808  fneg(fd, fn);
809 }
810 
811 
813  const FPRegister& fn,
814  const FPRegister& fm,
815  const FPRegister& fa) {
816  ASSERT(allow_macro_instructions_);
817  fnmadd(fd, fn, fm, fa);
818 }
819 
820 
822  const FPRegister& fn,
823  const FPRegister& fm,
824  const FPRegister& fa) {
825  ASSERT(allow_macro_instructions_);
826  fnmsub(fd, fn, fm, fa);
827 }
828 
829 
830 void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
831  ASSERT(allow_macro_instructions_);
832  frinta(fd, fn);
833 }
834 
835 
836 void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
837  ASSERT(allow_macro_instructions_);
838  frintn(fd, fn);
839 }
840 
841 
842 void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
843  ASSERT(allow_macro_instructions_);
844  frintz(fd, fn);
845 }
846 
847 
848 void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
849  ASSERT(allow_macro_instructions_);
850  fsqrt(fd, fn);
851 }
852 
853 
855  const FPRegister& fn,
856  const FPRegister& fm) {
857  ASSERT(allow_macro_instructions_);
858  fsub(fd, fn, fm);
859 }
860 
861 
863  ASSERT(allow_macro_instructions_);
864  hint(code);
865 }
866 
867 
869  ASSERT(allow_macro_instructions_);
870  hlt(code);
871 }
872 
873 
875  ASSERT(allow_macro_instructions_);
876  isb();
877 }
878 
879 
881  const CPURegister& rt2,
882  const MemOperand& src) {
883  ASSERT(allow_macro_instructions_);
884  ASSERT(!AreAliased(rt, rt2));
885  ldnp(rt, rt2, src);
886 }
887 
888 
890  const CPURegister& rt2,
891  const MemOperand& src) {
892  ASSERT(allow_macro_instructions_);
893  ASSERT(!AreAliased(rt, rt2));
894  ldp(rt, rt2, src);
895 }
896 
897 
899  const Register& rt2,
900  const MemOperand& src) {
901  ASSERT(allow_macro_instructions_);
902  ASSERT(!rt.IsZero());
903  ASSERT(!rt2.IsZero());
904  ldpsw(rt, rt2, src);
905 }
906 
907 
908 void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
909  ASSERT(allow_macro_instructions_);
910  ldr(ft, imm);
911 }
912 
913 
914 void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
915  ASSERT(allow_macro_instructions_);
916  ASSERT(!rt.IsZero());
917  ldr(rt, imm);
918 }
919 
920 
922  const Register& rn,
923  unsigned shift) {
924  ASSERT(allow_macro_instructions_);
925  ASSERT(!rd.IsZero());
926  lsl(rd, rn, shift);
927 }
928 
929 
931  const Register& rn,
932  const Register& rm) {
933  ASSERT(allow_macro_instructions_);
934  ASSERT(!rd.IsZero());
935  lslv(rd, rn, rm);
936 }
937 
938 
940  const Register& rn,
941  unsigned shift) {
942  ASSERT(allow_macro_instructions_);
943  ASSERT(!rd.IsZero());
944  lsr(rd, rn, shift);
945 }
946 
947 
949  const Register& rn,
950  const Register& rm) {
951  ASSERT(allow_macro_instructions_);
952  ASSERT(!rd.IsZero());
953  lsrv(rd, rn, rm);
954 }
955 
956 
958  const Register& rn,
959  const Register& rm,
960  const Register& ra) {
961  ASSERT(allow_macro_instructions_);
962  ASSERT(!rd.IsZero());
963  madd(rd, rn, rm, ra);
964 }
965 
966 
968  const Register& rn,
969  const Register& rm) {
970  ASSERT(allow_macro_instructions_);
971  ASSERT(!rd.IsZero());
972  mneg(rd, rn, rm);
973 }
974 
975 
976 void MacroAssembler::Mov(const Register& rd, const Register& rn) {
977  ASSERT(allow_macro_instructions_);
978  ASSERT(!rd.IsZero());
979  // Emit a register move only if the registers are distinct, or if they are
980  // not X registers. Note that mov(w0, w0) is not a no-op because it clears
981  // the top word of x0.
982  if (!rd.Is(rn) || !rd.Is64Bits()) {
983  Assembler::mov(rd, rn);
984  }
985 }
986 
987 
988 void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
989  ASSERT(allow_macro_instructions_);
990  ASSERT(!rd.IsZero());
991  movk(rd, imm, shift);
992 }
993 
994 
995 void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
996  ASSERT(allow_macro_instructions_);
997  ASSERT(!rt.IsZero());
998  mrs(rt, sysreg);
999 }
1000 
1001 
1003  ASSERT(allow_macro_instructions_);
1004  ASSERT(!rt.IsZero());
1005  msr(sysreg, rt);
1006 }
1007 
1008 
1010  const Register& rn,
1011  const Register& rm,
1012  const Register& ra) {
1013  ASSERT(allow_macro_instructions_);
1014  ASSERT(!rd.IsZero());
1015  msub(rd, rn, rm, ra);
1016 }
1017 
1018 
1020  const Register& rn,
1021  const Register& rm) {
1022  ASSERT(allow_macro_instructions_);
1023  ASSERT(!rd.IsZero());
1024  mul(rd, rn, rm);
1025 }
1026 
1027 
1028 void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
1029  ASSERT(allow_macro_instructions_);
1030  ASSERT(!rd.IsZero());
1031  rbit(rd, rn);
1032 }
1033 
1034 
1036  ASSERT(allow_macro_instructions_);
1037  ASSERT(!xn.IsZero());
1038  ret(xn);
1039  CheckVeneerPool(false, false);
1040 }
1041 
1042 
1043 void MacroAssembler::Rev(const Register& rd, const Register& rn) {
1044  ASSERT(allow_macro_instructions_);
1045  ASSERT(!rd.IsZero());
1046  rev(rd, rn);
1047 }
1048 
1049 
1050 void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
1051  ASSERT(allow_macro_instructions_);
1052  ASSERT(!rd.IsZero());
1053  rev16(rd, rn);
1054 }
1055 
1056 
1057 void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
1058  ASSERT(allow_macro_instructions_);
1059  ASSERT(!rd.IsZero());
1060  rev32(rd, rn);
1061 }
1062 
1063 
1065  const Register& rs,
1066  unsigned shift) {
1067  ASSERT(allow_macro_instructions_);
1068  ASSERT(!rd.IsZero());
1069  ror(rd, rs, shift);
1070 }
1071 
1072 
1074  const Register& rn,
1075  const Register& rm) {
1076  ASSERT(allow_macro_instructions_);
1077  ASSERT(!rd.IsZero());
1078  rorv(rd, rn, rm);
1079 }
1080 
1081 
1083  const Register& rn,
1084  unsigned lsb,
1085  unsigned width) {
1086  ASSERT(allow_macro_instructions_);
1087  ASSERT(!rd.IsZero());
1088  sbfiz(rd, rn, lsb, width);
1089 }
1090 
1091 
1093  const Register& rn,
1094  unsigned lsb,
1095  unsigned width) {
1096  ASSERT(allow_macro_instructions_);
1097  ASSERT(!rd.IsZero());
1098  sbfx(rd, rn, lsb, width);
1099 }
1100 
1101 
1103  const Register& rn,
1104  unsigned fbits) {
1105  ASSERT(allow_macro_instructions_);
1106  scvtf(fd, rn, fbits);
1107 }
1108 
1109 
1111  const Register& rn,
1112  const Register& rm) {
1113  ASSERT(allow_macro_instructions_);
1114  ASSERT(!rd.IsZero());
1115  sdiv(rd, rn, rm);
1116 }
1117 
1118 
1120  const Register& rn,
1121  const Register& rm,
1122  const Register& ra) {
1123  ASSERT(allow_macro_instructions_);
1124  ASSERT(!rd.IsZero());
1125  smaddl(rd, rn, rm, ra);
1126 }
1127 
1128 
1130  const Register& rn,
1131  const Register& rm,
1132  const Register& ra) {
1133  ASSERT(allow_macro_instructions_);
1134  ASSERT(!rd.IsZero());
1135  smsubl(rd, rn, rm, ra);
1136 }
1137 
1138 
1140  const Register& rn,
1141  const Register& rm) {
1142  ASSERT(allow_macro_instructions_);
1143  ASSERT(!rd.IsZero());
1144  smull(rd, rn, rm);
1145 }
1146 
1147 
1149  const Register& rn,
1150  const Register& rm) {
1151  ASSERT(allow_macro_instructions_);
1152  ASSERT(!rd.IsZero());
1153  smulh(rd, rn, rm);
1154 }
1155 
1156 
1158  const CPURegister& rt2,
1159  const MemOperand& dst) {
1160  ASSERT(allow_macro_instructions_);
1161  stnp(rt, rt2, dst);
1162 }
1163 
1164 
1166  const CPURegister& rt2,
1167  const MemOperand& dst) {
1168  ASSERT(allow_macro_instructions_);
1169  stp(rt, rt2, dst);
1170 }
1171 
1172 
1173 void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
1174  ASSERT(allow_macro_instructions_);
1175  ASSERT(!rd.IsZero());
1176  sxtb(rd, rn);
1177 }
1178 
1179 
1180 void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
1181  ASSERT(allow_macro_instructions_);
1182  ASSERT(!rd.IsZero());
1183  sxth(rd, rn);
1184 }
1185 
1186 
1187 void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
1188  ASSERT(allow_macro_instructions_);
1189  ASSERT(!rd.IsZero());
1190  sxtw(rd, rn);
1191 }
1192 
1193 
1195  const Register& rn,
1196  unsigned lsb,
1197  unsigned width) {
1198  ASSERT(allow_macro_instructions_);
1199  ASSERT(!rd.IsZero());
1200  ubfiz(rd, rn, lsb, width);
1201 }
1202 
1203 
1205  const Register& rn,
1206  unsigned lsb,
1207  unsigned width) {
1208  ASSERT(allow_macro_instructions_);
1209  ASSERT(!rd.IsZero());
1210  ubfx(rd, rn, lsb, width);
1211 }
1212 
1213 
1215  const Register& rn,
1216  unsigned fbits) {
1217  ASSERT(allow_macro_instructions_);
1218  ucvtf(fd, rn, fbits);
1219 }
1220 
1221 
1223  const Register& rn,
1224  const Register& rm) {
1225  ASSERT(allow_macro_instructions_);
1226  ASSERT(!rd.IsZero());
1227  udiv(rd, rn, rm);
1228 }
1229 
1230 
1232  const Register& rn,
1233  const Register& rm,
1234  const Register& ra) {
1235  ASSERT(allow_macro_instructions_);
1236  ASSERT(!rd.IsZero());
1237  umaddl(rd, rn, rm, ra);
1238 }
1239 
1240 
1242  const Register& rn,
1243  const Register& rm,
1244  const Register& ra) {
1245  ASSERT(allow_macro_instructions_);
1246  ASSERT(!rd.IsZero());
1247  umsubl(rd, rn, rm, ra);
1248 }
1249 
1250 
1251 void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
1252  ASSERT(allow_macro_instructions_);
1253  ASSERT(!rd.IsZero());
1254  uxtb(rd, rn);
1255 }
1256 
1257 
1258 void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
1259  ASSERT(allow_macro_instructions_);
1260  ASSERT(!rd.IsZero());
1261  uxth(rd, rn);
1262 }
1263 
1264 
1265 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
1266  ASSERT(allow_macro_instructions_);
1267  ASSERT(!rd.IsZero());
1268  uxtw(rd, rn);
1269 }
1270 
1271 
1273  ASSERT(!csp.Is(sp_));
1274  // TODO(jbramley): Several callers rely on this not using scratch registers,
1275  // so we use the assembler directly here. However, this means that large
1276  // immediate values of 'space' cannot be handled cleanly. (Only 24-bits
1277  // immediates or values of 'space' that can be encoded in one instruction are
1278  // accepted.) Once we implement our flexible scratch register idea, we could
1279  // greatly simplify this function.
1280  InstructionAccurateScope scope(this);
1281  if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
1282  // The subtract instruction supports a 12-bit immediate, shifted left by
1283  // zero or 12 bits. So, in two instructions, we can subtract any immediate
1284  // between zero and (1 << 24) - 1.
1285  int64_t imm = space.immediate();
1286  ASSERT(is_uint24(imm));
1287 
1288  int64_t imm_top_12_bits = imm >> 12;
1289  sub(csp, StackPointer(), imm_top_12_bits << 12);
1290  imm -= imm_top_12_bits << 12;
1291  if (imm > 0) {
1292  sub(csp, csp, imm);
1293  }
1294  } else {
1295  sub(csp, StackPointer(), space);
1296  }
1297 }
1298 
1299 
1301  ExternalReference roots_array_start =
1302  ExternalReference::roots_array_start(isolate());
1303  Mov(root, Operand(roots_array_start));
1304 }
1305 
1306 
1308  ASSERT(dst.Is64Bits() && src.Is64Bits());
1309  Lsl(dst, src, kSmiShift);
1310 }
1311 
1312 
1313 void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
1314 
1315 
1317  ASSERT(dst.Is64Bits() && src.Is64Bits());
1319  AssertSmi(src);
1320  }
1321  Asr(dst, src, kSmiShift);
1322 }
1323 
1324 
1326 
1327 
1329  Register src,
1330  UntagMode mode) {
1331  ASSERT(dst.Is64Bits() && src.Is64Bits());
1333  AssertSmi(src);
1334  }
1335  Scvtf(dst, src, kSmiShift);
1336 }
1337 
1338 
1340  Register src,
1341  UntagMode mode) {
1342  ASSERT(dst.Is32Bits() && src.Is64Bits());
1344  AssertSmi(src);
1345  }
1346  Scvtf(dst, src, kSmiShift);
1347 }
1348 
1349 
1351  Label* smi_label,
1352  Label* not_smi_label) {
1353  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1354  // Check if the tag bit is set.
1355  if (smi_label) {
1356  Tbz(value, 0, smi_label);
1357  if (not_smi_label) {
1358  B(not_smi_label);
1359  }
1360  } else {
1361  ASSERT(not_smi_label);
1362  Tbnz(value, 0, not_smi_label);
1363  }
1364 }
1365 
1366 
1367 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
1368  JumpIfSmi(value, NULL, not_smi_label);
1369 }
1370 
1371 
1373  Register value2,
1374  Label* both_smi_label,
1375  Label* not_smi_label) {
1376  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1377  UseScratchRegisterScope temps(this);
1378  Register tmp = temps.AcquireX();
1379  // Check if both tag bits are clear.
1380  Orr(tmp, value1, value2);
1381  JumpIfSmi(tmp, both_smi_label, not_smi_label);
1382 }
1383 
1384 
1386  Register value2,
1387  Label* either_smi_label,
1388  Label* not_smi_label) {
1389  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1390  UseScratchRegisterScope temps(this);
1391  Register tmp = temps.AcquireX();
1392  // Check if either tag bit is clear.
1393  And(tmp, value1, value2);
1394  JumpIfSmi(tmp, either_smi_label, not_smi_label);
1395 }
1396 
1397 
1399  Register value2,
1400  Label* not_smi_label) {
1401  JumpIfBothSmi(value1, value2, NULL, not_smi_label);
1402 }
1403 
1404 
1406  Register value2,
1407  Label* not_smi_label) {
1408  JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
1409 }
1410 
1411 
1413  Register type,
1414  Label* fail) {
1415  CompareObjectType(object, type, type, LAST_NAME_TYPE);
1416  B(hi, fail);
1417 }
1418 
1419 
1421  Register map,
1422  Register scratch,
1423  Label* fail) {
1424  Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1425  IsInstanceJSObjectType(map, scratch, fail);
1426 }
1427 
1428 
1430  Register scratch,
1431  Label* fail) {
1432  Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1433  // If cmp result is lt, the following ccmp will clear all flags.
1434  // Z == 0, N == V implies gt condition.
1437 
1438  // If we didn't get a valid label object just fall through and leave the
1439  // flags updated.
1440  if (fail != NULL) {
1441  B(gt, fail);
1442  }
1443 }
1444 
1445 
1447  Register type,
1448  Label* not_string,
1449  Label* string) {
1450  Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
1451  Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
1452 
1453  STATIC_ASSERT(kStringTag == 0);
1454  ASSERT((string != NULL) || (not_string != NULL));
1455  if (string == NULL) {
1456  TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
1457  } else if (not_string == NULL) {
1458  TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
1459  } else {
1460  TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
1461  B(string);
1462  }
1463 }
1464 
1465 
1467  UseScratchRegisterScope temps(this);
1468  Register tmp = temps.AcquireX();
1469  Mov(tmp, Operand(handle));
1470  Push(tmp);
1471 }
1472 
1473 
1474 void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
1475  uint64_t size = count * unit_size;
1476 
1477  if (size == 0) {
1478  return;
1479  }
1480 
1481  if (csp.Is(StackPointer())) {
1482  ASSERT(size % 16 == 0);
1483  } else {
1484  BumpSystemStackPointer(size);
1485  }
1486 
1487  Sub(StackPointer(), StackPointer(), size);
1488 }
1489 
1490 
1491 void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
1492  ASSERT(IsPowerOf2(unit_size));
1493 
1494  if (unit_size == 0) {
1495  return;
1496  }
1497 
1498  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
1499  const Operand size(count, LSL, shift);
1500 
1501  if (size.IsZero()) {
1502  return;
1503  }
1504 
1505  if (!csp.Is(StackPointer())) {
1506  BumpSystemStackPointer(size);
1507  }
1508 
1509  Sub(StackPointer(), StackPointer(), size);
1510 }
1511 
1512 
1513 void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
1514  ASSERT(IsPowerOf2(unit_size));
1515  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
1516  const Operand size(count_smi,
1517  (shift >= 0) ? (LSL) : (LSR),
1518  (shift >= 0) ? (shift) : (-shift));
1519 
1520  if (size.IsZero()) {
1521  return;
1522  }
1523 
1524  if (!csp.Is(StackPointer())) {
1525  BumpSystemStackPointer(size);
1526  }
1527 
1528  Sub(StackPointer(), StackPointer(), size);
1529 }
1530 
1531 
1532 void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
1533  uint64_t size = count * unit_size;
1534 
1535  if (size == 0) {
1536  return;
1537  }
1538 
1539  Add(StackPointer(), StackPointer(), size);
1540 
1541  if (csp.Is(StackPointer())) {
1542  ASSERT(size % 16 == 0);
1543  } else if (emit_debug_code()) {
1544  // It is safe to leave csp where it is when unwinding the JavaScript stack,
1545  // but if we keep it matching StackPointer, the simulator can detect memory
1546  // accesses in the now-free part of the stack.
1547  Mov(csp, StackPointer());
1548  }
1549 }
1550 
1551 
1552 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
1553  ASSERT(IsPowerOf2(unit_size));
1554 
1555  if (unit_size == 0) {
1556  return;
1557  }
1558 
1559  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
1560  const Operand size(count, LSL, shift);
1561 
1562  if (size.IsZero()) {
1563  return;
1564  }
1565 
1566  Add(StackPointer(), StackPointer(), size);
1567 
1568  if (!csp.Is(StackPointer()) && emit_debug_code()) {
1569  // It is safe to leave csp where it is when unwinding the JavaScript stack,
1570  // but if we keep it matching StackPointer, the simulator can detect memory
1571  // accesses in the now-free part of the stack.
1572  Mov(csp, StackPointer());
1573  }
1574 }
1575 
1576 
1577 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
1578  ASSERT(IsPowerOf2(unit_size));
1579  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
1580  const Operand size(count_smi,
1581  (shift >= 0) ? (LSL) : (LSR),
1582  (shift >= 0) ? (shift) : (-shift));
1583 
1584  if (size.IsZero()) {
1585  return;
1586  }
1587 
1588  Add(StackPointer(), StackPointer(), size);
1589 
1590  if (!csp.Is(StackPointer()) && emit_debug_code()) {
1591  // It is safe to leave csp where it is when unwinding the JavaScript stack,
1592  // but if we keep it matching StackPointer, the simulator can detect memory
1593  // accesses in the now-free part of the stack.
1594  Mov(csp, StackPointer());
1595  }
1596 }
1597 
1598 
1600  const Operand& rhs,
1601  Condition cond,
1602  Label* label) {
1603  if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
1604  ((cond == eq) || (cond == ne))) {
1605  if (cond == eq) {
1606  Cbz(lhs, label);
1607  } else {
1608  Cbnz(lhs, label);
1609  }
1610  } else {
1611  Cmp(lhs, rhs);
1612  B(cond, label);
1613  }
1614 }
1615 
1616 
1618  const uint64_t bit_pattern,
1619  Label* label) {
1620  int bits = reg.SizeInBits();
1621  ASSERT(CountSetBits(bit_pattern, bits) > 0);
1622  if (CountSetBits(bit_pattern, bits) == 1) {
1623  Tbnz(reg, MaskToBit(bit_pattern), label);
1624  } else {
1625  Tst(reg, bit_pattern);
1626  B(ne, label);
1627  }
1628 }
1629 
1630 
1632  const uint64_t bit_pattern,
1633  Label* label) {
1634  int bits = reg.SizeInBits();
1635  ASSERT(CountSetBits(bit_pattern, bits) > 0);
1636  if (CountSetBits(bit_pattern, bits) == 1) {
1637  Tbz(reg, MaskToBit(bit_pattern), label);
1638  } else {
1639  Tst(reg, bit_pattern);
1640  B(eq, label);
1641  }
1642 }
1643 
1644 
1645 void MacroAssembler::InlineData(uint64_t data) {
1646  ASSERT(is_uint16(data));
1647  InstructionAccurateScope scope(this, 1);
1648  movz(xzr, data);
1649 }
1650 
1651 
1653  InstructionAccurateScope scope(this, 1);
1655 }
1656 
1657 
1659  InstructionAccurateScope scope(this, 1);
1661 }
1662 
1663 
1664 void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
1665  ASSERT(strlen(marker_name) == 2);
1666 
1667  // We allow only printable characters in the marker names. Unprintable
1668  // characters are reserved for controlling features of the instrumentation.
1669  ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
1670 
1671  InstructionAccurateScope scope(this, 1);
1672  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1673 }
1674 
1675 } } // namespace v8::internal
1676 
1677 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
void Csetm(const Register &rd, Condition cond)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void Neg(const Register &rd, const Operand &operand)
void SmiUntag(Register reg, SBit s=LeaveCC)
void lsl(const Register &rd, const Register &rn, unsigned shift)
void Mvn(const Register &rd, uint64_t imm)
Isolate * isolate() const
Definition: assembler.h:62
void sdiv(Register dst, Register src1, Register src2, Condition cond=al)
void Adr(const Register &rd, Label *label)
void ClaimBySMI(const Register &count_smi, uint64_t unit_size=kXRegSize)
void fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void TestAndBranchIfAllClear(const Register &reg, const uint64_t bit_pattern, Label *label)
void Cmn(const Register &rn, const Operand &operand)
void csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void Frintn(const FPRegister &fd, const FPRegister &fn)
void Adcs(const Register &rd, const Register &rn, const Operand &operand)
void Ands(const Register &rd, const Register &rn, const Operand &operand)
const Register & AppropriateZeroRegFor(const CPURegister &reg) const
void Udiv(const Register &rd, const Register &rn, const Register &rm)
void Fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Orr(const Register &rd, const Register &rn, const Operand &operand)
void Fsqrt(const FPRegister &fd, const FPRegister &fn)
void CmovX(const Register &rd, const Register &rn, Condition cond)
void SmiTag(Register reg, SBit s=LeaveCC)
void ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void sxtw(const Register &rd, const Register &rn)
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf map
Definition: flags.cc:350
void Ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void Lsr(const Register &rd, const Register &rn, unsigned shift)
void sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void mrs(Register dst, SRegister s, Condition cond=al)
void CompareAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void Fcvtas(const Register &rd, const FPRegister &fn)
void Debug(const char *message, uint32_t code, Instr params=BREAK)
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void SmiUntagToFloat(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
void IsObjectJSStringType(Register object, Register scratch, Label *fail)
void ConditionalCompareMacro(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void Ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void Fabs(const FPRegister &fd, const FPRegister &fn)
void Cneg(const Register &rd, const Register &rn, Condition cond)
STATIC_ASSERT((reg_zero==(reg_not_zero^1))&&(reg_bit_clear==(reg_bit_set^1))&&(always==(never^1)))
void bfi(Register dst, Register src, int lsb, int width, Condition cond=al)
void rev16(const Register &rd, const Register &rn)
void fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Sdiv(const Register &rd, const Register &rn, const Register &rm)
const unsigned kXRegSizeInBits
void Fcvtnu(const Register &rd, const FPRegister &fn)
void CzeroX(const Register &rd, Condition cond)
void csetm(const Register &rd, Condition cond)
void Fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
void Fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void b(int branch_offset, Condition cond=al)
void JumpIfSmi(Register value, Label *smi_label)
void Ldr(const FPRegister &ft, double imm)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage message
Definition: flags.cc:665
void smull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void Adc(const Register &rd, const Register &rn, const Operand &operand)
void bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
const Register & StackPointer() const
void clz(Register dst, Register src, Condition cond=al)
void fcvtns(const Register &rd, const FPRegister &fn)
void frintz(const FPRegister &fd, const FPRegister &fn)
int MaskToBit(uint64_t mask)
void fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Bfi(Register dst, Register src, Register scratch, int lsb, int width, Condition cond=al)
void Fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void rbit(const Register &rd, const Register &rn)
#define ASSERT(condition)
Definition: checks.h:329
void JumpIfBothNotSmi(Register value1, Register value2, Label *not_smi_label)
void Fcvtau(const Register &rd, const FPRegister &fn)
void Subs(const Register &rd, const Register &rn, const Operand &operand)
void frintn(const FPRegister &fd, const FPRegister &fn)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void Ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void fneg(const FPRegister &fd, const FPRegister &fn)
void movk(const Register &rd, uint64_t imm, int shift=-1)
void Bic(const Register &rd, const Register &rn, const Operand &operand)
void Mul(const Register &rd, const Register &rn, const Register &rm)
void fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
const bool FLAG_enable_slow_asserts
Definition: checks.h:307
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void cinv(const Register &rd, const Register &rn, Condition cond)
void Negs(const Register &rd, const Operand &operand)
void Umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
void extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
void udiv(const Register &rd, const Register &rn, const Register &rm)
void AssertSmi(Register object)
void fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void asr(const Register &rd, const Register &rn, unsigned shift)
void Msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void rev32(const Register &rd, const Register &rn)
void IsObjectNameType(Register object, Register scratch, Label *fail)
int CountSetBits(uint64_t value, int width)
void Eor(const Register &rd, const Register &rn, const Operand &operand)
void Csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void ret(const Register &xn=lr)
void stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
static bool IsImmFP64(double imm)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
void cneg(const Register &rd, const Register &rn, Condition cond)
void Sbcs(const Register &rd, const Register &rn, const Operand &operand)
MemOperand UntagSmiMemOperand(Register object, int offset)
void sxtb(const Register &rd, const Register &rn)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
void Fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
void Cbnz(const Register &rt, Label *label)
void fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void uxth(const Register &rd, const Register &rn)
void Sxtb(const Register &rd, const Register &rn)
void fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Uxtw(const Register &rd, const Register &rn)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
void smulh(const Register &rd, const Register &rn, const Register &rm)
void AddSubWithCarryMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void br(const Register &xn)
void rev(const Register &rd, const Register &rn)
void csel(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void TestAndBranchIfAnySet(const Register &reg, const uint64_t bit_pattern, Label *label)
void Sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void Fcmp(const FPRegister &fn, const FPRegister &fm)
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)
void Dmb(BarrierDomain domain, BarrierType type)
void Fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Fcvtns(const Register &rd, const FPRegister &fn)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
Definition: flags.cc:211
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void Eon(const Register &rd, const Register &rn, const Operand &operand)
void fmov(FPRegister fd, double imm)
const int kHeapObjectTag
Definition: v8.h:5473
void Rev32(const Register &rd, const Register &rn)
void umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void Fmov(FPRegister fd, FPRegister fn)
void Uxtb(const Register &rd, const Register &rn)
void Fcvtzs(const Register &rd, const FPRegister &fn)
void JumpIfEitherNotSmi(Register value1, Register value2, Label *not_smi_label)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void Sxtw(const Register &rd, const Register &rn)
void csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void mneg(const Register &rd, const Register &rn, const Register &rm)
void Ngc(const Register &rd, const Operand &operand)
void blr(const Register &xn)
void lslv(const Register &rd, const Register &rn, const Register &rm)
void ror(const Register &rd, const Register &rs, unsigned shift)
void stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void Bics(const Register &rd, const Register &rn, const Operand &operand)
void ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void Fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Cinc(const Register &rd, const Register &rn, Condition cond)
void fcvtau(const Register &rd, const FPRegister &fn)
void Mneg(const Register &rd, const Register &rn, const Register &rm)
const int kBitsPerByte
Definition: globals.h:287
const int kSmiShift
bool IsPowerOf2(T x)
Definition: utils.h:51
void movz(const Register &rd, uint64_t imm, int shift=-1)
void SmiUntagToDouble(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
void debug(const char *message, uint32_t code, Instr params=BREAK)
void fcvtnu(const Register &rd, const FPRegister &fn)
void sxth(const Register &rd, const Register &rn)
void Csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
const uint32_t kStringTag
Definition: objects.h:598
void Cset(const Register &rd, Condition cond)
void fcvtzu(const Register &rd, const FPRegister &fn)
void Fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Fneg(const FPRegister &fd, const FPRegister &fn)
void cinc(const Register &rd, const Register &rn, Condition cond)
void Fcvtzu(const Register &rd, const FPRegister &fn)
void fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void AnnotateInstrumentation(const char *marker_name)
void uxtb(Register dst, const Operand &src, Condition cond=al)
void Cbz(const Register &rt, Label *label)
void Drop(int count, Condition cond=al)
void Ngcs(const Register &rd, const Operand &operand)
void Sxth(const Register &rd, const Register &rn)
void Cls(const Register &rd, const Register &rn)
void Stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
static const int kMapOffset
Definition: objects.h:1890
void Orn(const Register &rd, const Register &rn, const Operand &operand)
bool is(Register reg) const
const uint32_t kIsNotStringMask
Definition: objects.h:597
static bool IsImmFP32(float imm)
void cset(const Register &rd, Condition cond)
void Add(const Register &rd, const Register &rn, const Operand &operand)
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
void Rev(const Register &rd, const Register &rn)
void Fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
void Clz(const Register &rd, const Register &rn)
void Ldpsw(const Register &rt, const Register &rt2, const MemOperand &src)
void ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
void bl(int branch_offset, Condition cond=al)
MemOperand FieldMemOperand(Register object, int offset)
void dmb(BarrierDomain domain, BarrierType type)
void ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
bool emit_debug_code() const
Definition: assembler.h:65
void Smulh(const Register &rd, const Register &rn, const Register &rm)
void hint(SystemHint code)
void fsqrt(const FPRegister &fd, const FPRegister &fn)
void fcvtas(const Register &rd, const FPRegister &fn)
void Rbit(const Register &rd, const Register &rn)
void Frintz(const FPRegister &fd, const FPRegister &fn)
void sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void asrv(const Register &rd, const Register &rn, const Register &rm)
void Msr(SystemRegister sysreg, const Register &rt)
void Mrs(const Register &rt, SystemRegister sysreg)
void Ror(const Register &rd, const Register &rs, unsigned shift)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
const int kSmiTagSize
Definition: v8.h:5479
void BumpSystemStackPointer(const Operand &space)
void dsb(BarrierDomain domain, BarrierType type)
void Frinta(const FPRegister &fd, const FPRegister &fn)
void Fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Cmp(const Register &rn, const Operand &operand)
void Tst(const Register &rn, const Operand &operand)
void Lsl(const Register &rd, const Register &rn, unsigned shift)
void Fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void fcvtmu(const Register &rd, const FPRegister &fn)
void lsrv(const Register &rd, const Register &rn, const Register &rm)
void fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
const int kSmiTag
Definition: v8.h:5478
void JumpIfNotSmi(Register value, Label *not_smi_label)
void cls(const Register &rd, const Register &rn)
void Smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void movn(const Register &rd, uint64_t imm, int shift=-1)
void Claim(uint64_t count, uint64_t unit_size=kXRegSize)
void Adds(const Register &rd, const Register &rn, const Operand &operand)
void Fcvtms(const Register &rd, const FPRegister &fn)
void Fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void lsr(const Register &rd, const Register &rn, unsigned shift)
void Fcvtmu(const Register &rd, const FPRegister &fn)
void Smull(const Register &rd, const Register &rn, const Register &rm)
void ldpsw(const Register &rt, const Register &rt2, const MemOperand &src)
void fcmp(const FPRegister &fn, const FPRegister &fm)
void Rev16(const Register &rd, const Register &rn)
void Movk(const Register &rd, uint64_t imm, int shift=-1)
void fcvtzs(const Register &rd, const FPRegister &fn)
void Stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void CheckVeneerPool(bool force_emit, bool require_jump, int margin=kVeneerDistanceMargin)
bool Is(const CPURegister &other) const
void rorv(const Register &rd, const Register &rn, const Register &rm)
void fcvt(const FPRegister &fd, const FPRegister &fn)
void Ccmn(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void DropBySMI(const Register &count_smi, uint64_t unit_size=kXRegSize)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void Madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void uxtw(const Register &rd, const Register &rn)
void Asr(const Register &rd, const Register &rn, unsigned shift)
void adr(const Register &rd, Label *label)
void JumpIfBothSmi(Register value1, Register value2, Label *both_smi_label, Label *not_smi_label=NULL)
#define LS_MACRO_LIST(V)
void Sbc(const Register &rd, const Register &rn, const Operand &operand)
void AddSubMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void msr(SRegisterFieldMask fields, const Operand &src, Condition cond=al)
int CountTrailingZeros(uint64_t value, int width)
void madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
int64_t immediate() const
Register AcquireSameSizeAs(const Register &reg)
void Sub(const Register &rd, const Register &rn, const Operand &operand)
void frinta(const FPRegister &fd, const FPRegister &fn)
void Fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void fcvtms(const Register &rd, const FPRegister &fn)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Fcvt(const FPRegister &fd, const FPRegister &fn)
void Dsb(BarrierDomain domain, BarrierType type)
void Cinv(const Register &rd, const Register &rn, Condition cond)
void smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
void fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
static const int kInstanceTypeOffset
Definition: objects.h:6459
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void Ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void Uxth(const Register &rd, const Register &rn)
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)