v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
assembler-arm64.h
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_
29 #define V8_ARM64_ASSEMBLER_ARM64_H_
30 
31 #include <list>
32 #include <map>
33 
34 #include "globals.h"
35 #include "utils.h"
36 #include "assembler.h"
37 #include "serialize.h"
38 #include "arm64/instructions-arm64.h"
39 #include "arm64/cpu-arm64.h"
40 
41 
42 namespace v8 {
43 namespace internal {
44 
45 
46 // -----------------------------------------------------------------------------
47 // Registers.
48 #define REGISTER_CODE_LIST(R) \
49 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
50 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
51 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
52 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
53 
54 
55 static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
56 
57 
58 // Some CPURegister methods can return Register and FPRegister types, so we
59 // need to declare them in advance.
60 struct Register;
61 struct FPRegister;
62 
63 
64 struct CPURegister {
65  enum RegisterType {
66  // The kInvalid value is used to detect uninitialized static instances,
67  // which are always zero-initialized before any constructors are called.
68  kInvalid = 0,
69  kRegister,
70  kFPRegister,
71  kNoRegister
72  };
73 
74  static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
75  CPURegister r = {code, size, type};
76  return r;
77  }
78 
79  unsigned code() const;
80  RegisterType type() const;
81  RegList Bit() const;
82  unsigned SizeInBits() const;
83  int SizeInBytes() const;
84  bool Is32Bits() const;
85  bool Is64Bits() const;
86  bool IsValid() const;
87  bool IsValidOrNone() const;
88  bool IsValidRegister() const;
89  bool IsValidFPRegister() const;
90  bool IsNone() const;
91  bool Is(const CPURegister& other) const;
92 
93  bool IsZero() const;
94  bool IsSP() const;
95 
96  bool IsRegister() const;
97  bool IsFPRegister() const;
98 
99  Register X() const;
100  Register W() const;
101  FPRegister D() const;
102  FPRegister S() const;
103 
104  bool IsSameSizeAndType(const CPURegister& other) const;
105 
106  // V8 compatibility.
107  bool is(const CPURegister& other) const { return Is(other); }
108  bool is_valid() const { return IsValid(); }
109 
110  unsigned reg_code;
111  unsigned reg_size;
112  RegisterType reg_type;
113 };
114 
115 
116 struct Register : public CPURegister {
117  static Register Create(unsigned code, unsigned size) {
118  return Register(CPURegister::Create(code, size, CPURegister::kRegister));
119  }
120 
121  Register() {
122  reg_code = 0;
123  reg_size = 0;
124  reg_type = CPURegister::kNoRegister;
125  }
126 
127  explicit Register(const CPURegister& r) {
128  reg_code = r.reg_code;
129  reg_size = r.reg_size;
130  reg_type = r.reg_type;
131  ASSERT(IsValidOrNone());
132  }
133 
134  Register(const Register& r) { // NOLINT(runtime/explicit)
135  reg_code = r.reg_code;
136  reg_size = r.reg_size;
137  reg_type = r.reg_type;
138  ASSERT(IsValidOrNone());
139  }
140 
141  bool IsValid() const {
142  ASSERT(IsRegister() || IsNone());
143  return IsValidRegister();
144  }
145 
146  static Register XRegFromCode(unsigned code);
147  static Register WRegFromCode(unsigned code);
148 
149  // Start of V8 compatibility section ---------------------
150  // These members are necessary for compilation.
151  // A few of them may be unused for now.
152 
153  static const int kNumRegisters = kNumberOfRegisters;
154  static int NumRegisters() { return kNumRegisters; }
155 
156  // We allow crankshaft to use the following registers:
157  // - x0 to x15
158  // - x18 to x24
159  // - x27 (also context)
160  //
161  // TODO(all): Register x25 is currently free and could be available for
162  // crankshaft, but we don't use it as we might use it as a per function
163  // literal pool pointer in the future.
164  //
165  // TODO(all): Consider storing cp in x25 to have only two ranges.
166  // We split allocatable registers in three ranges called
167  // - "low range"
168  // - "high range"
169  // - "context"
170  static const unsigned kAllocatableLowRangeBegin = 0;
171  static const unsigned kAllocatableLowRangeEnd = 15;
172  static const unsigned kAllocatableHighRangeBegin = 18;
173  static const unsigned kAllocatableHighRangeEnd = 24;
174  static const unsigned kAllocatableContext = 27;
175 
176  // Gap between low and high ranges.
177  static const int kAllocatableRangeGapSize =
178  (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
179 
180  static const int kMaxNumAllocatableRegisters =
181  kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1 +
182  kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1 + 1;  // cp
183  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
184 
185  // Return true if the register is one that crankshaft can allocate.
186  bool IsAllocatable() const {
187  return ((reg_code == kAllocatableContext) ||
188  (reg_code <= kAllocatableLowRangeEnd) ||
189  ((reg_code >= kAllocatableHighRangeBegin) &&
190  (reg_code <= kAllocatableHighRangeEnd)));
191  }
192 
193  static Register FromAllocationIndex(unsigned index) {
194  ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
195  // cp is the last allocatable register.
196  if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
197  return from_code(kAllocatableContext);
198  }
199 
200  // Handle low and high ranges.
201  return (index <= kAllocatableLowRangeEnd)
202  ? from_code(index)
203  : from_code(index + kAllocatableRangeGapSize);
204  }
205 
206  static const char* AllocationIndexToString(int index) {
207  ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
208  ASSERT((kAllocatableLowRangeBegin == 0) &&
209  (kAllocatableLowRangeEnd == 15) &&
210  (kAllocatableHighRangeBegin == 18) &&
211  (kAllocatableHighRangeEnd == 24) &&
212  (kAllocatableContext == 27));
213  const char* const names[] = {
214  "x0", "x1", "x2", "x3", "x4",
215  "x5", "x6", "x7", "x8", "x9",
216  "x10", "x11", "x12", "x13", "x14",
217  "x15", "x18", "x19", "x20", "x21",
218  "x22", "x23", "x24", "x27",
219  };
220  return names[index];
221  }
222 
223  static int ToAllocationIndex(Register reg) {
224  ASSERT(reg.IsAllocatable());
225  unsigned code = reg.code();
226  if (code == kAllocatableContext) {
227  return NumAllocatableRegisters() - 1;
228  }
229 
230  return (code <= kAllocatableLowRangeEnd)
231  ? code
232  : code - kAllocatableRangeGapSize;
233  }
234 
235  static Register from_code(int code) {
236  // Always return an X register.
237  return Register::Create(code, kXRegSizeInBits);
238  }
239 
240  // End of V8 compatibility section -----------------------
241 };
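// --- Editorial usage sketch (not part of the original header) ---
// The allocation-index mapping above packs the three allocatable ranges
// (x0-x15, x18-x24, and cp == x27) into contiguous indices by subtracting the
// two-register gap (x16, x17) and placing cp last. A minimal illustration,
// assuming only the declarations above:
//
//   Register r19 = Register::from_code(19);
//   int idx = Register::ToAllocationIndex(r19);        // 19 - 2 == 17
//   Register back = Register::FromAllocationIndex(idx);
//   ASSERT(back.Is(r19));                              // round-trips
//   // cp (x27) maps to the last index, NumAllocatableRegisters() - 1.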
242 
243 
244 struct FPRegister : public CPURegister {
245  static FPRegister Create(unsigned code, unsigned size) {
246  return FPRegister(
247  CPURegister::Create(code, size, CPURegister::kFPRegister));
248  }
249 
250  FPRegister() {
251  reg_code = 0;
252  reg_size = 0;
253  reg_type = CPURegister::kNoRegister;
254  }
255 
256  explicit FPRegister(const CPURegister& r) {
257  reg_code = r.reg_code;
258  reg_size = r.reg_size;
259  reg_type = r.reg_type;
260  ASSERT(IsValidOrNone());
261  }
262 
263  FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
264  reg_code = r.reg_code;
265  reg_size = r.reg_size;
266  reg_type = r.reg_type;
267  ASSERT(IsValidOrNone());
268  }
269 
270  bool IsValid() const {
271  ASSERT(IsFPRegister() || IsNone());
272  return IsValidFPRegister();
273  }
274 
275  static FPRegister SRegFromCode(unsigned code);
276  static FPRegister DRegFromCode(unsigned code);
277 
278  // Start of V8 compatibility section ---------------------
279  static const int kMaxNumRegisters = kNumberOfFPRegisters;
280 
281  // Crankshaft can use all the FP registers except:
282  // - d15 which is used to keep the 0 double value
283  // - d30 which is used in crankshaft as a double scratch register
284  // - d31 which is used in the MacroAssembler as a double scratch register
285  static const unsigned kAllocatableLowRangeBegin = 0;
286  static const unsigned kAllocatableLowRangeEnd = 14;
287  static const unsigned kAllocatableHighRangeBegin = 16;
288  static const unsigned kAllocatableHighRangeEnd = 29;
289 
290  static const RegList kAllocatableFPRegisters = 0x3fff7fff;
291 
292  // Gap between low and high ranges.
293  static const int kAllocatableRangeGapSize =
294  (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
295 
296  static const int kMaxNumAllocatableRegisters =
297  kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1 +
298  kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
299  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
300 
301  // Return true if the register is one that crankshaft can allocate.
302  bool IsAllocatable() const {
303  return (Bit() & kAllocatableFPRegisters) != 0;
304  }
305 
306  static FPRegister FromAllocationIndex(unsigned int index) {
307  ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
308 
309  return (index <= kAllocatableLowRangeEnd)
310  ? from_code(index)
311  : from_code(index + kAllocatableRangeGapSize);
312  }
313 
314  static const char* AllocationIndexToString(int index) {
315  ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
316  ASSERT((kAllocatableLowRangeBegin == 0) &&
317  (kAllocatableLowRangeEnd == 14) &&
318  (kAllocatableHighRangeBegin == 16) &&
319  (kAllocatableHighRangeEnd == 29));
320  const char* const names[] = {
321  "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
322  "d8", "d9", "d10", "d11", "d12", "d13", "d14",
323  "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
324  "d24", "d25", "d26", "d27", "d28", "d29"
325  };
326  return names[index];
327  }
328 
329  static int ToAllocationIndex(FPRegister reg) {
330  ASSERT(reg.IsAllocatable());
331  unsigned code = reg.code();
332 
333  return (code <= kAllocatableLowRangeEnd)
334  ? code
335  : code - kAllocatableRangeGapSize;
336  }
337 
338  static FPRegister from_code(int code) {
339  // Always return a D register.
340  return FPRegister::Create(code, kDRegSizeInBits);
341  }
342  // End of V8 compatibility section -----------------------
343 };
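// --- Editorial note (not part of the original header) ---
// The kAllocatableFPRegisters mask above encodes exactly the two ranges named
// in the comments: bits 0-14 (d0-d14) are 0x00007fff and bits 16-29 (d16-d29)
// are 0x3fff0000, so their union is 0x3fff7fff. This leaves d15 (the zero
// double), d30 and d31 (the scratch registers), and the bit-15 gap clear.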
344 
345 
346 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
347 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
348 
349 
350 #if defined(ARM64_DEFINE_REG_STATICS)
351 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
352  const CPURegister init_##register_class##_##name = {code, size, type}; \
353  const register_class& name = *reinterpret_cast<const register_class*>( \
354  &init_##register_class##_##name)
355 #define ALIAS_REGISTER(register_class, alias, name) \
356  const register_class& alias = *reinterpret_cast<const register_class*>( \
357  &init_##register_class##_##name)
358 #else
359 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
360  extern const register_class& name
361 #define ALIAS_REGISTER(register_class, alias, name) \
362  extern const register_class& alias
363 #endif // defined(ARM64_DEFINE_REG_STATICS)
364 
365 // No*Reg is used to indicate an unused argument, or an error case. Note that
366 // these all compare equal (using the Is() method). The Register and FPRegister
367 // variants are provided for convenience.
368 INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
369 INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
370 INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
371 
372 // v8 compatibility.
373 INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
374 
375 #define DEFINE_REGISTERS(N) \
376  INITIALIZE_REGISTER(Register, w##N, N, \
377  kWRegSizeInBits, CPURegister::kRegister); \
378  INITIALIZE_REGISTER(Register, x##N, N, \
379  kXRegSizeInBits, CPURegister::kRegister);
380 REGISTER_CODE_LIST(DEFINE_REGISTERS)
381 #undef DEFINE_REGISTERS
382 
383 INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
384  CPURegister::kRegister);
385 INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
386  CPURegister::kRegister);
387 
388 #define DEFINE_FPREGISTERS(N) \
389  INITIALIZE_REGISTER(FPRegister, s##N, N, \
390  kSRegSizeInBits, CPURegister::kFPRegister); \
391  INITIALIZE_REGISTER(FPRegister, d##N, N, \
392  kDRegSizeInBits, CPURegister::kFPRegister);
393 REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
394 #undef DEFINE_FPREGISTERS
395 
396 #undef INITIALIZE_REGISTER
397 
398 // Register aliases.
399 ALIAS_REGISTER(Register, ip0, x16);
400 ALIAS_REGISTER(Register, ip1, x17);
401 ALIAS_REGISTER(Register, wip0, w16);
402 ALIAS_REGISTER(Register, wip1, w17);
403 // Root register.
404 ALIAS_REGISTER(Register, root, x26);
405 ALIAS_REGISTER(Register, rr, x26);
406 // Context pointer register.
407 ALIAS_REGISTER(Register, cp, x27);
408 // We use a register as a JS stack pointer to overcome the restriction on the
409 // architectural SP alignment.
410 // We chose x28 because it is contiguous with the other specific purpose
411 // registers.
412 STATIC_ASSERT(kJSSPCode == 28);
413 ALIAS_REGISTER(Register, jssp, x28);
414 ALIAS_REGISTER(Register, wjssp, w28);
415 ALIAS_REGISTER(Register, fp, x29);
416 ALIAS_REGISTER(Register, lr, x30);
417 ALIAS_REGISTER(Register, xzr, x31);
418 ALIAS_REGISTER(Register, wzr, w31);
419 
420 // Keeps the 0 double value.
421 ALIAS_REGISTER(FPRegister, fp_zero, d15);
422 // Crankshaft double scratch register.
423 ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30);
424 // MacroAssembler double scratch register.
425 ALIAS_REGISTER(FPRegister, fp_scratch, d31);
426 
427 #undef ALIAS_REGISTER
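// --- Editorial usage sketch (not part of the original header) ---
// The aliases above give architectural names to the numbered registers, e.g.
// lr (x30) for the link register and jssp (x28) for the JavaScript stack
// pointer. A hedged illustration, assuming an Assembler instance 'masm':
//
//   masm.mov(jssp, csp);     // copy the system stack pointer into jssp
//   masm.cmp(x0, xzr);       // xzr reads as zero, so this compares x0 with 0
//   masm.ret(lr);            // return through the link register (the default)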
428 
429 
430 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
431  Register reg2 = NoReg,
432  Register reg3 = NoReg,
433  Register reg4 = NoReg);
434 
435 
436 // AreAliased returns true if any of the named registers overlap. Arguments set
437 // to NoReg are ignored. The system stack pointer may be specified.
438 bool AreAliased(const CPURegister& reg1,
439  const CPURegister& reg2,
440  const CPURegister& reg3 = NoReg,
441  const CPURegister& reg4 = NoReg,
442  const CPURegister& reg5 = NoReg,
443  const CPURegister& reg6 = NoReg,
444  const CPURegister& reg7 = NoReg,
445  const CPURegister& reg8 = NoReg);
446 
447 // AreSameSizeAndType returns true if all of the specified registers have the
448 // same size, and are of the same type. The system stack pointer may be
449 // specified. Arguments set to NoReg are ignored, as are any subsequent
450 // arguments. At least one argument (reg1) must be valid (not NoCPUReg).
451 bool AreSameSizeAndType(const CPURegister& reg1,
452  const CPURegister& reg2,
453  const CPURegister& reg3 = NoCPUReg,
454  const CPURegister& reg4 = NoCPUReg,
455  const CPURegister& reg5 = NoCPUReg,
456  const CPURegister& reg6 = NoCPUReg,
457  const CPURegister& reg7 = NoCPUReg,
458  const CPURegister& reg8 = NoCPUReg);
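// --- Editorial usage sketch (not part of the original header) ---
// AreAliased and AreSameSizeAndType are typically used in ASSERTs to validate
// scratch-register choices. A minimal sketch, assuming the registers above:
//
//   ASSERT(!AreAliased(x0, x1, x2));       // three distinct X registers
//   ASSERT(AreAliased(x0, w0));            // w0 is the low half of x0
//   ASSERT(AreSameSizeAndType(x0, x1));    // both 64-bit integer registers
//   ASSERT(!AreSameSizeAndType(x0, d0));   // integer vs. FP register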
459 
460 
461 typedef FPRegister DoubleRegister;
462 
463 
464 // -----------------------------------------------------------------------------
465 // Lists of registers.
466 class CPURegList {
467  public:
468  explicit CPURegList(CPURegister reg1,
469  CPURegister reg2 = NoCPUReg,
470  CPURegister reg3 = NoCPUReg,
471  CPURegister reg4 = NoCPUReg)
472  : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
473  size_(reg1.SizeInBits()), type_(reg1.type()) {
474  ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
475  ASSERT(IsValid());
476  }
477 
478  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
479  : list_(list), size_(size), type_(type) {
480  ASSERT(IsValid());
481  }
482 
483  CPURegList(CPURegister::RegisterType type, unsigned size,
484  unsigned first_reg, unsigned last_reg)
485  : size_(size), type_(type) {
486  ASSERT(((type == CPURegister::kRegister) &&
487  (last_reg < kNumberOfRegisters)) ||
488  ((type == CPURegister::kFPRegister) &&
489  (last_reg < kNumberOfFPRegisters)));
490  ASSERT(last_reg >= first_reg);
491  list_ = (1UL << (last_reg + 1)) - 1;
492  list_ &= ~((1UL << first_reg) - 1);
493  ASSERT(IsValid());
494  }
495 
496  CPURegister::RegisterType type() const {
497  ASSERT(IsValid());
498  return type_;
499  }
500 
501  RegList list() const {
502  ASSERT(IsValid());
503  return list_;
504  }
505 
506  inline void set_list(RegList new_list) {
507  ASSERT(IsValid());
508  list_ = new_list;
509  }
510 
511  // Combine another CPURegList into this one. Registers that already exist in
512  // this list are left unchanged. The type and size of the registers in the
513  // 'other' list must match those in this list.
514  void Combine(const CPURegList& other);
515 
516  // Remove every register in the other CPURegList from this one. Registers that
517  // do not exist in this list are ignored. The type and size of the registers
518  // in the 'other' list must match those in this list.
519  void Remove(const CPURegList& other);
520 
521  // Variants of Combine and Remove which take CPURegisters.
522  void Combine(const CPURegister& other);
523  void Remove(const CPURegister& other1,
524  const CPURegister& other2 = NoCPUReg,
525  const CPURegister& other3 = NoCPUReg,
526  const CPURegister& other4 = NoCPUReg);
527 
528  // Variants of Combine and Remove which take a single register by its code;
529  // the type and size of the register is inferred from this list.
530  void Combine(int code);
531  void Remove(int code);
532 
533  // Remove all callee-saved registers from the list. This can be useful when
534  // preparing registers for an AAPCS64 function call, for example.
535  void RemoveCalleeSaved();
536 
537  CPURegister PopLowestIndex();
538  CPURegister PopHighestIndex();
539 
540  // AAPCS64 callee-saved registers.
541  static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
542  static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
543 
544  // AAPCS64 caller-saved registers. Note that this includes lr.
545  static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
546  static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
547 
548  // Registers saved as safepoints.
549  static CPURegList GetSafepointSavedRegisters();
550 
551  bool IsEmpty() const {
552  ASSERT(IsValid());
553  return list_ == 0;
554  }
555 
556  bool IncludesAliasOf(const CPURegister& other1,
557  const CPURegister& other2 = NoCPUReg,
558  const CPURegister& other3 = NoCPUReg,
559  const CPURegister& other4 = NoCPUReg) const {
560  ASSERT(IsValid());
561  RegList list = 0;
562  if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
563  if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
564  if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
565  if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
566  return (list_ & list) != 0;
567  }
568 
569  int Count() const {
570  ASSERT(IsValid());
571  return CountSetBits(list_, kRegListSizeInBits);
572  }
573 
574  unsigned RegisterSizeInBits() const {
575  ASSERT(IsValid());
576  return size_;
577  }
578 
579  unsigned RegisterSizeInBytes() const {
580  int size_in_bits = RegisterSizeInBits();
581  ASSERT((size_in_bits % kBitsPerByte) == 0);
582  return size_in_bits / kBitsPerByte;
583  }
584 
585  private:
586  RegList list_;
587  unsigned size_;
588  CPURegister::RegisterType type_;
589 
590  bool IsValid() const {
591  const RegList kValidRegisters = 0x8000000ffffffff;
592  const RegList kValidFPRegisters = 0x0000000ffffffff;
593  switch (type_) {
594  case CPURegister::kRegister:
595  return (list_ & kValidRegisters) == list_;
596  case CPURegister::kFPRegister:
597  return (list_ & kValidFPRegisters) == list_;
598  case CPURegister::kNoRegister:
599  return list_ == 0;
600  default:
601  UNREACHABLE();
602  return false;
603  }
604  }
605 };
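// --- Editorial usage sketch (not part of the original header) ---
// CPURegList is a bit-set over registers of a single type and size. A minimal
// sketch of Combine/Remove, assuming only the declarations above:
//
//   CPURegList list(x0, x1, x2);        // {x0, x1, x2}
//   list.Combine(x4);                   // {x0, x1, x2, x4}
//   list.Remove(x1);                    // {x0, x2, x4}
//   ASSERT(list.IncludesAliasOf(w2));   // w2 aliases x2
//   ASSERT(list.Count() == 3);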
606 
607 
608 // AAPCS64 callee-saved registers.
609 #define kCalleeSaved CPURegList::GetCalleeSaved()
610 #define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
611 
612 
613 // AAPCS64 caller-saved registers. Note that this includes lr.
614 #define kCallerSaved CPURegList::GetCallerSaved()
615 #define kCallerSavedFP CPURegList::GetCallerSavedFP()
616 
617 
618 // -----------------------------------------------------------------------------
619 // Operands.
620 const int kSmiShift = kSmiTagSize + kSmiShiftSize;
621 const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
622 
623 // Represents an operand in a machine instruction.
624 class Operand {
625  // TODO(all): If necessary, study more in details which methods
626  // TODO(all): should be inlined or not.
627  public:
628  // rm, {<shift> {#<shift_amount>}}
629  // where <shift> is one of {LSL, LSR, ASR, ROR}.
630  // <shift_amount> is uint6_t.
631  // This is allowed to be an implicit constructor because Operand is
632  // a wrapper class that doesn't normally perform any type conversion.
633  inline Operand(Register reg,
634  Shift shift = LSL,
635  unsigned shift_amount = 0); // NOLINT(runtime/explicit)
636 
637  // rm, <extend> {#<shift_amount>}
638  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
639  // <shift_amount> is uint2_t.
640  inline Operand(Register reg,
641  Extend extend,
642  unsigned shift_amount = 0);
643 
644  template<typename T>
645  inline explicit Operand(Handle<T> handle);
646 
647  // Implicit constructor for all int types, ExternalReference, and Smi.
648  template<typename T>
649  inline Operand(T t); // NOLINT(runtime/explicit)
650 
651  // Implicit constructor for int types.
652  template<typename int_t>
653  inline Operand(int_t t, RelocInfo::Mode rmode);
654 
655  inline bool IsImmediate() const;
656  inline bool IsShiftedRegister() const;
657  inline bool IsExtendedRegister() const;
658  inline bool IsZero() const;
659 
660  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
661  // which helps in the encoding of instructions that use the stack pointer.
662  inline Operand ToExtendedRegister() const;
663 
664  inline int64_t immediate() const;
665  inline Register reg() const;
666  inline Shift shift() const;
667  inline Extend extend() const;
668  inline unsigned shift_amount() const;
669 
670  // Relocation information.
671  RelocInfo::Mode rmode() const { return rmode_; }
672  void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
673  bool NeedsRelocation() const;
674 
675  // Helpers
676  inline static Operand UntagSmi(Register smi);
677  inline static Operand UntagSmiAndScale(Register smi, int scale);
678 
679  private:
680  void initialize_handle(Handle<Object> value);
681  int64_t immediate_;
682  Register reg_;
683  Shift shift_;
684  Extend extend_;
685  unsigned shift_amount_;
686  RelocInfo::Mode rmode_;
687 };
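// --- Editorial usage sketch (not part of the original header) ---
// An Operand is either an immediate, a shifted register or an extended
// register, and the implicit constructors let all three forms appear directly
// at call sites. A hedged illustration, assuming an Assembler instance 'masm':
//
//   masm.add(x0, x1, Operand(8));               // immediate
//   masm.add(x0, x1, Operand(x2, LSL, 4));      // shifted register: x2 << 4
//   masm.add(x0, x1, Operand(w2, SXTW));        // extended register
//   masm.add(x0, x1, Operand::UntagSmi(x2));    // x2 shifted right by kSmiShift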
688 
689 
690 // MemOperand represents a memory operand in a load or store instruction.
691 class MemOperand {
692  public:
693  inline explicit MemOperand(Register base,
694  ptrdiff_t offset = 0,
695  AddrMode addrmode = Offset);
696  inline explicit MemOperand(Register base,
697  Register regoffset,
698  Shift shift = LSL,
699  unsigned shift_amount = 0);
700  inline explicit MemOperand(Register base,
701  Register regoffset,
702  Extend extend,
703  unsigned shift_amount = 0);
704  inline explicit MemOperand(Register base,
705  const Operand& offset,
706  AddrMode addrmode = Offset);
707 
708  const Register& base() const { return base_; }
709  const Register& regoffset() const { return regoffset_; }
710  ptrdiff_t offset() const { return offset_; }
711  AddrMode addrmode() const { return addrmode_; }
712  Shift shift() const { return shift_; }
713  Extend extend() const { return extend_; }
714  unsigned shift_amount() const { return shift_amount_; }
715  inline bool IsImmediateOffset() const;
716  inline bool IsRegisterOffset() const;
717  inline bool IsPreIndex() const;
718  inline bool IsPostIndex() const;
719 
720  // For offset modes, return the offset as an Operand. This helper cannot
721  // handle indexed modes.
722  inline Operand OffsetAsOperand() const;
723 
724  private:
725  Register base_;
726  Register regoffset_;
727  ptrdiff_t offset_;
728  AddrMode addrmode_;
729  Shift shift_;
730  Extend extend_;
731  unsigned shift_amount_;
732 };
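// --- Editorial usage sketch (not part of the original header) ---
// MemOperand covers the A64 addressing modes: immediate offset, register
// offset (optionally shifted or extended), and pre/post-index with writeback.
// A hedged illustration, assuming an Assembler instance 'masm':
//
//   masm.ldr(x0, MemOperand(x1));                // [x1]
//   masm.ldr(x0, MemOperand(x1, 16));            // [x1, #16]
//   masm.ldr(x0, MemOperand(x1, x2, LSL, 3));    // [x1, x2, lsl #3]
//   masm.str(x0, MemOperand(x1, 8, PreIndex));   // [x1, #8]! (writeback)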
733 
734 
735 // -----------------------------------------------------------------------------
736 // Assembler.
737 
738 class Assembler : public AssemblerBase {
739  public:
740  // Create an assembler. Instructions and relocation information are emitted
741  // into a buffer, with the instructions starting from the beginning and the
742  // relocation information starting from the end of the buffer. See CodeDesc
743  // for a detailed comment on the layout (globals.h).
744  //
745  // If the provided buffer is NULL, the assembler allocates and grows its own
746  // buffer, and buffer_size determines the initial buffer size. The buffer is
747  // owned by the assembler and deallocated upon destruction of the assembler.
748  //
749  // If the provided buffer is not NULL, the assembler uses the provided buffer
750  // for code generation and assumes its size to be buffer_size. If the buffer
751  // is too small, a fatal error occurs. No deallocation of the buffer is done
752  // upon destruction of the assembler.
753  Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
754 
755  virtual ~Assembler();
756 
757  virtual void AbortedCodeGeneration() {
758  num_pending_reloc_info_ = 0;
759  }
760 
761  // System functions ---------------------------------------------------------
762  // Start generating code from the beginning of the buffer, discarding any code
763  // and data that has already been emitted into the buffer.
764  //
765  // In order to avoid any accidental transfer of state, Reset ASSERTs that the
766  // constant pool is not blocked.
767  void Reset();
768 
769  // GetCode emits any pending (non-emitted) code and fills the descriptor
770  // desc. GetCode() is idempotent; it returns the same result if no other
771  // Assembler functions are invoked in between GetCode() calls.
772  //
773  // The descriptor (desc) can be NULL. In that case, the code is finalized as
774  // usual, but the descriptor is not populated.
775  void GetCode(CodeDesc* desc);
776 
777  // Insert the smallest number of nop instructions
778  // possible to align the pc offset to a multiple
779  // of m. m must be a power of 2 (>= 4).
780  void Align(int m);
781 
782  inline void Unreachable();
783 
784  // Label --------------------------------------------------------------------
785  // Bind a label to the current pc. Note that labels can only be bound once,
786  // and if labels are linked to other instructions, they _must_ be bound
787  // before they go out of scope.
788  void bind(Label* label);
789 
790 
791  // RelocInfo and pools ------------------------------------------------------
792 
793  // Record relocation information for current pc_.
794  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
795 
796  // Return the address in the constant pool of the code target address used by
797  // the branch/call instruction at pc.
798  inline static Address target_pointer_address_at(Address pc);
799 
800  // Read/Modify the code target address in the branch/call instruction at pc.
801  inline static Address target_address_at(Address pc,
802  ConstantPoolArray* constant_pool);
803  inline static void set_target_address_at(Address pc,
804  ConstantPoolArray* constant_pool,
805  Address target);
806  static inline Address target_address_at(Address pc, Code* code);
807  static inline void set_target_address_at(Address pc,
808  Code* code,
809  Address target);
810 
811  // Return the code target address at a call site from the return address of
812  // that call in the instruction stream.
813  static inline Address target_address_from_return_address(Address pc);
814 
815  // Given the address of the beginning of a call, return the address in the
816  // instruction stream that call will return from.
817  static inline Address return_address_from_call_start(Address pc);
818 
819  // This sets the branch destination (which is in the constant pool on ARM).
820  // This is for calls and branches within generated code.
821  inline static void deserialization_set_special_target_at(
822  Address constant_pool_entry, Code* code, Address target);
823 
824  // All addresses in the constant pool are the same size as pointers.
825  static const int kSpecialTargetSize = kPointerSize;
826 
827  // The sizes of the call sequences emitted by MacroAssembler::Call.
828  // Wherever possible, use MacroAssembler::CallSize instead of these constants,
829  // as it will choose the correct value for a given relocation mode.
830  //
831  // Without relocation:
832  // movz temp, #(target & 0x000000000000ffff)
833  // movk temp, #(target & 0x00000000ffff0000)
834  // movk temp, #(target & 0x0000ffff00000000)
835  // blr temp
836  //
837  // With relocation:
838  // ldr temp, =target
839  // blr temp
840  static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
841  static const int kCallSizeWithRelocation = 2 * kInstructionSize;
842 
843  // Size of the generated code in bytes
844  uint64_t SizeOfGeneratedCode() const {
845  ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
846  return pc_ - buffer_;
847  }
848 
849  // Return the code size generated from label to the current position.
850  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
851  ASSERT(label->is_bound());
852  ASSERT(pc_offset() >= label->pos());
854  return pc_offset() - label->pos();
855  }
856 
857  // Check the size of the code generated since the given label. This function
858  // is used primarily to work around comparisons between signed and unsigned
859  // quantities, since V8 uses both.
860  // TODO(jbramley): Work out what sign to use for these things and if possible,
861  // change things to be consistent.
862  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
863  ASSERT(size >= 0);
864  ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
865  }
866 
867  // Return the number of instructions generated from label to the
868  // current position.
869  int InstructionsGeneratedSince(const Label* label) {
870  return SizeOfCodeGeneratedSince(label) / kInstructionSize;
871  }
872 
873  // Number of instructions generated for the return sequence in
874  // FullCodeGenerator::EmitReturnSequence.
875  static const int kJSRetSequenceInstructions = 7;
876  // Distance between start of patched return sequence and the emitted address
877  // to jump to.
878  static const int kPatchReturnSequenceAddressOffset = 0;
879  static const int kPatchDebugBreakSlotAddressOffset = 0;
880 
881  // Number of instructions necessary to be able to later patch it to a call.
882  // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
883  static const int kDebugBreakSlotInstructions = 4;
884  static const int kDebugBreakSlotLength =
885  kDebugBreakSlotInstructions * kInstructionSize;
886 
887  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
888 
889  // Prevent constant pool emission until EndBlockConstPool is called.
890  // Calls to this function can be nested but must be followed by an equal
891  // number of calls to EndBlockConstPool.
892  void StartBlockConstPool();
893 
894  // Resume constant pool emission. Must be called as many times as
895  // StartBlockConstPool to have an effect.
896  void EndBlockConstPool();
897 
898  bool is_const_pool_blocked() const;
899  static bool IsConstantPoolAt(Instruction* instr);
900  static int ConstantPoolSizeAt(Instruction* instr);
901  // See Assembler::CheckConstPool for more info.
902  void ConstantPoolMarker(uint32_t size);
903  void EmitPoolGuard();
904  void ConstantPoolGuard();
905 
906  // Prevent veneer pool emission until EndBlockVeneerPool is called.
907  // Calls to this function can be nested but must be followed by an equal
908  // number of calls to EndBlockVeneerPool.
909  void StartBlockVeneerPool();
910 
911  // Resume veneer pool emission. Must be called as many times as
912  // StartBlockVeneerPool to have an effect.
913  void EndBlockVeneerPool();
914 
915  bool is_veneer_pool_blocked() const {
916  return veneer_pool_blocked_nesting_ > 0;
917  }
918 
919  // Block/resume emission of constant pools and veneer pools.
920  void StartBlockPools() {
921  StartBlockConstPool();
922  StartBlockVeneerPool();
923  }
924  void EndBlockPools() {
925  EndBlockConstPool();
926  EndBlockVeneerPool();
927  }
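// --- Editorial usage sketch (not part of the original header) ---
// The Start/End pairs above nest, so a code sequence that must not be split
// by a pool (for example a patchable call site) brackets its emission. A
// minimal sketch, assuming an Assembler instance 'masm':
//
//   masm.StartBlockPools();   // no constant or veneer pool may be emitted
//   // ... emit a fixed-layout instruction sequence here ...
//   masm.EndBlockPools();     // pools may be emitted again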
928 
929  // Debugging ----------------------------------------------------------------
930  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
931  void RecordComment(const char* msg);
932  int buffer_space() const;
933 
934  // Mark address of the ExitJSFrame code.
935  void RecordJSReturn();
936 
937  // Mark address of a debug break slot.
938  void RecordDebugBreakSlot();
939 
940  // Record the emission of a constant pool.
941  //
942  // The emission of constant and veneer pools depends on the size of the code
943  // generated and the number of RelocInfo recorded.
944  // The Debug mechanism needs to map code offsets between two versions of a
945  // function, compiled with and without debugger support (see for example
946  // Debug::PrepareForBreakPoints()).
947  // Compiling functions with debugger support generates additional code
948  // (Debug::GenerateSlot()). This may affect the emission of the pools and
949  // cause the version of the code with debugger support to have pools generated
950  // in different places.
951  // Recording the position and size of emitted pools makes it possible to
952  // correctly compute the offset mappings between the different versions of a
953  // function in all situations.
954  //
955  // The parameter indicates the size of the pool (in bytes), including
956  // the marker and branch over the data.
957  void RecordConstPool(int size);
958 
959 
960  // Instruction set functions ------------------------------------------------
961 
962  // Branch / Jump instructions.
963  // For branches, offsets are scaled, i.e. they are in instructions, not in bytes.
964  // Branch to register.
965  void br(const Register& xn);
966 
967  // Branch-link to register.
968  void blr(const Register& xn);
969 
970  // Branch to register with return hint.
971  void ret(const Register& xn = lr);
972 
973  // Unconditional branch to label.
974  void b(Label* label);
975 
976  // Conditional branch to label.
977  void b(Label* label, Condition cond);
978 
979  // Unconditional branch to PC offset.
980  void b(int imm26);
981 
982  // Conditional branch to PC offset.
983  void b(int imm19, Condition cond);
984 
985  // Branch-link to label / pc offset.
986  void bl(Label* label);
987  void bl(int imm26);
988 
989  // Compare and branch to label / pc offset if zero.
990  void cbz(const Register& rt, Label* label);
991  void cbz(const Register& rt, int imm19);
992 
993  // Compare and branch to label / pc offset if not zero.
994  void cbnz(const Register& rt, Label* label);
995  void cbnz(const Register& rt, int imm19);
996 
997  // Test bit and branch to label / pc offset if zero.
998  void tbz(const Register& rt, unsigned bit_pos, Label* label);
999  void tbz(const Register& rt, unsigned bit_pos, int imm14);
1000 
1001  // Test bit and branch to label / pc offset if not zero.
1002  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
1003  void tbnz(const Register& rt, unsigned bit_pos, int imm14);
1004 
1005  // Address calculation instructions.
1006  // Calculate a PC-relative address. Unlike for branches, the offset in adr is
1007  // unscaled (i.e. the result can be unaligned).
1008  void adr(const Register& rd, Label* label);
1009  void adr(const Register& rd, int imm21);
1010 
1011  // Data Processing instructions.
1012  // Add.
1013  void add(const Register& rd,
1014  const Register& rn,
1015  const Operand& operand);
1016 
1017  // Add and update status flags.
1018  void adds(const Register& rd,
1019  const Register& rn,
1020  const Operand& operand);
1021 
1022  // Compare negative.
1023  void cmn(const Register& rn, const Operand& operand);
1024 
1025  // Subtract.
1026  void sub(const Register& rd,
1027  const Register& rn,
1028  const Operand& operand);
1029 
1030  // Subtract and update status flags.
1031  void subs(const Register& rd,
1032  const Register& rn,
1033  const Operand& operand);
1034 
1035  // Compare.
1036  void cmp(const Register& rn, const Operand& operand);
1037 
1038  // Negate.
1039  void neg(const Register& rd,
1040  const Operand& operand);
1041 
1042  // Negate and update status flags.
1043  void negs(const Register& rd,
1044  const Operand& operand);
1045 
1046  // Add with carry bit.
1047  void adc(const Register& rd,
1048  const Register& rn,
1049  const Operand& operand);
1050 
1051  // Add with carry bit and update status flags.
1052  void adcs(const Register& rd,
1053  const Register& rn,
1054  const Operand& operand);
1055 
1056  // Subtract with carry bit.
1057  void sbc(const Register& rd,
1058  const Register& rn,
1059  const Operand& operand);
1060 
1061  // Subtract with carry bit and update status flags.
1062  void sbcs(const Register& rd,
1063  const Register& rn,
1064  const Operand& operand);
1065 
1066  // Negate with carry bit.
1067  void ngc(const Register& rd,
1068  const Operand& operand);
1069 
1070  // Negate with carry bit and update status flags.
1071  void ngcs(const Register& rd,
1072  const Operand& operand);
1073 
1074  // Logical instructions.
1075  // Bitwise and (A & B).
1076  void and_(const Register& rd,
1077  const Register& rn,
1078  const Operand& operand);
1079 
1080  // Bitwise and (A & B) and update status flags.
1081  void ands(const Register& rd,
1082  const Register& rn,
1083  const Operand& operand);
1084 
1085  // Bit test, and set flags.
1086  void tst(const Register& rn, const Operand& operand);
1087 
1088  // Bit clear (A & ~B).
1089  void bic(const Register& rd,
1090  const Register& rn,
1091  const Operand& operand);
1092 
1093  // Bit clear (A & ~B) and update status flags.
1094  void bics(const Register& rd,
1095  const Register& rn,
1096  const Operand& operand);
1097 
1098  // Bitwise or (A | B).
1099  void orr(const Register& rd, const Register& rn, const Operand& operand);
1100 
1101  // Bitwise or-not (A | ~B).
1102  void orn(const Register& rd, const Register& rn, const Operand& operand);
1103 
1104  // Bitwise eor/xor (A ^ B).
1105  void eor(const Register& rd, const Register& rn, const Operand& operand);
1106 
1107  // Bitwise exclusive or-not/xnor (A ^ ~B).
1108  void eon(const Register& rd, const Register& rn, const Operand& operand);
1109 
1110  // Logical shift left variable.
1111  void lslv(const Register& rd, const Register& rn, const Register& rm);
1112 
1113  // Logical shift right variable.
1114  void lsrv(const Register& rd, const Register& rn, const Register& rm);
1115 
1116  // Arithmetic shift right variable.
1117  void asrv(const Register& rd, const Register& rn, const Register& rm);
1118 
1119  // Rotate right variable.
1120  void rorv(const Register& rd, const Register& rn, const Register& rm);
1121 
1122  // Bitfield instructions.
1123  // Bitfield move.
1124  void bfm(const Register& rd,
1125  const Register& rn,
1126  unsigned immr,
1127  unsigned imms);
1128 
1129  // Signed bitfield move.
1130  void sbfm(const Register& rd,
1131  const Register& rn,
1132  unsigned immr,
1133  unsigned imms);
1134 
1135  // Unsigned bitfield move.
1136  void ubfm(const Register& rd,
1137  const Register& rn,
1138  unsigned immr,
1139  unsigned imms);
1140 
1141  // Bfm aliases.
1142  // Bitfield insert.
1143  void bfi(const Register& rd,
1144  const Register& rn,
1145  unsigned lsb,
1146  unsigned width) {
1147  ASSERT(width >= 1);
1148  ASSERT(lsb + width <= rn.SizeInBits());
1149  bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1150  }
1151 
1152  // Bitfield extract and insert low.
1153  void bfxil(const Register& rd,
1154  const Register& rn,
1155  unsigned lsb,
1156  unsigned width) {
1157  ASSERT(width >= 1);
1158  ASSERT(lsb + width <= rn.SizeInBits());
1159  bfm(rd, rn, lsb, lsb + width - 1);
1160  }
1161 
1162  // Sbfm aliases.
1163  // Arithmetic shift right.
1164  void asr(const Register& rd, const Register& rn, unsigned shift) {
1165  ASSERT(shift < rd.SizeInBits());
1166  sbfm(rd, rn, shift, rd.SizeInBits() - 1);
1167  }
1168 
1169  // Signed bitfield insert in zero.
1170  void sbfiz(const Register& rd,
1171  const Register& rn,
1172  unsigned lsb,
1173  unsigned width) {
1174  ASSERT(width >= 1);
1175  ASSERT(lsb + width <= rn.SizeInBits());
1176  sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1177  }
1178 
1179  // Signed bitfield extract.
1180  void sbfx(const Register& rd,
1181  const Register& rn,
1182  unsigned lsb,
1183  unsigned width) {
1184  ASSERT(width >= 1);
1185  ASSERT(lsb + width <= rn.SizeInBits());
1186  sbfm(rd, rn, lsb, lsb + width - 1);
1187  }
1188 
1189  // Signed extend byte.
1190  void sxtb(const Register& rd, const Register& rn) {
1191  sbfm(rd, rn, 0, 7);
1192  }
1193 
1194  // Signed extend halfword.
1195  void sxth(const Register& rd, const Register& rn) {
1196  sbfm(rd, rn, 0, 15);
1197  }
1198 
1199  // Signed extend word.
1200  void sxtw(const Register& rd, const Register& rn) {
1201  sbfm(rd, rn, 0, 31);
1202  }
1203 
1204  // Ubfm aliases.
1205  // Logical shift left.
1206  void lsl(const Register& rd, const Register& rn, unsigned shift) {
1207  unsigned reg_size = rd.SizeInBits();
1208  ASSERT(shift < reg_size);
1209  ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
1210  }
1211 
1212  // Logical shift right.
1213  void lsr(const Register& rd, const Register& rn, unsigned shift) {
1214  ASSERT(shift < rd.SizeInBits());
1215  ubfm(rd, rn, shift, rd.SizeInBits() - 1);
1216  }
1217 
1218  // Unsigned bitfield insert in zero.
1219  void ubfiz(const Register& rd,
1220  const Register& rn,
1221  unsigned lsb,
1222  unsigned width) {
1223  ASSERT(width >= 1);
1224  ASSERT(lsb + width <= rn.SizeInBits());
1225  ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1226  }
1227 
1228  // Unsigned bitfield extract.
1229  void ubfx(const Register& rd,
1230  const Register& rn,
1231  unsigned lsb,
1232  unsigned width) {
1233  ASSERT(width >= 1);
1234  ASSERT(lsb + width <= rn.SizeInBits());
1235  ubfm(rd, rn, lsb, lsb + width - 1);
1236  }
1237 
1238  // Unsigned extend byte.
1239  void uxtb(const Register& rd, const Register& rn) {
1240  ubfm(rd, rn, 0, 7);
1241  }
1242 
1243  // Unsigned extend halfword.
1244  void uxth(const Register& rd, const Register& rn) {
1245  ubfm(rd, rn, 0, 15);
1246  }
1247 
1248  // Unsigned extend word.
1249  void uxtw(const Register& rd, const Register& rn) {
1250  ubfm(rd, rn, 0, 31);
1251  }
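// --- Editorial note (not part of the original header) ---
// The aliases above all reduce to the three bitfield-move primitives. For
// example, for a 64-bit register, lsl(rd, rn, 8) becomes
// ubfm(rd, rn, (64 - 8) % 64, 64 - 8 - 1), i.e. ubfm(rd, rn, 56, 55), and
// uxtb(rd, rn) is ubfm(rd, rn, 0, 7): "extract bits 7:0 and zero-extend".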
1252 
1253  // Extract.
1254  void extr(const Register& rd,
1255  const Register& rn,
1256  const Register& rm,
1257  unsigned lsb);
1258 
1259  // Conditional select: rd = cond ? rn : rm.
1260  void csel(const Register& rd,
1261  const Register& rn,
1262  const Register& rm,
1263  Condition cond);
1264 
1265  // Conditional select increment: rd = cond ? rn : rm + 1.
1266  void csinc(const Register& rd,
1267  const Register& rn,
1268  const Register& rm,
1269  Condition cond);
1270 
1271  // Conditional select inversion: rd = cond ? rn : ~rm.
1272  void csinv(const Register& rd,
1273  const Register& rn,
1274  const Register& rm,
1275  Condition cond);
1276 
1277  // Conditional select negation: rd = cond ? rn : -rm.
1278  void csneg(const Register& rd,
1279  const Register& rn,
1280  const Register& rm,
1281  Condition cond);
1282 
1283  // Conditional set: rd = cond ? 1 : 0.
1284  void cset(const Register& rd, Condition cond);
1285 
1286  // Conditional set minus: rd = cond ? -1 : 0.
1287  void csetm(const Register& rd, Condition cond);
1288 
1289  // Conditional increment: rd = cond ? rn + 1 : rn.
1290  void cinc(const Register& rd, const Register& rn, Condition cond);
1291 
1292  // Conditional invert: rd = cond ? ~rn : rn.
1293  void cinv(const Register& rd, const Register& rn, Condition cond);
1294 
1295  // Conditional negate: rd = cond ? -rn : rn.
1296  void cneg(const Register& rd, const Register& rn, Condition cond);
1297 
1298  // Extr aliases.
1299  void ror(const Register& rd, const Register& rs, unsigned shift) {
1300  extr(rd, rs, rs, shift);
1301  }
1302 
1303  // Conditional comparison.
1304  // Conditional compare negative.
1305  void ccmn(const Register& rn,
1306  const Operand& operand,
1307  StatusFlags nzcv,
1308  Condition cond);
1309 
1310  // Conditional compare.
1311  void ccmp(const Register& rn,
1312  const Operand& operand,
1313  StatusFlags nzcv,
1314  Condition cond);
1315 
1316  // Multiplication.
1317  // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
1318  void mul(const Register& rd, const Register& rn, const Register& rm);
1319 
1320  // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
1321  void madd(const Register& rd,
1322  const Register& rn,
1323  const Register& rm,
1324  const Register& ra);
1325 
1326  // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
1327  void mneg(const Register& rd, const Register& rn, const Register& rm);
1328 
1329  // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
1330  void msub(const Register& rd,
1331  const Register& rn,
1332  const Register& rm,
1333  const Register& ra);
1334 
1335  // 32 x 32 -> 64-bit multiply.
1336  void smull(const Register& rd, const Register& rn, const Register& rm);
1337 
1338  // Xd = bits<127:64> of Xn * Xm.
1339  void smulh(const Register& rd, const Register& rn, const Register& rm);
1340 
1341  // Signed 32 x 32 -> 64-bit multiply and accumulate.
1342  void smaddl(const Register& rd,
1343  const Register& rn,
1344  const Register& rm,
1345  const Register& ra);
1346 
1347  // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
1348  void umaddl(const Register& rd,
1349  const Register& rn,
1350  const Register& rm,
1351  const Register& ra);
1352 
1353  // Signed 32 x 32 -> 64-bit multiply and subtract.
1354  void smsubl(const Register& rd,
1355  const Register& rn,
1356  const Register& rm,
1357  const Register& ra);
1358 
1359  // Unsigned 32 x 32 -> 64-bit multiply and subtract.
1360  void umsubl(const Register& rd,
1361  const Register& rn,
1362  const Register& rm,
1363  const Register& ra);
1364 
1365  // Signed integer divide.
1366  void sdiv(const Register& rd, const Register& rn, const Register& rm);
1367 
1368  // Unsigned integer divide.
1369  void udiv(const Register& rd, const Register& rn, const Register& rm);
1370 
1371  // Bit count, bit reverse and endian reverse.
1372  void rbit(const Register& rd, const Register& rn);
1373  void rev16(const Register& rd, const Register& rn);
1374  void rev32(const Register& rd, const Register& rn);
1375  void rev(const Register& rd, const Register& rn);
1376  void clz(const Register& rd, const Register& rn);
1377  void cls(const Register& rd, const Register& rn);
1378 
1379  // Memory instructions.
1380 
1381  // Load literal from pc + offset_from_pc.
1382  void LoadLiteral(const CPURegister& rt, int offset_from_pc);
1383 
1384  // Load integer or FP register.
1385  void ldr(const CPURegister& rt, const MemOperand& src);
1386 
1387  // Store integer or FP register.
1388  void str(const CPURegister& rt, const MemOperand& dst);
1389 
1390  // Load word with sign extension.
1391  void ldrsw(const Register& rt, const MemOperand& src);
1392 
1393  // Load byte.
1394  void ldrb(const Register& rt, const MemOperand& src);
1395 
1396  // Store byte.
1397  void strb(const Register& rt, const MemOperand& dst);
1398 
1399  // Load byte with sign extension.
1400  void ldrsb(const Register& rt, const MemOperand& src);
1401 
1402  // Load half-word.
1403  void ldrh(const Register& rt, const MemOperand& src);
1404 
1405  // Store half-word.
1406  void strh(const Register& rt, const MemOperand& dst);
1407 
1408  // Load half-word with sign extension.
1409  void ldrsh(const Register& rt, const MemOperand& src);
1410 
1411  // Load integer or FP register pair.
1412  void ldp(const CPURegister& rt, const CPURegister& rt2,
1413  const MemOperand& src);
1414 
1415  // Store integer or FP register pair.
1416  void stp(const CPURegister& rt, const CPURegister& rt2,
1417  const MemOperand& dst);
1418 
1419  // Load word pair with sign extension.
1420  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
1421 
1422  // Load integer or FP register pair, non-temporal.
1423  void ldnp(const CPURegister& rt, const CPURegister& rt2,
1424  const MemOperand& src);
1425 
1426  // Store integer or FP register pair, non-temporal.
1427  void stnp(const CPURegister& rt, const CPURegister& rt2,
1428  const MemOperand& dst);
1429 
1430  // Load literal to register.
1431  void ldr(const Register& rt, uint64_t imm);
1432 
1433  // Load literal to FP register.
1434  void ldr(const FPRegister& ft, double imm);
1435  void ldr(const FPRegister& ft, float imm);
1436 
1437  // Move instructions. The default shift of -1 indicates that the move
1438  // instruction will calculate an appropriate 16-bit immediate and left shift
1439  // that is equal to the 64-bit immediate argument. If an explicit left shift
1440  // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
1441  //
1442  // For movk, an explicit shift can be used to indicate which half word should
1443  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
1444  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
1445  // most-significant.
1446 
1447  // Move and keep.
1448  void movk(const Register& rd, uint64_t imm, int shift = -1) {
1449  MoveWide(rd, imm, shift, MOVK);
1450  }
1451 
1452  // Move with non-zero.
1453  void movn(const Register& rd, uint64_t imm, int shift = -1) {
1454  MoveWide(rd, imm, shift, MOVN);
1455  }
1456 
1457  // Move with zero.
1458  void movz(const Register& rd, uint64_t imm, int shift = -1) {
1459  MoveWide(rd, imm, shift, MOVZ);
1460  }
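// --- Editorial usage sketch (not part of the original header) ---
// With the default shift of -1, each mov* call picks the half-word position
// itself, so an arbitrary 64-bit constant can be synthesized one half-word at
// a time. A hedged illustration, assuming an Assembler instance 'masm':
//
//   masm.movz(x0, 0x1234, 0);    // x0 = 0x0000000000001234
//   masm.movk(x0, 0x5678, 16);   // x0 = 0x0000000056781234
//   masm.movk(x0, 0x9abc, 32);   // x0 = 0x00009abc56781234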
1461 
1462  // Misc instructions.
1463  // Monitor debug-mode breakpoint.
1464  void brk(int code);
1465 
1466  // Halting debug-mode breakpoint.
1467  void hlt(int code);
1468 
1469  // Move register to register.
1470  void mov(const Register& rd, const Register& rn);
1471 
1472  // Move NOT(operand) to register.
1473  void mvn(const Register& rd, const Operand& operand);
1474 
1475  // System instructions.
1476  // Move to register from system register.
1477  void mrs(const Register& rt, SystemRegister sysreg);
1478 
1479  // Move from register to system register.
1480  void msr(SystemRegister sysreg, const Register& rt);
1481 
1482  // System hint.
1483  void hint(SystemHint code);
1484 
1485  // Data memory barrier
1486  void dmb(BarrierDomain domain, BarrierType type);
1487 
1488  // Data synchronization barrier
1489  void dsb(BarrierDomain domain, BarrierType type);
1490 
1491  // Instruction synchronization barrier
1492  void isb();
1493 
1494  // Alias for system instructions.
1495  void nop() { hint(NOP); }
1496 
1497  // Different nop operations are used by the code generator to detect certain
1498  // states of the generated code.
1499  enum NopMarkerTypes {
1500  DEBUG_BREAK_NOP,
1501  INTERRUPT_CODE_NOP,
1502  FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
1503  LAST_NOP_MARKER = INTERRUPT_CODE_NOP
1504  };
1505 
1506  void nop(NopMarkerTypes n) {
1507  ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
1508  mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
1509  }
1510 
1511  // FP instructions.
1512  // Move immediate to FP register.
1513  void fmov(FPRegister fd, double imm);
1514  void fmov(FPRegister fd, float imm);
1515 
1516  // Move FP register to register.
1517  void fmov(Register rd, FPRegister fn);
1518 
1519  // Move register to FP register.
1520  void fmov(FPRegister fd, Register rn);
1521 
1522  // Move FP register to FP register.
1523  void fmov(FPRegister fd, FPRegister fn);
1524 
1525  // FP add.
1526  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1527 
1528  // FP subtract.
1529  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1530 
1531  // FP multiply.
1532  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1533 
1534  // FP fused multiply and add.
1535  void fmadd(const FPRegister& fd,
1536  const FPRegister& fn,
1537  const FPRegister& fm,
1538  const FPRegister& fa);
1539 
1540  // FP fused multiply and subtract.
1541  void fmsub(const FPRegister& fd,
1542  const FPRegister& fn,
1543  const FPRegister& fm,
1544  const FPRegister& fa);
1545 
1546  // FP fused multiply, add and negate.
1547  void fnmadd(const FPRegister& fd,
1548  const FPRegister& fn,
1549  const FPRegister& fm,
1550  const FPRegister& fa);
1551 
1552  // FP fused multiply, subtract and negate.
1553  void fnmsub(const FPRegister& fd,
1554  const FPRegister& fn,
1555  const FPRegister& fm,
1556  const FPRegister& fa);
1557 
1558  // FP divide.
1559  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1560 
1561  // FP maximum.
1562  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1563 
1564  // FP minimum.
1565  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1566 
1567  // FP maximum number.
1568  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1569 
1570  // FP minimum number.
1571  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1572 
1573  // FP absolute.
1574  void fabs(const FPRegister& fd, const FPRegister& fn);
1575 
1576  // FP negate.
1577  void fneg(const FPRegister& fd, const FPRegister& fn);
1578 
1579  // FP square root.
1580  void fsqrt(const FPRegister& fd, const FPRegister& fn);
1581 
1582  // FP round to integer (nearest with ties to away).
1583  void frinta(const FPRegister& fd, const FPRegister& fn);
1584 
1585  // FP round to integer (nearest with ties to even).
1586  void frintn(const FPRegister& fd, const FPRegister& fn);
1587 
1588  // FP round to integer (towards zero).
1589  void frintz(const FPRegister& fd, const FPRegister& fn);
1590 
1591  // FP compare registers.
1592  void fcmp(const FPRegister& fn, const FPRegister& fm);
1593 
1594  // FP compare immediate.
1595  void fcmp(const FPRegister& fn, double value);
1596 
1597  // FP conditional compare.
1598  void fccmp(const FPRegister& fn,
1599  const FPRegister& fm,
1600  StatusFlags nzcv,
1601  Condition cond);
1602 
1603  // FP conditional select.
1604  void fcsel(const FPRegister& fd,
1605  const FPRegister& fn,
1606  const FPRegister& fm,
1607  Condition cond);
1608 
1609  // Common FP Convert function
1610  void FPConvertToInt(const Register& rd,
1611  const FPRegister& fn,
1612  FPIntegerConvertOp op);
1613 
1614  // FP convert between single and double precision.
1615  void fcvt(const FPRegister& fd, const FPRegister& fn);
1616 
1617  // Convert FP to unsigned integer (nearest with ties to away).
1618  void fcvtau(const Register& rd, const FPRegister& fn);
1619 
1620  // Convert FP to signed integer (nearest with ties to away).
1621  void fcvtas(const Register& rd, const FPRegister& fn);
1622 
1623  // Convert FP to unsigned integer (round towards -infinity).
1624  void fcvtmu(const Register& rd, const FPRegister& fn);
1625 
1626  // Convert FP to signed integer (round towards -infinity).
1627  void fcvtms(const Register& rd, const FPRegister& fn);
1628 
1629  // Convert FP to unsigned integer (nearest with ties to even).
1630  void fcvtnu(const Register& rd, const FPRegister& fn);
1631 
1632  // Convert FP to signed integer (nearest with ties to even).
1633  void fcvtns(const Register& rd, const FPRegister& fn);
1634 
1635  // Convert FP to unsigned integer (round towards zero).
1636  void fcvtzu(const Register& rd, const FPRegister& fn);
1637 
1638  // Convert FP to signed integer (round towards zero).
1639  void fcvtzs(const Register& rd, const FPRegister& fn);
1640 
1641  // Convert signed integer or fixed point to FP.
1642  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1643 
1644  // Convert unsigned integer or fixed point to FP.
1645  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1646 
1647  // Instruction functions used only for test, debug, and patching.
1648  // Emit raw instructions in the instruction stream.
1649  void dci(Instr raw_inst) { Emit(raw_inst); }
1650 
1651  // Emit 8 bits of data in the instruction stream.
1652  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
1653 
1654  // Emit 32 bits of data in the instruction stream.
1655  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
1656 
1657  // Emit 64 bits of data in the instruction stream.
1658  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
1659 
1660  // Copy a string into the instruction stream, including the terminating NULL
1661  // character. The instruction pointer (pc_) is then aligned correctly for
1662  // subsequent instructions.
1663  void EmitStringData(const char * string) {
1664  size_t len = strlen(string) + 1;
1665  ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
1666  EmitData(string, len);
1667  // Pad with NULL characters until pc_ is aligned.
1668  const char pad[] = {'\0', '\0', '\0', '\0'};
1669  STATIC_ASSERT(sizeof(pad) == kInstructionSize);
1670  byte* next_pc = AlignUp(pc_, kInstructionSize);
1671  EmitData(&pad, next_pc - pc_);
1672  }
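// --- Editorial usage sketch (not part of the original header) ---
// EmitStringData places raw, NULL-terminated text in the instruction stream
// and pads with NULs to the next kInstructionSize boundary so instruction
// emission can resume. A hedged illustration, assuming an Assembler 'masm':
//
//   masm.EmitStringData("abort: invalid smi");  // 19 bytes + 1 byte of padding
//   // pc_ is now 4-byte aligned; normal instruction emission may continue.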
1673 
1674  // Pseudo-instructions ------------------------------------------------------
1675 
1676  // Parameters are described in arm64/instructions-arm64.h.
1677  void debug(const char* message, uint32_t code, Instr params = BREAK);
1678 
1679  // Required by V8.
1680  void dd(uint32_t data) { dc32(data); }
1681  void db(uint8_t data) { dc8(data); }
1682 
1683  // Code generation helpers --------------------------------------------------
1684 
1685  unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
1686 
1687  Instruction* InstructionAt(int offset) const {
1688  return reinterpret_cast<Instruction*>(buffer_ + offset);
1689  }
1690 
1691  // Register encoding.
1692  static Instr Rd(CPURegister rd) {
1693  ASSERT(rd.code() != kSPRegInternalCode);
1694  return rd.code() << Rd_offset;
1695  }
1696 
1697  static Instr Rn(CPURegister rn) {
1698  ASSERT(rn.code() != kSPRegInternalCode);
1699  return rn.code() << Rn_offset;
1700  }
1701 
1702  static Instr Rm(CPURegister rm) {
1703  ASSERT(rm.code() != kSPRegInternalCode);
1704  return rm.code() << Rm_offset;
1705  }
1706 
1707  static Instr Ra(CPURegister ra) {
1708  ASSERT(ra.code() != kSPRegInternalCode);
1709  return ra.code() << Ra_offset;
1710  }
1711 
1712  static Instr Rt(CPURegister rt) {
1713  ASSERT(rt.code() != kSPRegInternalCode);
1714  return rt.code() << Rt_offset;
1715  }
1716 
1717  static Instr Rt2(CPURegister rt2) {
1718  ASSERT(rt2.code() != kSPRegInternalCode);
1719  return rt2.code() << Rt2_offset;
1720  }
1721 
1722  // These encoding functions allow the stack pointer to be encoded, and
1723  // disallow the zero register.
1724  static Instr RdSP(Register rd) {
1725  ASSERT(!rd.IsZero());
1726  return (rd.code() & kRegCodeMask) << Rd_offset;
1727  }
1728 
1729  static Instr RnSP(Register rn) {
1730  ASSERT(!rn.IsZero());
1731  return (rn.code() & kRegCodeMask) << Rn_offset;
1732  }
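 // Illustrative sketch (editorial): instruction emission composes these field
 // encoders with an opcode by bitwise OR. The offsets follow the A64 layout
 // (Rd in bits [4:0], Rn in [9:5], Rm in [20:16]), so, assuming ADD_x_op names
 // a hypothetical 64-bit add (shifted register) opcode, 'add x0, x1, x2' is:
 //
 //   Emit(ADD_x_op | Rm(x2) | Rn(x1) | Rd(x0));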
1733 
1734  // Flags encoding.
1735  inline static Instr Flags(FlagsUpdate S);
1736  inline static Instr Cond(Condition cond);
1737 
1738  // PC-relative address encoding.
1739  inline static Instr ImmPCRelAddress(int imm21);
1740 
1741  // Branch encoding.
1742  inline static Instr ImmUncondBranch(int imm26);
1743  inline static Instr ImmCondBranch(int imm19);
1744  inline static Instr ImmCmpBranch(int imm19);
1745  inline static Instr ImmTestBranch(int imm14);
1746  inline static Instr ImmTestBranchBit(unsigned bit_pos);
1747 
1748  // Data Processing encoding.
1749  inline static Instr SF(Register rd);
1750  inline static Instr ImmAddSub(int64_t imm);
1751  inline static Instr ImmS(unsigned imms, unsigned reg_size);
1752  inline static Instr ImmR(unsigned immr, unsigned reg_size);
1753  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
1754  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
1755  inline static Instr ImmLLiteral(int imm19);
1756  inline static Instr BitN(unsigned bitn, unsigned reg_size);
1757  inline static Instr ShiftDP(Shift shift);
1758  inline static Instr ImmDPShift(unsigned amount);
1759  inline static Instr ExtendMode(Extend extend);
1760  inline static Instr ImmExtendShift(unsigned left_shift);
1761  inline static Instr ImmCondCmp(unsigned imm);
1762  inline static Instr Nzcv(StatusFlags nzcv);
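 // Editorial note: these helpers only place pre-validated immediates into
 // their bitfields; the Is* predicates declared further down do the
 // validation. For example, A64 add/sub immediates are 12 bits, optionally
 // shifted left by 12:
 //
 //   IsImmAddSub(0xfff);     // true: fits in 12 bits.
 //   IsImmAddSub(0x123000);  // true: 12 bits shifted left by 12.
 //   IsImmAddSub(0x123456);  // false: must be materialized in a register.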
1763 
1764  // MemOperand offset encoding.
1765  inline static Instr ImmLSUnsigned(int imm12);
1766  inline static Instr ImmLS(int imm9);
1767  inline static Instr ImmLSPair(int imm7, LSDataSize size);
1768  inline static Instr ImmShiftLS(unsigned shift_amount);
1769  inline static Instr ImmException(int imm16);
1770  inline static Instr ImmSystemRegister(int imm15);
1771  inline static Instr ImmHint(int imm7);
1772  inline static Instr ImmBarrierDomain(int imm2);
1773  inline static Instr ImmBarrierType(int imm2);
1774  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
1775 
1776  // Move immediates encoding.
1777  inline static Instr ImmMoveWide(uint64_t imm);
1778  inline static Instr ShiftMoveWide(int64_t shift);
1779 
1780  // FP Immediates.
1781  static Instr ImmFP32(float imm);
1782  static Instr ImmFP64(double imm);
1783  inline static Instr FPScale(unsigned scale);
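 // Editorial note: A64 FMOV (immediate) packs a float or double into 8 bits,
 // covering only values of the form +/- n/16 * 2^e with n in [16, 31] and
 // e in [-3, 4]. IsImmFP64() (below) tests membership: 1.0, -0.5 and 31.0 are
 // encodable, while 0.1 is not and 0.0 is typically moved from the zero
 // register instead.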
1784 
1785  // FP register type.
1786  inline static Instr FPType(FPRegister fd);
1787 
1788  // Class for scoping postponing the constant pool generation.
1789  class BlockConstPoolScope {
1790  public:
1791  explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1792  assem_->StartBlockConstPool();
1793  }
1794  ~BlockConstPoolScope() {
1795  assem_->EndBlockConstPool();
1796  }
1797 
1798  private:
1799  Assembler* assem_;
1800 
1801  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
1802  };
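 // Usage sketch (editorial, assuming an Assembler* assm in scope): the scope
 // guards a sequence that must stay contiguous, so no constant pool can be
 // dumped in the middle of it:
 //
 //   {
 //     Assembler::BlockConstPoolScope scope(assm);
 //     // ... emit instructions that must not be split by a pool ...
 //   }  // Pool checks resume from here.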
1803 
1804  // Check whether it is time to emit a constant pool.
1805  void CheckConstPool(bool force_emit, bool require_jump);
1806 
1807  // Allocate a constant pool of the correct size for the generated code.
1808  MaybeObject* AllocateConstantPool(Heap* heap);
1809 
1810  // Generate the constant pool for the generated code.
1811  void PopulateConstantPool(ConstantPoolArray* constant_pool);
1812 
1813  // Returns true if we should emit a veneer as soon as possible for a branch
1814  // which can at most reach the specified pc.
1815  bool ShouldEmitVeneer(int max_reachable_pc,
1816  int margin = kVeneerDistanceMargin);
1817  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
1818  return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
1819  }
1820 
1821  // The maximum code size generated for a veneer. Currently one branch
1822  // instruction. This is for code size checking purposes, and can be extended
1823  // in the future, for example, if we decide to add nops between the veneers.
1824  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
1825 
1826  void RecordVeneerPool(int location_offset, int size);
1827  // Emits veneers for branches that are approaching their maximum range.
1828  // If need_protection is true, the veneers are protected by a branch jumping
1829  // over the code.
1830  void EmitVeneers(bool force_emit, bool need_protection,
1831  int margin = kVeneerDistanceMargin);
1833  // Checks whether veneers need to be emitted at this point.
1834  // If force_emit is set, a veneer is generated for *all* unresolved branches.
1835  void CheckVeneerPool(bool force_emit, bool require_jump,
1836  int margin = kVeneerDistanceMargin);
1837 
1838 
1839  class BlockPoolsScope {
1840  public:
1841  explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
1842  assem_->StartBlockPools();
1843  }
1844  ~BlockPoolsScope() {
1845  assem_->EndBlockPools();
1846  }
1847 
1848  private:
1849  Assembler* assem_;
1850 
1851  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
1852  };
1853 
1854  // Available for constrained code generation scopes. Prefer
1855  // MacroAssembler::Mov() when possible.
1856  inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
1857 
1858  protected:
1859  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
1860 
1861  void LoadStore(const CPURegister& rt,
1862  const MemOperand& addr,
1863  LoadStoreOp op);
1864  static bool IsImmLSUnscaled(ptrdiff_t offset);
1865  static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
1866 
1867  void Logical(const Register& rd,
1868  const Register& rn,
1869  const Operand& operand,
1870  LogicalOp op);
1871  void LogicalImmediate(const Register& rd,
1872  const Register& rn,
1873  unsigned n,
1874  unsigned imm_s,
1875  unsigned imm_r,
1876  LogicalOp op);
1877  static bool IsImmLogical(uint64_t value,
1878  unsigned width,
1879  unsigned* n,
1880  unsigned* imm_s,
1881  unsigned* imm_r);
1882 
1883  void ConditionalCompare(const Register& rn,
1884  const Operand& operand,
1885  StatusFlags nzcv,
1886  Condition cond,
1887  ConditionalCompareOp op);
1888  static bool IsImmConditionalCompare(int64_t immediate);
1889 
1890  void AddSubWithCarry(const Register& rd,
1891  const Register& rn,
1892  const Operand& operand,
1893  FlagsUpdate S,
1894  AddSubWithCarryOp op);
1895 
1896  // Functions for emulating operands not directly supported by the instruction
1897  // set.
1898  void EmitShift(const Register& rd,
1899  const Register& rn,
1900  Shift shift,
1901  unsigned amount);
1902  void EmitExtendShift(const Register& rd,
1903  const Register& rn,
1904  Extend extend,
1905  unsigned left_shift);
1906 
1907  void AddSub(const Register& rd,
1908  const Register& rn,
1909  const Operand& operand,
1910  FlagsUpdate S,
1911  AddSubOp op);
1912  static bool IsImmAddSub(int64_t immediate);
1913 
1914  static bool IsImmFP32(float imm);
1915  static bool IsImmFP64(double imm);
1916 
1917  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
1918  // registers. Only simple loads are supported; sign- and zero-extension (such
1919  // as in LDPSW_x or LDRB_w) are not supported.
1920  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
1921  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
1922  const CPURegister& rt2);
1923  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
1924  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
1925  const CPURegister& rt2);
1926  static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
1927  const CPURegister& rt, const CPURegister& rt2);
1928  static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
1929  const CPURegister& rt, const CPURegister& rt2);
1930 
1931  // Remove the specified branch from the unbound label link chain.
1932  // If available, a veneer for this label can be used for other branches in the
1933  // chain if the link chain cannot be fixed up without this branch.
1934  void RemoveBranchFromLabelLinkChain(Instruction* branch,
1935  Label* label,
1936  Instruction* label_veneer = NULL);
1937 
1938  private:
1939  // Instruction helpers.
1940  void MoveWide(const Register& rd,
1941  uint64_t imm,
1942  int shift,
1943  MoveWideImmediateOp mov_op);
1944  void DataProcShiftedRegister(const Register& rd,
1945  const Register& rn,
1946  const Operand& operand,
1947  FlagsUpdate S,
1948  Instr op);
1949  void DataProcExtendedRegister(const Register& rd,
1950  const Register& rn,
1951  const Operand& operand,
1952  FlagsUpdate S,
1953  Instr op);
1954  void LoadStorePair(const CPURegister& rt,
1955  const CPURegister& rt2,
1956  const MemOperand& addr,
1957  LoadStorePairOp op);
1958  void LoadStorePairNonTemporal(const CPURegister& rt,
1959  const CPURegister& rt2,
1960  const MemOperand& addr,
1961  LoadStorePairNonTemporalOp op);
1962  // Register the relocation information for the operand and load its value
1963  // into rt.
1964  void LoadRelocatedValue(const CPURegister& rt,
1965  const Operand& operand,
1966  LoadLiteralOp op);
1967  void ConditionalSelect(const Register& rd,
1968  const Register& rn,
1969  const Register& rm,
1970  Condition cond,
1971  ConditionalSelectOp op);
1972  void DataProcessing1Source(const Register& rd,
1973  const Register& rn,
1974  DataProcessing1SourceOp op);
1975  void DataProcessing3Source(const Register& rd,
1976  const Register& rn,
1977  const Register& rm,
1978  const Register& ra,
1979  DataProcessing3SourceOp op);
1980  void FPDataProcessing1Source(const FPRegister& fd,
1981  const FPRegister& fn,
1982  FPDataProcessing1SourceOp op);
1983  void FPDataProcessing2Source(const FPRegister& fd,
1984  const FPRegister& fn,
1985  const FPRegister& fm,
1986  FPDataProcessing2SourceOp op);
1987  void FPDataProcessing3Source(const FPRegister& fd,
1988  const FPRegister& fn,
1989  const FPRegister& fm,
1990  const FPRegister& fa,
1991  FPDataProcessing3SourceOp op);
1992 
1993  // Label helpers.
1994 
1995  // Return an offset for a label-referencing instruction, typically a branch.
1996  int LinkAndGetByteOffsetTo(Label* label);
1997 
1998  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
1999  // suitable for fields that take instruction offsets.
2000  inline int LinkAndGetInstructionOffsetTo(Label* label);
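 // Editorial note: since every A64 instruction is kInstructionSize (4) bytes,
 // the instruction offset is just the byte offset divided by 4. Branch fields
 // store this scaled form, which buys two extra bits of range (e.g. imm26
 // reaches +/-128MB rather than +/-32MB).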
2001 
2002  static const int kStartOfLabelLinkChain = 0;
2003 
2004  // Verify that a label's link chain is intact.
2005  void CheckLabelLinkChain(Label const * label);
2006 
2007  void RecordLiteral(int64_t imm, unsigned size);
2008 
2009  // Postpone the generation of the constant pool for the specified number of
2010  // instructions.
2011  void BlockConstPoolFor(int instructions);
2012 
2013  // Emit the instruction at pc_.
2014  void Emit(Instr instruction) {
2015  STATIC_ASSERT(sizeof(*pc_) == 1);
2016  STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
2017  ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
2018 
2019  memcpy(pc_, &instruction, sizeof(instruction));
2020  pc_ += sizeof(instruction);
2021  CheckBuffer();
2022  }
2023 
2024  // Emit data inline in the instruction stream.
2025  void EmitData(void const * data, unsigned size) {
2026  ASSERT(sizeof(*pc_) == 1);
2027  ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
2028 
2029  // TODO(all): Somehow register we have some data here. Then we can
2030  // disassemble it correctly.
2031  memcpy(pc_, data, size);
2032  pc_ += size;
2033  CheckBuffer();
2034  }
2035 
2036  void GrowBuffer();
2037  void CheckBuffer();
2038 
2039  // Pc offset of the next constant pool check.
2040  int next_constant_pool_check_;
2041 
2042  // Constant pool generation
2043  // Pools are emitted in the instruction stream, preferably after unconditional
2044  // jumps or after returns from functions (in dead code locations).
2045  // If a long code sequence does not contain unconditional jumps, it is
2046  // necessary to emit the constant pool before the pool gets too far from the
2047  // location it is accessed from. In this case, we emit a jump over the emitted
2048  // constant pool.
2049  // Constants in the pool may be addresses of functions that get relocated;
2050  // if so, a relocation info entry is associated with the constant pool entry.
2051 
2052  // Repeated checking whether the constant pool should be emitted is rather
2053  // expensive. By default we only check again once a number of instructions
2054  // has been generated. That also means that the sizing of the buffers is not
2055  // an exact science, and that we rely on some slop to not overrun buffers.
2056  static const int kCheckConstPoolIntervalInst = 128;
2057  static const int kCheckConstPoolInterval =
2058  kCheckConstPoolIntervalInst * kInstructionSize;
2059 
2060  // Constants in pools are accessed via pc-relative addressing, which can
2061  // reach +/-4KB, thereby defining a maximum distance between the instruction
2062  // and the accessed constant.
2063  static const int kMaxDistToConstPool = 4 * KB;
2064  static const int kMaxNumPendingRelocInfo =
2065  kMaxDistToConstPool / kInstructionSize;
2066 
2067 
2068  // Average distance between a constant pool and the first instruction
2069  // accessing the constant pool. Longer distance should result in less I-cache
2070  // pollution.
2071  // In practice the distance will be smaller since constant pool emission is
2072  // forced after function return and sometimes after unconditional branches.
2073  static const int kAvgDistToConstPool =
2074  kMaxDistToConstPool - kCheckConstPoolInterval;
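 // Editorial arithmetic, with kInstructionSize == 4: the check interval is
 // 128 * 4 == 512 bytes, at most 4KB / 4 == 1024 reloc entries can be pending,
 // and the average pool distance comes out to 4096 - 512 == 3584 bytes.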
2075 
2076  // Emission of the constant pool may be blocked in some code sequences.
2077  int const_pool_blocked_nesting_; // Block emission if this is not zero.
2078  int no_const_pool_before_; // Block emission before this pc offset.
2079 
2080  // Keep track of the first instruction requiring a constant pool entry
2081  // since the previous constant pool was emitted.
2082  int first_const_pool_use_;
2083 
2084  // Emission of the veneer pools may be blocked in some code sequences.
2085  int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
2086 
2087  // Relocation info generation
2088  // Each relocation is encoded as a variable size value
2089  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
2090  RelocInfoWriter reloc_info_writer;
2091 
2092  // Relocation info records are also used during code generation as temporary
2093  // containers for constants and code target addresses until they are emitted
2094  // to the constant pool. These pending relocation info records are temporarily
2095  // stored in a separate buffer until a constant pool is emitted.
2096  // If every instruction in a long sequence is accessing the pool, we need one
2097  // pending relocation entry per instruction.
2098 
2099  // the buffer of pending relocation info
2100  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
2101  // number of pending reloc info entries in the buffer
2102  int num_pending_reloc_info_;
2103 
2104  // Relocation for a type-recording IC has the AST id added to it. This
2105  // member variable is a way to pass the information from the call site to
2106  // the relocation info.
2107  TypeFeedbackId recorded_ast_id_;
2108 
2109  inline TypeFeedbackId RecordedAstId();
2110  inline void ClearRecordedAstId();
2111 
2112  protected:
2113  // Record the AST id of the CallIC being compiled, so that it can be placed
2114  // in the relocation information.
2115  void SetRecordedAstId(TypeFeedbackId ast_id) {
2116  ASSERT(recorded_ast_id_.IsNone());
2117  recorded_ast_id_ = ast_id;
2118  }
2119 
2120  // Code generation
2121  // The relocation writer's position is at least kGap bytes below the end of
2122  // the generated instructions. This is so that multi-instruction sequences do
2123  // not have to check for overflow. The same is true for writes of large
2124  // relocation info entries, and debug strings encoded in the instruction
2125  // stream.
2126  static const int kGap = 128;
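 // Editorial note: 128 bytes of slack is 32 instructions' worth. kGap also
 // bounds the inline data helpers, e.g. EmitStringData() asserts that the
 // padded string length fits within it.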
2127 
2128  public:
2129  class FarBranchInfo {
2130  public:
2131  FarBranchInfo(int offset, Label* label)
2132  : pc_offset_(offset), label_(label) {}
2133  // Offset of the branch in the code generation buffer.
2134  int pc_offset_;
2135  // The label branched to.
2136  Label* label_;
2137  };
2138 
2139  protected:
2140  // Information about unresolved (forward) branches.
2141  // The Assembler is only allowed to delete out-of-date information from here
2142  // after a label is bound. The MacroAssembler uses this information to
2143  // generate veneers.
2144  //
2145  // The second member gives information about the unresolved branch. The first
2146  // member of the pair is the maximum offset that the branch can reach in the
2147  // buffer. The map is sorted by this reachable offset, making it easy to
2148  // check when veneers need to be emitted.
2149  // Note that the maximum reachable offset (first member of the pairs) should
2150  // always be positive but has the same type as the return value for
2151  // pc_offset() for convenience.
2152  std::multimap<int, FarBranchInfo> unresolved_branches_;
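 // Illustrative sketch (editorial): because the multimap is keyed on the
 // furthest reachable pc offset, a veneer check only needs the first entry.
 // A minimal version of the test, under the declarations in this class
 // (NeedVeneerSoon is a hypothetical name):
 //
 //   bool NeedVeneerSoon(int margin) {
 //     return !unresolved_branches_.empty() &&
 //            pc_offset() >= unresolved_branches_first_limit() - margin;
 //   }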
2153 
2154  // We generate a veneer for a branch if we reach within this distance of the
2155  // limit of the range.
2156  static const int kVeneerDistanceMargin = 1 * KB;
2157  // The factor of 2 is a finger in the air guess. With a default margin of
2158  // 1KB, that leaves us an additional 256 instructions to avoid generating a
2159  // protective branch.
2160  static const int kVeneerNoProtectionFactor = 2;
2161  static const int kVeneerDistanceCheckMargin =
2162  kVeneerNoProtectionFactor * kVeneerDistanceMargin;
2163  int unresolved_branches_first_limit() const {
2164  ASSERT(!unresolved_branches_.empty());
2165  return unresolved_branches_.begin()->first;
2166  }
2167  // This is similar to next_constant_pool_check_ and helps reduce the overhead
2168  // of checking for veneer pools.
2169  // It is maintained to the closest unresolved branch limit minus the maximum
2170  // veneer margin (or kMaxInt if there are no unresolved branches).
2171  int next_veneer_pool_check_;
2172 
2173  private:
2174  // If a veneer is emitted for a branch instruction, that instruction must be
2175  // removed from the associated label's link chain so that the assembler does
2176  // not later attempt (likely unsuccessfully) to patch it to branch directly to
2177  // the label.
2178  void DeleteUnresolvedBranchInfoForLabel(Label* label);
2179 
2180  private:
2181  PositionsRecorder positions_recorder_;
2182  friend class PositionsRecorder;
2183  friend class EnsureSpace;
2184 };
2185 
2186  class PatchingAssembler : public Assembler {
2187  public:
2188  // Create an Assembler with a buffer starting at 'start'.
2189  // The buffer size is
2190  // size of instructions to patch + kGap
2191  // Where kGap is the distance from which the Assembler tries to grow the
2192  // buffer.
2193  // If more or fewer instructions than expected are generated or if some
2194  // relocation information takes space in the buffer, the PatchingAssembler
2195  // will crash trying to grow the buffer.
2196  PatchingAssembler(Instruction* start, unsigned count)
2197  : Assembler(NULL,
2198  reinterpret_cast<byte*>(start),
2199  count * kInstructionSize + kGap) {
2200  StartBlockPools();
2201  }
2202 
2203  PatchingAssembler(byte* start, unsigned count)
2204  : Assembler(NULL, start, count * kInstructionSize + kGap) {
2205  // Block constant pool emission.
2206  StartBlockPools();
2207  }
2208 
2209  ~PatchingAssembler() {
2210  // Const pool should still be blocked.
2211  ASSERT(is_const_pool_blocked());
2212  EndBlockPools();
2213  // Verify we have generated the number of instructions we expected.
2214  ASSERT((pc_offset() + kGap) == buffer_size_);
2215  // Verify no relocation information has been emitted.
2216  ASSERT(num_pending_reloc_info() == 0);
2217  // Flush the Instruction cache.
2218  size_t length = buffer_size_ - kGap;
2219  CPU::FlushICache(buffer_, length);
2220  }
2221 };
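 // Usage sketch (editorial): patch one known instruction in place; the
 // destructor verifies that exactly 'count' instructions were emitted and
 // flushes the instruction cache for the patched range. Here 'target' is a
 // hypothetical Instruction* and the branch offset is in instructions:
 //
 //   PatchingAssembler patcher(target, 1);
 //   patcher.b(offset_in_instructions);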
2222 
2223 
2224 class EnsureSpace BASE_EMBEDDED {
2225  public:
2226  explicit EnsureSpace(Assembler* assembler) {
2227  assembler->CheckBuffer();
2228  }
2229 };
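 // Usage sketch (editorial): emit helpers typically construct this guard
 // before writing, so CheckBuffer() can grow the buffer up front if pc_ is
 // within kGap of the end:
 //
 //   EnsureSpace ensure_space(this);  // Inside an Assembler member function.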
2230 
2231 } } // namespace v8::internal
2232 
2233 #endif // V8_ARM64_ASSEMBLER_ARM64_H_