V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
assembler-arm64-inl.h
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
29 #define V8_ARM64_ASSEMBLER_ARM64_INL_H_
30 
31 #include "arm64/assembler-arm64.h"
32 #include "cpu.h"
33 #include "debug.h"
34 
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 void RelocInfo::apply(intptr_t delta) {
41  UNIMPLEMENTED();
42 }
43 
44 
45 void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
46  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
47  Assembler::set_target_address_at(pc_, host_, target);
48  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
49  Object* target_code = Code::GetCodeFromTargetAddress(target);
50  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
51  host(), this, HeapObject::cast(target_code));
52  }
53 }
54 
55 
56 inline unsigned CPURegister::code() const {
57  ASSERT(IsValid());
58  return reg_code;
59 }
60 
61 
62 inline CPURegister::RegisterType CPURegister::type() const {
63  ASSERT(IsValidOrNone());
64  return reg_type;
65 }
66 
67 
68 inline RegList CPURegister::Bit() const {
69  ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
70  return IsValid() ? 1UL << reg_code : 0;
71 }
72 
73 
74 inline unsigned CPURegister::SizeInBits() const {
75  ASSERT(IsValid());
76  return reg_size;
77 }
78 
79 
80 inline int CPURegister::SizeInBytes() const {
81  ASSERT(IsValid());
82  ASSERT(SizeInBits() % 8 == 0);
83  return reg_size / 8;
84 }
85 
86 
87 inline bool CPURegister::Is32Bits() const {
88  ASSERT(IsValid());
89  return reg_size == 32;
90 }
91 
92 
93 inline bool CPURegister::Is64Bits() const {
94  ASSERT(IsValid());
95  return reg_size == 64;
96 }
97 
98 
99 inline bool CPURegister::IsValid() const {
100  if (IsValidRegister() || IsValidFPRegister()) {
101  ASSERT(!IsNone());
102  return true;
103  } else {
104  ASSERT(IsNone());
105  return false;
106  }
107 }
108 
109 
110 inline bool CPURegister::IsValidRegister() const {
111  return IsRegister() &&
112  ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
113  ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
114 }
115 
116 
117 inline bool CPURegister::IsValidFPRegister() const {
118  return IsFPRegister() &&
119  ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
120  (reg_code < kNumberOfFPRegisters);
121 }
122 
123 
124 inline bool CPURegister::IsNone() const {
125  // kNoRegister types should always have size 0 and code 0.
126  ASSERT((reg_type != kNoRegister) || (reg_code == 0));
127  ASSERT((reg_type != kNoRegister) || (reg_size == 0));
128 
129  return reg_type == kNoRegister;
130 }
131 
132 
133 inline bool CPURegister::Is(const CPURegister& other) const {
134  ASSERT(IsValidOrNone() && other.IsValidOrNone());
135  return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
136  (reg_type == other.reg_type);
137 }
138 
139 
140 inline bool CPURegister::IsRegister() const {
141  return reg_type == kRegister;
142 }
143 
144 
145 inline bool CPURegister::IsFPRegister() const {
146  return reg_type == kFPRegister;
147 }
148 
149 
150 inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
151  return (reg_size == other.reg_size) && (reg_type == other.reg_type);
152 }
153 
154 
155 inline bool CPURegister::IsValidOrNone() const {
156  return IsValid() || IsNone();
157 }
158 
159 
160 inline bool CPURegister::IsZero() const {
161  ASSERT(IsValid());
162  return IsRegister() && (reg_code == kZeroRegCode);
163 }
164 
165 
166 inline bool CPURegister::IsSP() const {
167  ASSERT(IsValid());
168  return IsRegister() && (reg_code == kSPRegInternalCode);
169 }
170 
171 
172 inline void CPURegList::Combine(const CPURegList& other) {
173  ASSERT(IsValid());
174  ASSERT(other.type() == type_);
175  ASSERT(other.RegisterSizeInBits() == size_);
176  list_ |= other.list();
177 }
178 
179 
180 inline void CPURegList::Remove(const CPURegList& other) {
181  ASSERT(IsValid());
182  if (other.type() == type_) {
183  list_ &= ~other.list();
184  }
185 }
186 
187 
188 inline void CPURegList::Combine(const CPURegister& other) {
189  ASSERT(other.type() == type_);
190  ASSERT(other.SizeInBits() == size_);
191  Combine(other.code());
192 }
193 
194 
195 inline void CPURegList::Remove(const CPURegister& other1,
196  const CPURegister& other2,
197  const CPURegister& other3,
198  const CPURegister& other4) {
199  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
200  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
201  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
202  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
203 }
204 
205 
206 inline void CPURegList::Combine(int code) {
207  ASSERT(IsValid());
208  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
209  list_ |= (1UL << code);
210 }
211 
212 
213 inline void CPURegList::Remove(int code) {
214  ASSERT(IsValid());
215  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
216  list_ &= ~(1UL << code);
217 }
218 
219 
220 inline Register Register::XRegFromCode(unsigned code) {
221  // This function returns the zero register when code = 31. The stack pointer
222  // can not be returned.
223  ASSERT(code < kNumberOfRegisters);
224  return Register::Create(code, kXRegSizeInBits);
225 }
226 
227 
228 inline Register Register::WRegFromCode(unsigned code) {
229  ASSERT(code < kNumberOfRegisters);
230  return Register::Create(code, kWRegSizeInBits);
231 }
232 
233 
234 inline FPRegister FPRegister::SRegFromCode(unsigned code) {
235  ASSERT(code < kNumberOfFPRegisters);
236  return FPRegister::Create(code, kSRegSizeInBits);
237 }
238 
239 
240 inline FPRegister FPRegister::DRegFromCode(unsigned code) {
241  ASSERT(code < kNumberOfFPRegisters);
242  return FPRegister::Create(code, kDRegSizeInBits);
243 }
244 
245 
246 inline Register CPURegister::W() const {
247  ASSERT(IsValidRegister());
248  return Register::WRegFromCode(reg_code);
249 }
250 
251 
252 inline Register CPURegister::X() const {
253  ASSERT(IsValidRegister());
254  return Register::XRegFromCode(reg_code);
255 }
256 
257 
258 inline FPRegister CPURegister::S() const {
259  ASSERT(IsValidFPRegister());
260  return FPRegister::SRegFromCode(reg_code);
261 }
262 
263 
264 inline FPRegister CPURegister::D() const {
265  ASSERT(IsValidFPRegister());
266  return FPRegister::DRegFromCode(reg_code);
267 }
268 
269 
270 // Operand.
271 template<typename T>
272 Operand::Operand(Handle<T> value) : reg_(NoReg) {
273  initialize_handle(value);
274 }
275 
276 
277 // Default initializer is for int types
278 template<typename int_t>
279 struct OperandInitializer {
280  static const bool kIsIntType = true;
281  static inline RelocInfo::Mode rmode_for(int_t) {
282  return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
283  }
284  static inline int64_t immediate_for(int_t t) {
285  STATIC_ASSERT(sizeof(int_t) <= 8);
286  return t;
287  }
288 };
289 
290 
291 template<>
292 struct OperandInitializer<Smi*> {
293  static const bool kIsIntType = false;
294  static inline RelocInfo::Mode rmode_for(Smi* t) {
295  return RelocInfo::NONE64;
296  }
297  static inline int64_t immediate_for(Smi* t) {;
298  return reinterpret_cast<int64_t>(t);
299  }
300 };
301 
302 
303 template<>
304 struct OperandInitializer<ExternalReference> {
305  static const bool kIsIntType = false;
306  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
307  return RelocInfo::EXTERNAL_REFERENCE;
308  }
309  static inline int64_t immediate_for(ExternalReference t) {;
310  return reinterpret_cast<int64_t>(t.address());
311  }
312 };
313 
314 
315 template<typename T>
316 Operand::Operand(T t)
317  : immediate_(OperandInitializer<T>::immediate_for(t)),
318  reg_(NoReg),
319  rmode_(OperandInitializer<T>::rmode_for(t)) {}
320 
321 
322 template<typename T>
323 Operand::Operand(T t, RelocInfo::Mode rmode)
324  : immediate_(OperandInitializer<T>::immediate_for(t)),
325  reg_(NoReg),
326  rmode_(rmode) {
327  STATIC_ASSERT(!OperandInitializer<T>::kIsIntType);
328 }
329 
330 
331 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
332  : reg_(reg),
333  shift_(shift),
334  extend_(NO_EXTEND),
335  shift_amount_(shift_amount),
336  rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
337  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
338  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
339  ASSERT(!reg.IsSP());
340 }
341 
342 
343 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
344  : reg_(reg),
345  shift_(NO_SHIFT),
346  extend_(extend),
347  shift_amount_(shift_amount),
348  rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
349  ASSERT(reg.IsValid());
350  ASSERT(shift_amount <= 4);
351  ASSERT(!reg.IsSP());
352 
353  // Extend modes SXTX and UXTX require a 64-bit register.
354  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
355 }
356 
357 
358 bool Operand::IsImmediate() const {
359  return reg_.Is(NoReg);
360 }
361 
362 
363 bool Operand::IsShiftedRegister() const {
364  return reg_.IsValid() && (shift_ != NO_SHIFT);
365 }
366 
367 
368 bool Operand::IsExtendedRegister() const {
369  return reg_.IsValid() && (extend_ != NO_EXTEND);
370 }
371 
372 
373 bool Operand::IsZero() const {
374  if (IsImmediate()) {
375  return immediate() == 0;
376  } else {
377  return reg().IsZero();
378  }
379 }
380 
381 
382 Operand Operand::ToExtendedRegister() const {
383  ASSERT(IsShiftedRegister());
384  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
385  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
386 }
387 
388 
389 int64_t Operand::immediate() const {
390  ASSERT(IsImmediate());
391  return immediate_;
392 }
393 
394 
395 Register Operand::reg() const {
396  ASSERT(IsShiftedRegister() || IsExtendedRegister());
397  return reg_;
398 }
399 
400 
401 Shift Operand::shift() const {
402  ASSERT(IsShiftedRegister());
403  return shift_;
404 }
405 
406 
407 Extend Operand::extend() const {
408  ASSERT(IsExtendedRegister());
409  return extend_;
410 }
411 
412 
413 unsigned Operand::shift_amount() const {
414  ASSERT(IsShiftedRegister() || IsExtendedRegister());
415  return shift_amount_;
416 }
417 
418 
419 Operand Operand::UntagSmi(Register smi) {
420  ASSERT(smi.Is64Bits());
421  return Operand(smi, ASR, kSmiShift);
422 }
423 
424 
425 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
426  ASSERT(smi.Is64Bits());
427  ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
428  if (scale > kSmiShift) {
429  return Operand(smi, LSL, scale - kSmiShift);
430  } else if (scale < kSmiShift) {
431  return Operand(smi, ASR, kSmiShift - scale);
432  }
433  return Operand(smi);
434 }
435 
436 
437 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
438  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
439  shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
440  ASSERT(base.Is64Bits() && !base.IsZero());
441 }
442 
443 
444 MemOperand::MemOperand(Register base,
445  Register regoffset,
446  Extend extend,
447  unsigned shift_amount)
448  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
449  shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
450  ASSERT(base.Is64Bits() && !base.IsZero());
451  ASSERT(!regoffset.IsSP());
452  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
453 
454  // SXTX extend mode requires a 64-bit offset register.
455  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
456 }
457 
458 
459 MemOperand::MemOperand(Register base,
460  Register regoffset,
461  Shift shift,
462  unsigned shift_amount)
463  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
464  shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
465  ASSERT(base.Is64Bits() && !base.IsZero());
466  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
467  ASSERT(shift == LSL);
468 }
469 
470 
471 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
472  : base_(base), addrmode_(addrmode) {
473  ASSERT(base.Is64Bits() && !base.IsZero());
474 
475  if (offset.IsImmediate()) {
476  offset_ = offset.immediate();
477 
478  regoffset_ = NoReg;
479  } else if (offset.IsShiftedRegister()) {
480  ASSERT(addrmode == Offset);
481 
482  regoffset_ = offset.reg();
483  shift_= offset.shift();
484  shift_amount_ = offset.shift_amount();
485 
486  extend_ = NO_EXTEND;
487  offset_ = 0;
488 
489  // These assertions match those in the shifted-register constructor.
490  ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
491  ASSERT(shift_ == LSL);
492  } else {
493  ASSERT(offset.IsExtendedRegister());
494  ASSERT(addrmode == Offset);
495 
496  regoffset_ = offset.reg();
497  extend_ = offset.extend();
498  shift_amount_ = offset.shift_amount();
499 
500  shift_= NO_SHIFT;
501  offset_ = 0;
502 
503  // These assertions match those in the extended-register constructor.
504  ASSERT(!regoffset_.IsSP());
505  ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
506  ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
507  }
508 }
509 
510 bool MemOperand::IsImmediateOffset() const {
511  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
512 }
513 
514 
515 bool MemOperand::IsRegisterOffset() const {
516  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
517 }
518 
519 
520 bool MemOperand::IsPreIndex() const {
521  return addrmode_ == PreIndex;
522 }
523 
524 
525 bool MemOperand::IsPostIndex() const {
526  return addrmode_ == PostIndex;
527 }
528 
529 Operand MemOperand::OffsetAsOperand() const {
530  if (IsImmediateOffset()) {
531  return offset();
532  } else {
533  ASSERT(IsRegisterOffset());
534  if (extend() == NO_EXTEND) {
535  return Operand(regoffset(), shift(), shift_amount());
536  } else {
537  return Operand(regoffset(), extend(), shift_amount());
538  }
539  }
540 }
541 
542 
543 void Assembler::Unreachable() {
544 #ifdef USE_SIMULATOR
545  debug("UNREACHABLE", __LINE__, BREAK);
546 #else
547  // Crash by branching to 0. lr now points near the fault.
548  Emit(BLR | Rn(xzr));
549 #endif
550 }
551 
552 
553 Address Assembler::target_pointer_address_at(Address pc) {
554  Instruction* instr = reinterpret_cast<Instruction*>(pc);
555  ASSERT(instr->IsLdrLiteralX());
556  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
557 }
558 
559 
560 // Read/Modify the code target address in the branch/call instruction at pc.
561 Address Assembler::target_address_at(Address pc,
562  ConstantPoolArray* constant_pool) {
563  return Memory::Address_at(target_pointer_address_at(pc));
564 }
565 
566 
567 Address Assembler::target_address_at(Address pc, Code* code) {
568  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
569  return target_address_at(pc, constant_pool);
570 }
571 
572 
573 Address Assembler::target_address_from_return_address(Address pc) {
574  // Returns the address of the call target from the return address that will
575  // be returned to after a call.
576  // Call sequence on ARM64 is:
577  // ldr ip0, #... @ load from literal pool
578  // blr ip0
579  Address candidate = pc - 2 * kInstructionSize;
580  Instruction* instr = reinterpret_cast<Instruction*>(candidate);
581  USE(instr);
582  ASSERT(instr->IsLdrLiteralX());
583  return candidate;
584 }
585 
586 
587 Address Assembler::return_address_from_call_start(Address pc) {
588  // The call, generated by MacroAssembler::Call, is one of two possible
589  // sequences:
590  //
591  // Without relocation:
592  // movz temp, #(target & 0x000000000000ffff)
593  // movk temp, #(target & 0x00000000ffff0000)
594  // movk temp, #(target & 0x0000ffff00000000)
595  // blr temp
596  //
597  // With relocation:
598  // ldr temp, =target
599  // blr temp
600  //
601  // The return address is immediately after the blr instruction in both cases,
602  // so it can be found by adding the call size to the address at the start of
603  // the call sequence.
604  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
605  STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
606 
607  Instruction* instr = reinterpret_cast<Instruction*>(pc);
608  if (instr->IsMovz()) {
609  // Verify the instruction sequence.
610  ASSERT(instr->following(1)->IsMovk());
611  ASSERT(instr->following(2)->IsMovk());
612  ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
613  return pc + Assembler::kCallSizeWithoutRelocation;
614  } else {
615  // Verify the instruction sequence.
616  ASSERT(instr->IsLdrLiteralX());
617  ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
618  return pc + Assembler::kCallSizeWithRelocation;
619  }
620 }
621 
622 
623 void Assembler::deserialization_set_special_target_at(
624  Address constant_pool_entry, Code* code, Address target) {
625  Memory::Address_at(constant_pool_entry) = target;
626 }
627 
628 
629 void Assembler::set_target_address_at(Address pc,
630  ConstantPoolArray* constant_pool,
631  Address target) {
632  Memory::Address_at(target_pointer_address_at(pc)) = target;
633  // Intuitively, we would think it is necessary to always flush the
634  // instruction cache after patching a target address in the code as follows:
635  // CPU::FlushICache(pc, sizeof(target));
636  // However, on ARM, an instruction is actually patched in the case of
637  // embedded constants of the form:
638  // ldr ip, [pc, #...]
639  // since the instruction accessing this address in the constant pool remains
640  // unchanged, a flush is not required.
641 }
642 
643 
644 void Assembler::set_target_address_at(Address pc,
645  Code* code,
646  Address target) {
647  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
648  set_target_address_at(pc, constant_pool, target);
649 }
650 
651 
652 int RelocInfo::target_address_size() {
653  return kPointerSize;
654 }
655 
656 
657 Address RelocInfo::target_address() {
658  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
659  return Assembler::target_address_at(pc_, host_);
660 }
661 
662 
663 Address RelocInfo::target_address_address() {
664  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
665  || rmode_ == EMBEDDED_OBJECT
666  || rmode_ == EXTERNAL_REFERENCE);
667  return Assembler::target_pointer_address_at(pc_);
668 }
669 
670 
671 Address RelocInfo::constant_pool_entry_address() {
672  ASSERT(IsInConstantPool());
673  return Assembler::target_pointer_address_at(pc_);
674 }
675 
676 
677 Object* RelocInfo::target_object() {
678  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
679  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
680 }
681 
682 
683 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
684  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
685  return Handle<Object>(reinterpret_cast<Object**>(
686  Assembler::target_address_at(pc_, host_)));
687 }
688 
689 
690 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
691  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
692  ASSERT(!target->IsConsString());
693  Assembler::set_target_address_at(pc_, host_,
694  reinterpret_cast<Address>(target));
695  if (mode == UPDATE_WRITE_BARRIER &&
696  host() != NULL &&
697  target->IsHeapObject()) {
698  host()->GetHeap()->incremental_marking()->RecordWrite(
699  host(), &Memory::Object_at(pc_), HeapObject::cast(target));
700  }
701 }
702 
703 
704 Address RelocInfo::target_reference() {
705  ASSERT(rmode_ == EXTERNAL_REFERENCE);
706  return Assembler::target_address_at(pc_, host_);
707 }
708 
709 
710 Address RelocInfo::target_runtime_entry(Assembler* origin) {
711  ASSERT(IsRuntimeEntry(rmode_));
712  return target_address();
713 }
714 
715 
716 void RelocInfo::set_target_runtime_entry(Address target,
717  WriteBarrierMode mode) {
718  ASSERT(IsRuntimeEntry(rmode_));
719  if (target_address() != target) set_target_address(target, mode);
720 }
721 
722 
723 Handle<Cell> RelocInfo::target_cell_handle() {
724  UNIMPLEMENTED();
725  Cell *null_cell = NULL;
726  return Handle<Cell>(null_cell);
727 }
728 
729 
730 Cell* RelocInfo::target_cell() {
731  ASSERT(rmode_ == RelocInfo::CELL);
732  return Cell::FromValueAddress(Memory::Address_at(pc_));
733 }
734 
735 
736 void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
737  UNIMPLEMENTED();
738 }
739 
740 
741 static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
742 static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
743 
744 
745 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
746  UNREACHABLE(); // This should never be reached on ARM64.
747  return Handle<Object>();
748 }
749 
750 
751 Code* RelocInfo::code_age_stub() {
752  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
753  ASSERT(!Code::IsYoungSequence(pc_));
754  // Read the stub entry point from the code age sequence.
755  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
756  return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
757 }
758 
759 
760 void RelocInfo::set_code_age_stub(Code* stub) {
761  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
762  ASSERT(!Code::IsYoungSequence(pc_));
763  // Overwrite the stub entry point in the code age sequence. This is loaded as
764  // a literal so there is no need to call FlushICache here.
765  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
766  Memory::Address_at(stub_entry_address) = stub->instruction_start();
767 }
768 
769 
770 Address RelocInfo::call_address() {
771  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
772  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
773  // For the above sequences the Relocinfo points to the load literal loading
774  // the call address.
775  return Assembler::target_address_at(pc_, host_);
776 }
777 
778 
779 void RelocInfo::set_call_address(Address target) {
780  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
781  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
782  Assembler::set_target_address_at(pc_, host_, target);
783  if (host() != NULL) {
784  Object* target_code = Code::GetCodeFromTargetAddress(target);
785  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
786  host(), this, HeapObject::cast(target_code));
787  }
788 }
789 
790 
791 void RelocInfo::WipeOut() {
792  ASSERT(IsEmbeddedObject(rmode_) ||
793  IsCodeTarget(rmode_) ||
794  IsRuntimeEntry(rmode_) ||
795  IsExternalReference(rmode_));
796  Assembler::set_target_address_at(pc_, host_, NULL);
797 }
798 
799 
800 bool RelocInfo::IsPatchedReturnSequence() {
801  // The sequence must be:
802  // ldr ip0, [pc, #offset]
803  // blr ip0
804  // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
805  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
806  Instruction* i2 = i1->following();
807  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
808  i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
809 }
810 
811 
812 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
813  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
814  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
815 }
816 
817 
818 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
819  RelocInfo::Mode mode = rmode();
820  if (mode == RelocInfo::EMBEDDED_OBJECT) {
821  visitor->VisitEmbeddedPointer(this);
822  } else if (RelocInfo::IsCodeTarget(mode)) {
823  visitor->VisitCodeTarget(this);
824  } else if (mode == RelocInfo::CELL) {
825  visitor->VisitCell(this);
826  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
827  visitor->VisitExternalReference(this);
828 #ifdef ENABLE_DEBUGGER_SUPPORT
829  } else if (((RelocInfo::IsJSReturn(mode) &&
830  IsPatchedReturnSequence()) ||
831  (RelocInfo::IsDebugBreakSlot(mode) &&
832  IsPatchedDebugBreakSlotSequence())) &&
833  isolate->debug()->has_break_points()) {
834  visitor->VisitDebugTarget(this);
835 #endif
836  } else if (RelocInfo::IsRuntimeEntry(mode)) {
837  visitor->VisitRuntimeEntry(this);
838  }
839 }
840 
841 
842 template<typename StaticVisitor>
843 void RelocInfo::Visit(Heap* heap) {
844  RelocInfo::Mode mode = rmode();
845  if (mode == RelocInfo::EMBEDDED_OBJECT) {
846  StaticVisitor::VisitEmbeddedPointer(heap, this);
847  } else if (RelocInfo::IsCodeTarget(mode)) {
848  StaticVisitor::VisitCodeTarget(heap, this);
849  } else if (mode == RelocInfo::CELL) {
850  StaticVisitor::VisitCell(heap, this);
851  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
852  StaticVisitor::VisitExternalReference(this);
853 #ifdef ENABLE_DEBUGGER_SUPPORT
854  } else if (heap->isolate()->debug()->has_break_points() &&
855  ((RelocInfo::IsJSReturn(mode) &&
856  IsPatchedReturnSequence()) ||
857  (RelocInfo::IsDebugBreakSlot(mode) &&
858  IsPatchedDebugBreakSlotSequence()))) {
859  StaticVisitor::VisitDebugTarget(heap, this);
860 #endif
861  } else if (RelocInfo::IsRuntimeEntry(mode)) {
862  StaticVisitor::VisitRuntimeEntry(this);
863  }
864 }
865 
866 
867 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
868  ASSERT(rt.IsValid());
869  if (rt.IsRegister()) {
870  return rt.Is64Bits() ? LDR_x : LDR_w;
871  } else {
872  ASSERT(rt.IsFPRegister());
873  return rt.Is64Bits() ? LDR_d : LDR_s;
874  }
875 }
876 
877 
878 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
879  const CPURegister& rt2) {
880  ASSERT(AreSameSizeAndType(rt, rt2));
881  USE(rt2);
882  if (rt.IsRegister()) {
883  return rt.Is64Bits() ? LDP_x : LDP_w;
884  } else {
885  ASSERT(rt.IsFPRegister());
886  return rt.Is64Bits() ? LDP_d : LDP_s;
887  }
888 }
889 
890 
891 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
892  ASSERT(rt.IsValid());
893  if (rt.IsRegister()) {
894  return rt.Is64Bits() ? STR_x : STR_w;
895  } else {
896  ASSERT(rt.IsFPRegister());
897  return rt.Is64Bits() ? STR_d : STR_s;
898  }
899 }
900 
901 
902 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
903  const CPURegister& rt2) {
904  ASSERT(AreSameSizeAndType(rt, rt2));
905  USE(rt2);
906  if (rt.IsRegister()) {
907  return rt.Is64Bits() ? STP_x : STP_w;
908  } else {
909  ASSERT(rt.IsFPRegister());
910  return rt.Is64Bits() ? STP_d : STP_s;
911  }
912 }
913 
914 
915 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
916  const CPURegister& rt, const CPURegister& rt2) {
917  ASSERT(AreSameSizeAndType(rt, rt2));
918  USE(rt2);
919  if (rt.IsRegister()) {
920  return rt.Is64Bits() ? LDNP_x : LDNP_w;
921  } else {
922  ASSERT(rt.IsFPRegister());
923  return rt.Is64Bits() ? LDNP_d : LDNP_s;
924  }
925 }
926 
927 
928 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
929  const CPURegister& rt, const CPURegister& rt2) {
930  ASSERT(AreSameSizeAndType(rt, rt2));
931  USE(rt2);
932  if (rt.IsRegister()) {
933  return rt.Is64Bits() ? STNP_x : STNP_w;
934  } else {
935  ASSERT(rt.IsFPRegister());
936  return rt.Is64Bits() ? STNP_d : STNP_s;
937  }
938 }
939 
940 
941 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
942  ASSERT(kStartOfLabelLinkChain == 0);
943  int offset = LinkAndGetByteOffsetTo(label);
944  ASSERT(IsAligned(offset, kInstructionSize));
945  return offset >> kInstructionSizeLog2;
946 }
947 
948 
949 Instr Assembler::Flags(FlagsUpdate S) {
950  if (S == SetFlags) {
951  return 1 << FlagsUpdate_offset;
952  } else if (S == LeaveFlags) {
953  return 0 << FlagsUpdate_offset;
954  }
955  UNREACHABLE();
956  return 0;
957 }
958 
959 
960 Instr Assembler::Cond(Condition cond) {
961  return cond << Condition_offset;
962 }
963 
964 
965 Instr Assembler::ImmPCRelAddress(int imm21) {
966  CHECK(is_int21(imm21));
967  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
968  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
969  Instr immlo = imm << ImmPCRelLo_offset;
970  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
971 }
972 
973 
974 Instr Assembler::ImmUncondBranch(int imm26) {
975  CHECK(is_int26(imm26));
976  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
977 }
978 
979 
980 Instr Assembler::ImmCondBranch(int imm19) {
981  CHECK(is_int19(imm19));
982  return truncate_to_int19(imm19) << ImmCondBranch_offset;
983 }
984 
985 
986 Instr Assembler::ImmCmpBranch(int imm19) {
987  CHECK(is_int19(imm19));
988  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
989 }
990 
991 
992 Instr Assembler::ImmTestBranch(int imm14) {
993  CHECK(is_int14(imm14));
994  return truncate_to_int14(imm14) << ImmTestBranch_offset;
995 }
996 
997 
998 Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
999  ASSERT(is_uint6(bit_pos));
1000  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
1001  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
1002  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
1003  b5 &= ImmTestBranchBit5_mask;
1004  b40 &= ImmTestBranchBit40_mask;
1005  return b5 | b40;
1006 }
1007 
1008 
1009 Instr Assembler::SF(Register rd) {
1010  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
1011 }
1012 
1013 
1014 Instr Assembler::ImmAddSub(int64_t imm) {
1015  ASSERT(IsImmAddSub(imm));
1016  if (is_uint12(imm)) { // No shift required.
1017  return imm << ImmAddSub_offset;
1018  } else {
1019  return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
1020  }
1021 }
1022 
1023 
1024 Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
1025  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
1026  ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
1027  USE(reg_size);
1028  return imms << ImmS_offset;
1029 }
1030 
1031 
1032 Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
1033  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
1034  ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
1035  USE(reg_size);
1036  ASSERT(is_uint6(immr));
1037  return immr << ImmR_offset;
1038 }
1039 
1040 
1041 Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
1042  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
1043  ASSERT(is_uint6(imms));
1044  ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
1045  USE(reg_size);
1046  return imms << ImmSetBits_offset;
1047 }
1048 
1049 
1050 Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
1051  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
1052  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
1053  ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
1054  USE(reg_size);
1055  return immr << ImmRotate_offset;
1056 }
1057 
1058 
1059 Instr Assembler::ImmLLiteral(int imm19) {
1060  CHECK(is_int19(imm19));
1061  return truncate_to_int19(imm19) << ImmLLiteral_offset;
1062 }
1063 
1064 
1065 Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
1066  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
1067  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
1068  USE(reg_size);
1069  return bitn << BitN_offset;
1070 }
1071 
1072 
1073 Instr Assembler::ShiftDP(Shift shift) {
1074  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
1075  return shift << ShiftDP_offset;
1076 }
1077 
1078 
1079 Instr Assembler::ImmDPShift(unsigned amount) {
1080  ASSERT(is_uint6(amount));
1081  return amount << ImmDPShift_offset;
1082 }
1083 
1084 
1085 Instr Assembler::ExtendMode(Extend extend) {
1086  return extend << ExtendMode_offset;
1087 }
1088 
1089 
1090 Instr Assembler::ImmExtendShift(unsigned left_shift) {
1091  ASSERT(left_shift <= 4);
1092  return left_shift << ImmExtendShift_offset;
1093 }
1094 
1095 
1096 Instr Assembler::ImmCondCmp(unsigned imm) {
1097  ASSERT(is_uint5(imm));
1098  return imm << ImmCondCmp_offset;
1099 }
1100 
1101 
1102 Instr Assembler::Nzcv(StatusFlags nzcv) {
1103  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
1104 }
1105 
1106 
1107 Instr Assembler::ImmLSUnsigned(int imm12) {
1108  ASSERT(is_uint12(imm12));
1109  return imm12 << ImmLSUnsigned_offset;
1110 }
1111 
1112 
1113 Instr Assembler::ImmLS(int imm9) {
1114  ASSERT(is_int9(imm9));
1115  return truncate_to_int9(imm9) << ImmLS_offset;
1116 }
1117 
1118 
1119 Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
1120  ASSERT(((imm7 >> size) << size) == imm7);
1121  int scaled_imm7 = imm7 >> size;
1122  ASSERT(is_int7(scaled_imm7));
1123  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
1124 }
1125 
1126 
1127 Instr Assembler::ImmShiftLS(unsigned shift_amount) {
1128  ASSERT(is_uint1(shift_amount));
1129  return shift_amount << ImmShiftLS_offset;
1130 }
1131 
1132 
1133 Instr Assembler::ImmException(int imm16) {
1134  ASSERT(is_uint16(imm16));
1135  return imm16 << ImmException_offset;
1136 }
1137 
1138 
1139 Instr Assembler::ImmSystemRegister(int imm15) {
1140  ASSERT(is_uint15(imm15));
1141  return imm15 << ImmSystemRegister_offset;
1142 }
1143 
1144 
1145 Instr Assembler::ImmHint(int imm7) {
1146  ASSERT(is_uint7(imm7));
1147  return imm7 << ImmHint_offset;
1148 }
1149 
1150 
1151 Instr Assembler::ImmBarrierDomain(int imm2) {
1152  ASSERT(is_uint2(imm2));
1153  return imm2 << ImmBarrierDomain_offset;
1154 }
1155 
1156 
1157 Instr Assembler::ImmBarrierType(int imm2) {
1158  ASSERT(is_uint2(imm2));
1159  return imm2 << ImmBarrierType_offset;
1160 }
1161 
1162 
1163 LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
1164  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
1165  return static_cast<LSDataSize>(op >> SizeLS_offset);
1166 }
1167 
1168 
1169 Instr Assembler::ImmMoveWide(uint64_t imm) {
1170  ASSERT(is_uint16(imm));
1171  return imm << ImmMoveWide_offset;
1172 }
1173 
1174 
1175 Instr Assembler::ShiftMoveWide(int64_t shift) {
1176  ASSERT(is_uint2(shift));
1177  return shift << ShiftMoveWide_offset;
1178 }
1179 
1180 
1181 Instr Assembler::FPType(FPRegister fd) {
1182  return fd.Is64Bits() ? FP64 : FP32;
1183 }
1184 
1185 
1186 Instr Assembler::FPScale(unsigned scale) {
1187  ASSERT(is_uint6(scale));
1188  return scale << FPScale_offset;
1189 }
1190 
1191 
1192 const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
1193  return reg.Is64Bits() ? xzr : wzr;
1194 }
1195 
1196 
1197 void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
1198  LoadRelocatedValue(rt, operand, LDR_x_lit);
1199 }
1200 
1201 
1202 inline void Assembler::CheckBuffer() {
1203  ASSERT(pc_ < (buffer_ + buffer_size_));
1204  if (buffer_space() < kGap) {
1205  GrowBuffer();
1206  }
1207  if (pc_offset() >= next_veneer_pool_check_) {
1208  CheckVeneerPool(false, true);
1209  }
1210  if (pc_offset() >= next_constant_pool_check_) {
1211  CheckConstPool(false, true);
1212  }
1213 }
1214 
1215 
1216 TypeFeedbackId Assembler::RecordedAstId() {
1217  ASSERT(!recorded_ast_id_.IsNone());
1218  return recorded_ast_id_;
1219 }
1220 
1221 
1222 void Assembler::ClearRecordedAstId() {
1223  recorded_ast_id_ = TypeFeedbackId::None();
1224 }
1225 
1226 
1227 } } // namespace v8::internal
1228 
1229 #endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
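The smi-untagging helpers in this header (Operand::UntagSmi and Operand::UntagSmiAndScale) fold removal of the smi tag into the shift carried by an Operand. As a rough standalone illustration, not part of the header and assuming the 64-bit smi layout where kSmiShift is 32, the following sketch mirrors the branch logic of UntagSmiAndScale to show which shift mode and amount would be chosen for a given scale:

#include <cstdio>

namespace {

const int kSmiShift = 32;  // assumed 64-bit smi layout (tag bit + 31-bit shift)

// Mirrors the scale-vs-kSmiShift comparison in Operand::UntagSmiAndScale:
// a larger scale leaves a residual left shift (LSL), a smaller scale leaves
// a residual arithmetic right shift (ASR), and an equal scale cancels out.
void DescribeUntagSmiAndScale(int scale) {
  if (scale > kSmiShift) {
    std::printf("scale %d -> Operand(smi, LSL, %d)\n", scale, scale - kSmiShift);
  } else if (scale < kSmiShift) {
    std::printf("scale %d -> Operand(smi, ASR, %d)\n", scale, kSmiShift - scale);
  } else {
    std::printf("scale %d -> Operand(smi)\n", scale);
  }
}

}  // namespace

int main() {
  // For example, indexing pointer-sized elements (scale 3) untags with ASR #29.
  DescribeUntagSmiAndScale(3);
  DescribeUntagSmiAndScale(32);
  DescribeUntagSmiAndScale(35);
  return 0;
}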