v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
assembler-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 //
3 // Redistribution and use in source and binary forms, with or without
4 // modification, are permitted provided that the following conditions are
5 // met:
6 //
7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above
10 // copyright notice, this list of conditions and the following
11 // disclaimer in the documentation and/or other materials provided
12 // with the distribution.
13 // * Neither the name of Google Inc. nor the names of its
14 // contributors may be used to endorse or promote products derived
15 // from this software without specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 
29 #include "v8.h"
30 
31 #if V8_TARGET_ARCH_ARM64
32 
33 #define ARM64_DEFINE_REG_STATICS
34 
35 #include "arm64/assembler-arm64-inl.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 // -----------------------------------------------------------------------------
42 // CpuFeatures utilities (for V8 compatibility).
43 
44 ExternalReference ExternalReference::cpu_features() {
45  return ExternalReference(&CpuFeatures::supported_);
46 }
47 
48 
49 // -----------------------------------------------------------------------------
50 // CPURegList utilities.
51 
52 CPURegister CPURegList::PopLowestIndex() {
53  ASSERT(IsValid());
54  if (IsEmpty()) {
55  return NoCPUReg;
56  }
57  int index = CountTrailingZeros(list_, kRegListSizeInBits);
58  ASSERT((1 << index) & list_);
59  Remove(index);
60  return CPURegister::Create(index, size_, type_);
61 }
62 
63 
64 CPURegister CPURegList::PopHighestIndex() {
65  ASSERT(IsValid());
66  if (IsEmpty()) {
67  return NoCPUReg;
68  }
69  int index = CountLeadingZeros(list_, kRegListSizeInBits);
70  index = kRegListSizeInBits - 1 - index;
71  ASSERT((1 << index) & list_);
72  Remove(index);
73  return CPURegister::Create(index, size_, type_);
74 }
75 
76 
77 void CPURegList::RemoveCalleeSaved() {
78  if (type() == CPURegister::kRegister) {
79  Remove(GetCalleeSaved(RegisterSizeInBits()));
80  } else if (type() == CPURegister::kFPRegister) {
81  Remove(GetCalleeSavedFP(RegisterSizeInBits()));
82  } else {
83  ASSERT(type() == CPURegister::kNoRegister);
84  ASSERT(IsEmpty());
85  // The list must already be empty, so do nothing.
86  }
87 }
88 
89 
90 CPURegList CPURegList::GetCalleeSaved(unsigned size) {
91  return CPURegList(CPURegister::kRegister, size, 19, 29);
92 }
93 
94 
95 CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
96  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
97 }
98 
99 
100 CPURegList CPURegList::GetCallerSaved(unsigned size) {
101  // Registers x0-x18 and lr (x30) are caller-saved.
102  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
103  list.Combine(lr);
104  return list;
105 }
106 
107 
108 CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
109  // Registers d0-d7 and d16-d31 are caller-saved.
110  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
111  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
112  return list;
113 }
114 
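// ---------------------------------------------------------------------------
// Illustrative sketch (added for clarity, not part of the original file):
// typical consumption of the list helpers above. kXRegSizeInBits is assumed
// to be the 64-bit X-register width constant from the register definitions.
static void VisitCalleeSaved() {
  CPURegList saved = CPURegList::GetCalleeSaved(kXRegSizeInBits);  // x19-x29.
  while (!saved.IsEmpty()) {
    CPURegister reg = saved.PopLowestIndex();  // x19 first, then x20, ...
    USE(reg);  // Placeholder for spilling or restoring 'reg'.
  }
}
// ---------------------------------------------------------------------------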
115 
116 // This function defines the list of registers which are associated with a
117 // safepoint slot. Safepoint register slots are saved contiguously on the stack.
118 // MacroAssembler::SafepointRegisterStackIndex handles mapping from register
119 // code to index in the safepoint register slots. Any change here can affect
120 // this mapping.
121 CPURegList CPURegList::GetSafepointSavedRegisters() {
122  CPURegList list = CPURegList::GetCalleeSaved();
123  list.Combine(
124  CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
125 
126  // Note that unfortunately we can't use symbolic names for registers and have
127  // to directly use register codes. This is because this function is used to
128  // initialize some static variables and we can't rely on register variables
129  // to be initialized due to static initialization order issues in C++.
130 
131  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
132  // preserved outside of the macro assembler.
133  list.Remove(16);
134  list.Remove(17);
135 
136  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
137  // is a caller-saved register according to the procedure call standard.
138  list.Combine(18);
139 
140  // Drop jssp as the stack pointer doesn't need to be included.
141  list.Remove(28);
142 
143  // Add the link register (x30) to the safepoint list.
144  list.Combine(30);
145 
146  return list;
147 }
148 
149 
150 // -----------------------------------------------------------------------------
151 // Implementation of RelocInfo
152 
153 const int RelocInfo::kApplyMask = 0;
154 
155 
156 bool RelocInfo::IsCodedSpecially() {
157  // The deserializer needs to know whether a pointer is specially coded. Being
158  // specially coded on ARM64 means that it is a movz/movk sequence. We don't
159  // generate those for relocatable pointers.
160  return false;
161 }
162 
163 
164 bool RelocInfo::IsInConstantPool() {
165  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
166  return instr->IsLdrLiteralX();
167 }
168 
169 
170 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
171  // Patch the code at the current address with the supplied instructions.
172  Instr* pc = reinterpret_cast<Instr*>(pc_);
173  Instr* instr = reinterpret_cast<Instr*>(instructions);
174  for (int i = 0; i < instruction_count; i++) {
175  *(pc + i) = *(instr + i);
176  }
177 
178  // Indicate that code has changed.
179  CPU::FlushICache(pc_, instruction_count * kInstructionSize);
180 }
181 
182 
183 // Patch the code at the current PC with a call to the target address.
184 // Additional guard instructions can be added if required.
185 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
186  UNIMPLEMENTED();
187 }
188 
189 
190 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
191  Register reg3, Register reg4) {
192  CPURegList regs(reg1, reg2, reg3, reg4);
193  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
194  Register candidate = Register::FromAllocationIndex(i);
195  if (regs.IncludesAliasOf(candidate)) continue;
196  return candidate;
197  }
198  UNREACHABLE();
199  return NoReg;
200 }
201 
202 
203 bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
204  const CPURegister& reg3, const CPURegister& reg4,
205  const CPURegister& reg5, const CPURegister& reg6,
206  const CPURegister& reg7, const CPURegister& reg8) {
207  int number_of_valid_regs = 0;
208  int number_of_valid_fpregs = 0;
209 
210  RegList unique_regs = 0;
211  RegList unique_fpregs = 0;
212 
213  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
214 
215  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
216  if (regs[i].IsRegister()) {
217  number_of_valid_regs++;
218  unique_regs |= regs[i].Bit();
219  } else if (regs[i].IsFPRegister()) {
220  number_of_valid_fpregs++;
221  unique_fpregs |= regs[i].Bit();
222  } else {
223  ASSERT(!regs[i].IsValid());
224  }
225  }
226 
227  int number_of_unique_regs =
228  CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
229  int number_of_unique_fpregs =
230  CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
231 
232  ASSERT(number_of_valid_regs >= number_of_unique_regs);
233  ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
234 
235  return (number_of_valid_regs != number_of_unique_regs) ||
236  (number_of_valid_fpregs != number_of_unique_fpregs);
237 }
238 
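// Example (added for clarity, not part of the original file):
// AreAliased(x0, w0, x1) is true because x0 and w0 share register code 0,
// while AreAliased(x0, x1, d0) is false: integer and FP registers are tracked
// in separate lists, so they never alias each other.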
239 
240 bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
241  const CPURegister& reg3, const CPURegister& reg4,
242  const CPURegister& reg5, const CPURegister& reg6,
243  const CPURegister& reg7, const CPURegister& reg8) {
244  ASSERT(reg1.IsValid());
245  bool match = true;
246  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
247  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
248  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
249  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
250  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
251  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
252  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
253  return match;
254 }
255 
256 
257 void Operand::initialize_handle(Handle<Object> handle) {
258  AllowDeferredHandleDereference using_raw_address;
259 
260  // Verify all Objects referred by code are NOT in new space.
261  Object* obj = *handle;
262  if (obj->IsHeapObject()) {
263  ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
264  immediate_ = reinterpret_cast<intptr_t>(handle.location());
265  rmode_ = RelocInfo::EMBEDDED_OBJECT;
266  } else {
267  STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
268  immediate_ = reinterpret_cast<intptr_t>(obj);
269  rmode_ = RelocInfo::NONE64;
270  }
271 }
272 
273 
274 bool Operand::NeedsRelocation() const {
275  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
276 #ifdef DEBUG
277  if (!Serializer::enabled()) {
278  Serializer::TooLateToEnableNow();
279  }
280 #endif
281  return Serializer::enabled();
282  }
283 
284  return !RelocInfo::IsNone(rmode_);
285 }
286 
287 
288 // Assembler
289 
290 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
291  : AssemblerBase(isolate, buffer, buffer_size),
292  recorded_ast_id_(TypeFeedbackId::None()),
293  unresolved_branches_(),
294  positions_recorder_(this) {
295  const_pool_blocked_nesting_ = 0;
296  veneer_pool_blocked_nesting_ = 0;
297  Reset();
298 }
299 
300 
301 Assembler::~Assembler() {
302  ASSERT(num_pending_reloc_info_ == 0);
303  ASSERT(const_pool_blocked_nesting_ == 0);
304  ASSERT(veneer_pool_blocked_nesting_ == 0);
305 }
306 
307 
308 void Assembler::Reset() {
309 #ifdef DEBUG
310  ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
311  ASSERT(const_pool_blocked_nesting_ == 0);
312  ASSERT(veneer_pool_blocked_nesting_ == 0);
313  ASSERT(unresolved_branches_.empty());
314  memset(buffer_, 0, pc_ - buffer_);
315 #endif
316  pc_ = buffer_;
317  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
318  reinterpret_cast<byte*>(pc_));
319  num_pending_reloc_info_ = 0;
320  next_constant_pool_check_ = 0;
322  no_const_pool_before_ = 0;
323  first_const_pool_use_ = -1;
325 }
326 
327 
328 void Assembler::GetCode(CodeDesc* desc) {
329  // Emit constant pool if necessary.
330  CheckConstPool(true, false);
331  ASSERT(num_pending_reloc_info_ == 0);
332 
333  // Set up code descriptor.
334  if (desc) {
335  desc->buffer = reinterpret_cast<byte*>(buffer_);
336  desc->buffer_size = buffer_size_;
337  desc->instr_size = pc_offset();
338  desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
339  reloc_info_writer.pos();
340  desc->origin = this;
341  }
342 }
343 
344 
345 void Assembler::Align(int m) {
346  ASSERT(m >= 4 && IsPowerOf2(m));
347  while ((pc_offset() & (m - 1)) != 0) {
348  nop();
349  }
350 }
351 
352 
353 void Assembler::CheckLabelLinkChain(Label const * label) {
354 #ifdef DEBUG
355  if (label->is_linked()) {
356  int linkoffset = label->pos();
357  bool end_of_chain = false;
358  while (!end_of_chain) {
359  Instruction * link = InstructionAt(linkoffset);
360  int linkpcoffset = link->ImmPCOffset();
361  int prevlinkoffset = linkoffset + linkpcoffset;
362 
363  end_of_chain = (linkoffset == prevlinkoffset);
364  linkoffset = linkoffset + linkpcoffset;
365  }
366  }
367 #endif
368 }
369 
370 
371 void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
372  Label* label,
373  Instruction* label_veneer) {
374  ASSERT(label->is_linked());
375 
376  CheckLabelLinkChain(label);
377 
378  Instruction* link = InstructionAt(label->pos());
379  Instruction* prev_link = link;
380  Instruction* next_link;
381  bool end_of_chain = false;
382 
383  while (link != branch && !end_of_chain) {
384  next_link = link->ImmPCOffsetTarget();
385  end_of_chain = (link == next_link);
386  prev_link = link;
387  link = next_link;
388  }
389 
390  ASSERT(branch == link);
391  next_link = branch->ImmPCOffsetTarget();
392 
393  if (branch == prev_link) {
394  // The branch is the first instruction in the chain.
395  if (branch == next_link) {
396  // It is also the last instruction in the chain, so it is the only branch
397  // currently referring to this label.
398  label->Unuse();
399  } else {
400  label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
401  }
402 
403  } else if (branch == next_link) {
404  // The branch is the last (but not also the first) instruction in the chain.
405  prev_link->SetImmPCOffsetTarget(prev_link);
406 
407  } else {
408  // The branch is in the middle of the chain.
409  if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
410  prev_link->SetImmPCOffsetTarget(next_link);
411  } else if (label_veneer != NULL) {
412  // Use the veneer for all previous links in the chain.
413  prev_link->SetImmPCOffsetTarget(prev_link);
414 
415  end_of_chain = false;
416  link = next_link;
417  while (!end_of_chain) {
418  next_link = link->ImmPCOffsetTarget();
419  end_of_chain = (link == next_link);
420  link->SetImmPCOffsetTarget(label_veneer);
421  link = next_link;
422  }
423  } else {
424  // The assert below will fire.
425  // Some other work could be attempted to fix up the chain, but it would be
426  // rather complicated. If we crash here, we may want to consider using an
427  // other mechanism than a chain of branches.
428  //
429  // Note that this situation currently should not happen, as we always call
430  // this function with a veneer to the target label.
431  // However this could happen with a MacroAssembler in the following state:
432  // [previous code]
433  // B(label);
434  // [20KB code]
435  // Tbz(label); // First tbz. Pointing to unconditional branch.
436  // [20KB code]
437  // Tbz(label); // Second tbz. Pointing to the first tbz.
438  // [more code]
439  // and this function is called to remove the first tbz from the label link
440  // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
441  // the unconditional branch.
442  CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
443  UNREACHABLE();
444  }
445  }
446 
447  CheckLabelLinkChain(label);
448 }
449 
450 
451 void Assembler::bind(Label* label) {
452  // Bind label to the address at pc_. All instructions (most likely branches)
453  // that are linked to this label will be updated to point to the newly-bound
454  // label.
455 
456  ASSERT(!label->is_near_linked());
457  ASSERT(!label->is_bound());
458 
459  // If the label is linked, the link chain looks something like this:
460  //
461  // |--I----I-------I-------L
462  // |---------------------->| pc_offset
463  // |-------------->| linkoffset = label->pos()
464  // |<------| link->ImmPCOffset()
465  // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
466  //
467  // On each iteration, the last link is updated and then removed from the
468  // chain until only one remains. At that point, the label is bound.
469  //
470  // If the label is not linked, no preparation is required before binding.
471  while (label->is_linked()) {
472  int linkoffset = label->pos();
473  Instruction* link = InstructionAt(linkoffset);
474  int prevlinkoffset = linkoffset + link->ImmPCOffset();
475 
476  CheckLabelLinkChain(label);
477 
478  ASSERT(linkoffset >= 0);
479  ASSERT(linkoffset < pc_offset());
480  ASSERT((linkoffset > prevlinkoffset) ||
481  (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
482  ASSERT(prevlinkoffset >= 0);
483 
484  // Update the link to point to the label.
485  link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
486 
487  // Link the label to the previous link in the chain.
488  if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
489  // We hit kStartOfLabelLinkChain, so the chain is fully processed.
490  label->Unuse();
491  } else {
492  // Update the label for the next iteration.
493  label->link_to(prevlinkoffset);
494  }
495  }
496  label->bind_to(pc_offset());
497 
498  ASSERT(label->is_bound());
499  ASSERT(!label->is_linked());
500 
501  DeleteUnresolvedBranchInfoForLabel(label);
502 }
503 
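// ---------------------------------------------------------------------------
// Illustrative sketch (added for clarity, not part of the original file): a
// sequence that exercises the label link chain described above. The forward
// branch to 'done' stays linked until bind(&done) walks the chain and patches
// it; the backward branch to 'loop' targets an already-bound label directly.
static void EmitCountdownLoop(Assembler* assm) {
  Label loop, done;
  assm->bind(&loop);              // 'loop' is bound at the current pc_offset().
  assm->cbz(x0, &done);           // Forward branch: joins 'done's link chain.
  assm->sub(x0, x0, Operand(1));
  assm->b(&loop);                 // Backward branch: offset computed directly.
  assm->bind(&done);              // Walks the chain and patches the cbz above.
}
// ---------------------------------------------------------------------------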
504 
505 int Assembler::LinkAndGetByteOffsetTo(Label* label) {
506  ASSERT(sizeof(*pc_) == 1);
507  CheckLabelLinkChain(label);
508 
509  int offset;
510  if (label->is_bound()) {
511  // The label is bound, so it does not need to be updated. Referring
512  // instructions must link directly to the label as they will not be
513  // updated.
514  //
515  // In this case, label->pos() returns the offset of the label from the
516  // start of the buffer.
517  //
518  // Note that offset can be zero for self-referential instructions. (This
519  // could be useful for ADR, for example.)
520  offset = label->pos() - pc_offset();
521  ASSERT(offset <= 0);
522  } else {
523  if (label->is_linked()) {
524  // The label is linked, so the referring instruction should be added onto
525  // the end of the label's link chain.
526  //
527  // In this case, label->pos() returns the offset of the last linked
528  // instruction from the start of the buffer.
529  offset = label->pos() - pc_offset();
530  ASSERT(offset != kStartOfLabelLinkChain);
531  // Note that the offset here needs to be PC-relative only so that the
532  // first instruction in a buffer can link to an unbound label. Otherwise,
533  // the offset would be 0 for this case, and 0 is reserved for
534  // kStartOfLabelLinkChain.
535  } else {
536  // The label is unused, so it now becomes linked and the referring
537  // instruction is at the start of the new link chain.
538  offset = kStartOfLabelLinkChain;
539  }
540  // The instruction at pc is now the last link in the label's chain.
541  label->link_to(pc_offset());
542  }
543 
544  return offset;
545 }
546 
547 
548 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
549  if (unresolved_branches_.empty()) {
551  return;
552  }
553 
554  // Branches to this label will be resolved when the label is bound below.
555  std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
556  it = unresolved_branches_.begin();
557  while (it != unresolved_branches_.end()) {
558  it_tmp = it++;
559  if (it_tmp->second.label_ == label) {
560  CHECK(it_tmp->first >= pc_offset());
561  unresolved_branches_.erase(it_tmp);
562  }
563  }
564  if (unresolved_branches_.empty()) {
566  } else {
569  }
570 }
571 
572 
573 void Assembler::StartBlockConstPool() {
574  if (const_pool_blocked_nesting_++ == 0) {
575  // Prevent constant pool checks happening by setting the next check to
576  // the biggest possible offset.
577  next_constant_pool_check_ = kMaxInt;
578  }
579 }
580 
581 
582 void Assembler::EndBlockConstPool() {
583  if (--const_pool_blocked_nesting_ == 0) {
584  // Check the constant pool hasn't been blocked for too long.
585  ASSERT((num_pending_reloc_info_ == 0) ||
586  (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
587  // Two cases:
588  // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
589  // still blocked
590  // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
591  // will trigger a check.
592  next_constant_pool_check_ = no_const_pool_before_;
593  }
594 }
595 
596 
597 bool Assembler::is_const_pool_blocked() const {
598  return (const_pool_blocked_nesting_ > 0) ||
599  (pc_offset() < no_const_pool_before_);
600 }
601 
602 
603 bool Assembler::IsConstantPoolAt(Instruction* instr) {
604  // The constant pool marker is made of two instructions. These instructions
605  // will never be emitted by the JIT, so checking for the first one is enough:
606  // 0: ldr xzr, #<size of pool>
607  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
608 
609  // It is still worth asserting the marker is complete.
610  // 4: blr xzr
611  ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
612  instr->following()->Rn() == xzr.code()));
613 
614  return result;
615 }
616 
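// Layout sketch of an emitted constant pool (added for clarity, not part of
// the original file):
//   ldr xzr, #<size of pool>   ; marker recognised by IsConstantPoolAt() above
//   blr xzr                    ; pool guard - crashes if execution falls through
//   <pooled 64-bit constants>  ; literal data follows the guard
// ConstantPoolSizeAt() below recovers the pool size from the marker's literal
// field so callers can skip over the data instead of disassembling it.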
617 
618 int Assembler::ConstantPoolSizeAt(Instruction* instr) {
619 #ifdef USE_SIMULATOR
620  // Assembler::debug() embeds constants directly into the instruction stream.
621  // Although this is not a genuine constant pool, treat it like one to avoid
622  // disassembling the constants.
623  if ((instr->Mask(ExceptionMask) == HLT) &&
624  (instr->ImmException() == kImmExceptionIsDebug)) {
625  const char* message =
626  reinterpret_cast<const char*>(
627  instr->InstructionAtOffset(kDebugMessageOffset));
628  int size = kDebugMessageOffset + strlen(message) + 1;
629  return RoundUp(size, kInstructionSize) / kInstructionSize;
630  }
631  // Same for printf support, see MacroAssembler::CallPrintf().
632  if ((instr->Mask(ExceptionMask) == HLT) &&
633  (instr->ImmException() == kImmExceptionIsPrintf)) {
635  }
636 #endif
637  if (IsConstantPoolAt(instr)) {
638  return instr->ImmLLiteral();
639  } else {
640  return -1;
641  }
642 }
643 
644 
645 void Assembler::ConstantPoolMarker(uint32_t size) {
647  // + 1 is for the crash guard.
648  Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
649 }
650 
651 
652 void Assembler::EmitPoolGuard() {
653  // We must generate only one instruction as this is used in scopes that
654  // control the size of the code generated.
655  Emit(BLR | Rn(xzr));
656 }
657 
658 
659 void Assembler::ConstantPoolGuard() {
660 #ifdef DEBUG
661  // Currently this is only used after a constant pool marker.
662  ASSERT(is_const_pool_blocked());
663  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
664  ASSERT(instr->preceding()->IsLdrLiteralX() &&
665  instr->preceding()->Rt() == xzr.code());
666 #endif
667  EmitPoolGuard();
668 }
669 
670 
671 void Assembler::StartBlockVeneerPool() {
672  ++veneer_pool_blocked_nesting_;
673 }
674 
675 
676 void Assembler::EndBlockVeneerPool() {
677  if (--veneer_pool_blocked_nesting_ == 0) {
678  // Check the veneer pool hasn't been blocked for too long.
679  ASSERT(unresolved_branches_.empty() ||
680  (pc_offset() < unresolved_branches_first_limit()));
681  }
682 }
683 
684 
685 void Assembler::br(const Register& xn) {
686  positions_recorder()->WriteRecordedPositions();
687  ASSERT(xn.Is64Bits());
688  Emit(BR | Rn(xn));
689 }
690 
691 
692 void Assembler::blr(const Register& xn) {
693  positions_recorder()->WriteRecordedPositions();
694  ASSERT(xn.Is64Bits());
695  // The pattern 'blr xzr' is used as a guard to detect when execution falls
696  // through the constant pool. It should not be emitted.
697  ASSERT(!xn.Is(xzr));
698  Emit(BLR | Rn(xn));
699 }
700 
701 
702 void Assembler::ret(const Register& xn) {
703  positions_recorder()->WriteRecordedPositions();
704  ASSERT(xn.Is64Bits());
705  Emit(RET | Rn(xn));
706 }
707 
708 
709 void Assembler::b(int imm26) {
710  Emit(B | ImmUncondBranch(imm26));
711 }
712 
713 
714 void Assembler::b(Label* label) {
715  positions_recorder()->WriteRecordedPositions();
716  b(LinkAndGetInstructionOffsetTo(label));
717 }
718 
719 
720 void Assembler::b(int imm19, Condition cond) {
721  Emit(B_cond | ImmCondBranch(imm19) | cond);
722 }
723 
724 
725 void Assembler::b(Label* label, Condition cond) {
726  positions_recorder()->WriteRecordedPositions();
727  b(LinkAndGetInstructionOffsetTo(label), cond);
728 }
729 
730 
731 void Assembler::bl(int imm26) {
732  positions_recorder()->WriteRecordedPositions();
733  Emit(BL | ImmUncondBranch(imm26));
734 }
735 
736 
737 void Assembler::bl(Label* label) {
738  positions_recorder()->WriteRecordedPositions();
739  bl(LinkAndGetInstructionOffsetTo(label));
740 }
741 
742 
743 void Assembler::cbz(const Register& rt,
744  int imm19) {
745  positions_recorder()->WriteRecordedPositions();
746  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
747 }
748 
749 
750 void Assembler::cbz(const Register& rt,
751  Label* label) {
752  positions_recorder()->WriteRecordedPositions();
753  cbz(rt, LinkAndGetInstructionOffsetTo(label));
754 }
755 
756 
757 void Assembler::cbnz(const Register& rt,
758  int imm19) {
759  positions_recorder()->WriteRecordedPositions();
760  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
761 }
762 
763 
764 void Assembler::cbnz(const Register& rt,
765  Label* label) {
766  positions_recorder()->WriteRecordedPositions();
767  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
768 }
769 
770 
771 void Assembler::tbz(const Register& rt,
772  unsigned bit_pos,
773  int imm14) {
774  positions_recorder()->WriteRecordedPositions();
775  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
776  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
777 }
778 
779 
780 void Assembler::tbz(const Register& rt,
781  unsigned bit_pos,
782  Label* label) {
783  positions_recorder()->WriteRecordedPositions();
784  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
785 }
786 
787 
788 void Assembler::tbnz(const Register& rt,
789  unsigned bit_pos,
790  int imm14) {
791  positions_recorder()->WriteRecordedPositions();
792  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
793  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
794 }
795 
796 
797 void Assembler::tbnz(const Register& rt,
798  unsigned bit_pos,
799  Label* label) {
800  positions_recorder()->WriteRecordedPositions();
801  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
802 }
803 
804 
805 void Assembler::adr(const Register& rd, int imm21) {
806  ASSERT(rd.Is64Bits());
807  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
808 }
809 
810 
811 void Assembler::adr(const Register& rd, Label* label) {
812  adr(rd, LinkAndGetByteOffsetTo(label));
813 }
814 
815 
816 void Assembler::add(const Register& rd,
817  const Register& rn,
818  const Operand& operand) {
819  AddSub(rd, rn, operand, LeaveFlags, ADD);
820 }
821 
822 
823 void Assembler::adds(const Register& rd,
824  const Register& rn,
825  const Operand& operand) {
826  AddSub(rd, rn, operand, SetFlags, ADD);
827 }
828 
829 
830 void Assembler::cmn(const Register& rn,
831  const Operand& operand) {
832  Register zr = AppropriateZeroRegFor(rn);
833  adds(zr, rn, operand);
834 }
835 
836 
837 void Assembler::sub(const Register& rd,
838  const Register& rn,
839  const Operand& operand) {
840  AddSub(rd, rn, operand, LeaveFlags, SUB);
841 }
842 
843 
844 void Assembler::subs(const Register& rd,
845  const Register& rn,
846  const Operand& operand) {
847  AddSub(rd, rn, operand, SetFlags, SUB);
848 }
849 
850 
851 void Assembler::cmp(const Register& rn, const Operand& operand) {
852  Register zr = AppropriateZeroRegFor(rn);
853  subs(zr, rn, operand);
854 }
855 
856 
857 void Assembler::neg(const Register& rd, const Operand& operand) {
858  Register zr = AppropriateZeroRegFor(rd);
859  sub(rd, zr, operand);
860 }
861 
862 
863 void Assembler::negs(const Register& rd, const Operand& operand) {
864  Register zr = AppropriateZeroRegFor(rd);
865  subs(rd, zr, operand);
866 }
867 
868 
869 void Assembler::adc(const Register& rd,
870  const Register& rn,
871  const Operand& operand) {
872  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
873 }
874 
875 
876 void Assembler::adcs(const Register& rd,
877  const Register& rn,
878  const Operand& operand) {
879  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
880 }
881 
882 
883 void Assembler::sbc(const Register& rd,
884  const Register& rn,
885  const Operand& operand) {
886  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
887 }
888 
889 
890 void Assembler::sbcs(const Register& rd,
891  const Register& rn,
892  const Operand& operand) {
893  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
894 }
895 
896 
897 void Assembler::ngc(const Register& rd, const Operand& operand) {
898  Register zr = AppropriateZeroRegFor(rd);
899  sbc(rd, zr, operand);
900 }
901 
902 
903 void Assembler::ngcs(const Register& rd, const Operand& operand) {
904  Register zr = AppropriateZeroRegFor(rd);
905  sbcs(rd, zr, operand);
906 }
907 
908 
909 // Logical instructions.
910 void Assembler::and_(const Register& rd,
911  const Register& rn,
912  const Operand& operand) {
913  Logical(rd, rn, operand, AND);
914 }
915 
916 
917 void Assembler::ands(const Register& rd,
918  const Register& rn,
919  const Operand& operand) {
920  Logical(rd, rn, operand, ANDS);
921 }
922 
923 
924 void Assembler::tst(const Register& rn,
925  const Operand& operand) {
926  ands(AppropriateZeroRegFor(rn), rn, operand);
927 }
928 
929 
930 void Assembler::bic(const Register& rd,
931  const Register& rn,
932  const Operand& operand) {
933  Logical(rd, rn, operand, BIC);
934 }
935 
936 
937 void Assembler::bics(const Register& rd,
938  const Register& rn,
939  const Operand& operand) {
940  Logical(rd, rn, operand, BICS);
941 }
942 
943 
944 void Assembler::orr(const Register& rd,
945  const Register& rn,
946  const Operand& operand) {
947  Logical(rd, rn, operand, ORR);
948 }
949 
950 
951 void Assembler::orn(const Register& rd,
952  const Register& rn,
953  const Operand& operand) {
954  Logical(rd, rn, operand, ORN);
955 }
956 
957 
958 void Assembler::eor(const Register& rd,
959  const Register& rn,
960  const Operand& operand) {
961  Logical(rd, rn, operand, EOR);
962 }
963 
964 
965 void Assembler::eon(const Register& rd,
966  const Register& rn,
967  const Operand& operand) {
968  Logical(rd, rn, operand, EON);
969 }
970 
971 
972 void Assembler::lslv(const Register& rd,
973  const Register& rn,
974  const Register& rm) {
975  ASSERT(rd.SizeInBits() == rn.SizeInBits());
976  ASSERT(rd.SizeInBits() == rm.SizeInBits());
977  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
978 }
979 
980 
981 void Assembler::lsrv(const Register& rd,
982  const Register& rn,
983  const Register& rm) {
984  ASSERT(rd.SizeInBits() == rn.SizeInBits());
985  ASSERT(rd.SizeInBits() == rm.SizeInBits());
986  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
987 }
988 
989 
990 void Assembler::asrv(const Register& rd,
991  const Register& rn,
992  const Register& rm) {
993  ASSERT(rd.SizeInBits() == rn.SizeInBits());
994  ASSERT(rd.SizeInBits() == rm.SizeInBits());
995  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
996 }
997 
998 
999 void Assembler::rorv(const Register& rd,
1000  const Register& rn,
1001  const Register& rm) {
1002  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1003  ASSERT(rd.SizeInBits() == rm.SizeInBits());
1004  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
1005 }
1006 
1007 
1008 // Bitfield operations.
1009 void Assembler::bfm(const Register& rd,
1010  const Register& rn,
1011  unsigned immr,
1012  unsigned imms) {
1013  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1014  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1015  Emit(SF(rd) | BFM | N |
1016  ImmR(immr, rd.SizeInBits()) |
1017  ImmS(imms, rn.SizeInBits()) |
1018  Rn(rn) | Rd(rd));
1019 }
1020 
1021 
1022 void Assembler::sbfm(const Register& rd,
1023  const Register& rn,
1024  unsigned immr,
1025  unsigned imms) {
1026  ASSERT(rd.Is64Bits() || rn.Is32Bits());
1027  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1028  Emit(SF(rd) | SBFM | N |
1029  ImmR(immr, rd.SizeInBits()) |
1030  ImmS(imms, rn.SizeInBits()) |
1031  Rn(rn) | Rd(rd));
1032 }
1033 
1034 
1035 void Assembler::ubfm(const Register& rd,
1036  const Register& rn,
1037  unsigned immr,
1038  unsigned imms) {
1039  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1040  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1041  Emit(SF(rd) | UBFM | N |
1042  ImmR(immr, rd.SizeInBits()) |
1043  ImmS(imms, rn.SizeInBits()) |
1044  Rn(rn) | Rd(rd));
1045 }
1046 
1047 
1048 void Assembler::extr(const Register& rd,
1049  const Register& rn,
1050  const Register& rm,
1051  unsigned lsb) {
1052  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1053  ASSERT(rd.SizeInBits() == rm.SizeInBits());
1054  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1055  Emit(SF(rd) | EXTR | N | Rm(rm) |
1056  ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
1057 }
1058 
1059 
1060 void Assembler::csel(const Register& rd,
1061  const Register& rn,
1062  const Register& rm,
1063  Condition cond) {
1064  ConditionalSelect(rd, rn, rm, cond, CSEL);
1065 }
1066 
1067 
1068 void Assembler::csinc(const Register& rd,
1069  const Register& rn,
1070  const Register& rm,
1071  Condition cond) {
1072  ConditionalSelect(rd, rn, rm, cond, CSINC);
1073 }
1074 
1075 
1076 void Assembler::csinv(const Register& rd,
1077  const Register& rn,
1078  const Register& rm,
1079  Condition cond) {
1080  ConditionalSelect(rd, rn, rm, cond, CSINV);
1081 }
1082 
1083 
1084 void Assembler::csneg(const Register& rd,
1085  const Register& rn,
1086  const Register& rm,
1087  Condition cond) {
1088  ConditionalSelect(rd, rn, rm, cond, CSNEG);
1089 }
1090 
1091 
1092 void Assembler::cset(const Register &rd, Condition cond) {
1093  ASSERT((cond != al) && (cond != nv));
1094  Register zr = AppropriateZeroRegFor(rd);
1095  csinc(rd, zr, zr, InvertCondition(cond));
1096 }
1097 
1098 
1099 void Assembler::csetm(const Register &rd, Condition cond) {
1100  ASSERT((cond != al) && (cond != nv));
1101  Register zr = AppropriateZeroRegFor(rd);
1102  csinv(rd, zr, zr, InvertCondition(cond));
1103 }
1104 
1105 
1106 void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
1107  ASSERT((cond != al) && (cond != nv));
1108  csinc(rd, rn, rn, InvertCondition(cond));
1109 }
1110 
1111 
1112 void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
1113  ASSERT((cond != al) && (cond != nv));
1114  csinv(rd, rn, rn, InvertCondition(cond));
1115 }
1116 
1117 
1118 void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
1119  ASSERT((cond != al) && (cond != nv));
1120  csneg(rd, rn, rn, InvertCondition(cond));
1121 }
1122 
1123 
1124 void Assembler::ConditionalSelect(const Register& rd,
1125  const Register& rn,
1126  const Register& rm,
1127  Condition cond,
1128  ConditionalSelectOp op) {
1129  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1130  ASSERT(rd.SizeInBits() == rm.SizeInBits());
1131  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
1132 }
1133 
1134 
1135 void Assembler::ccmn(const Register& rn,
1136  const Operand& operand,
1137  StatusFlags nzcv,
1138  Condition cond) {
1139  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
1140 }
1141 
1142 
1143 void Assembler::ccmp(const Register& rn,
1144  const Operand& operand,
1145  StatusFlags nzcv,
1146  Condition cond) {
1147  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
1148 }
1149 
1150 
1151 void Assembler::DataProcessing3Source(const Register& rd,
1152  const Register& rn,
1153  const Register& rm,
1154  const Register& ra,
1155  DataProcessing3SourceOp op) {
1156  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
1157 }
1158 
1159 
1160 void Assembler::mul(const Register& rd,
1161  const Register& rn,
1162  const Register& rm) {
1163  ASSERT(AreSameSizeAndType(rd, rn, rm));
1164  Register zr = AppropriateZeroRegFor(rn);
1165  DataProcessing3Source(rd, rn, rm, zr, MADD);
1166 }
1167 
1168 
1169 void Assembler::madd(const Register& rd,
1170  const Register& rn,
1171  const Register& rm,
1172  const Register& ra) {
1173  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
1174  DataProcessing3Source(rd, rn, rm, ra, MADD);
1175 }
1176 
1177 
1178 void Assembler::mneg(const Register& rd,
1179  const Register& rn,
1180  const Register& rm) {
1181  ASSERT(AreSameSizeAndType(rd, rn, rm));
1182  Register zr = AppropriateZeroRegFor(rn);
1183  DataProcessing3Source(rd, rn, rm, zr, MSUB);
1184 }
1185 
1186 
1187 void Assembler::msub(const Register& rd,
1188  const Register& rn,
1189  const Register& rm,
1190  const Register& ra) {
1191  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
1192  DataProcessing3Source(rd, rn, rm, ra, MSUB);
1193 }
1194 
1195 
1196 void Assembler::smaddl(const Register& rd,
1197  const Register& rn,
1198  const Register& rm,
1199  const Register& ra) {
1200  ASSERT(rd.Is64Bits() && ra.Is64Bits());
1201  ASSERT(rn.Is32Bits() && rm.Is32Bits());
1202  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
1203 }
1204 
1205 
1206 void Assembler::smsubl(const Register& rd,
1207  const Register& rn,
1208  const Register& rm,
1209  const Register& ra) {
1210  ASSERT(rd.Is64Bits() && ra.Is64Bits());
1211  ASSERT(rn.Is32Bits() && rm.Is32Bits());
1212  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
1213 }
1214 
1215 
1216 void Assembler::umaddl(const Register& rd,
1217  const Register& rn,
1218  const Register& rm,
1219  const Register& ra) {
1220  ASSERT(rd.Is64Bits() && ra.Is64Bits());
1221  ASSERT(rn.Is32Bits() && rm.Is32Bits());
1222  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
1223 }
1224 
1225 
1226 void Assembler::umsubl(const Register& rd,
1227  const Register& rn,
1228  const Register& rm,
1229  const Register& ra) {
1230  ASSERT(rd.Is64Bits() && ra.Is64Bits());
1231  ASSERT(rn.Is32Bits() && rm.Is32Bits());
1232  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
1233 }
1234 
1235 
1236 void Assembler::smull(const Register& rd,
1237  const Register& rn,
1238  const Register& rm) {
1239  ASSERT(rd.Is64Bits());
1240  ASSERT(rn.Is32Bits() && rm.Is32Bits());
1241  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
1242 }
1243 
1244 
1245 void Assembler::smulh(const Register& rd,
1246  const Register& rn,
1247  const Register& rm) {
1248  ASSERT(AreSameSizeAndType(rd, rn, rm));
1249  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
1250 }
1251 
1252 
1253 void Assembler::sdiv(const Register& rd,
1254  const Register& rn,
1255  const Register& rm) {
1256  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1257  ASSERT(rd.SizeInBits() == rm.SizeInBits());
1258  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
1259 }
1260 
1261 
1262 void Assembler::udiv(const Register& rd,
1263  const Register& rn,
1264  const Register& rm) {
1265  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1266  ASSERT(rd.SizeInBits() == rm.SizeInBits());
1267  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
1268 }
1269 
1270 
1271 void Assembler::rbit(const Register& rd,
1272  const Register& rn) {
1273  DataProcessing1Source(rd, rn, RBIT);
1274 }
1275 
1276 
1277 void Assembler::rev16(const Register& rd,
1278  const Register& rn) {
1279  DataProcessing1Source(rd, rn, REV16);
1280 }
1281 
1282 
1283 void Assembler::rev32(const Register& rd,
1284  const Register& rn) {
1285  ASSERT(rd.Is64Bits());
1286  DataProcessing1Source(rd, rn, REV);
1287 }
1288 
1289 
1290 void Assembler::rev(const Register& rd,
1291  const Register& rn) {
1292  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
1293 }
1294 
1295 
1296 void Assembler::clz(const Register& rd,
1297  const Register& rn) {
1298  DataProcessing1Source(rd, rn, CLZ);
1299 }
1300 
1301 
1302 void Assembler::cls(const Register& rd,
1303  const Register& rn) {
1304  DataProcessing1Source(rd, rn, CLS);
1305 }
1306 
1307 
1308 void Assembler::ldp(const CPURegister& rt,
1309  const CPURegister& rt2,
1310  const MemOperand& src) {
1311  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
1312 }
1313 
1314 
1315 void Assembler::stp(const CPURegister& rt,
1316  const CPURegister& rt2,
1317  const MemOperand& dst) {
1318  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
1319 }
1320 
1321 
1322 void Assembler::ldpsw(const Register& rt,
1323  const Register& rt2,
1324  const MemOperand& src) {
1325  ASSERT(rt.Is64Bits());
1326  LoadStorePair(rt, rt2, src, LDPSW_x);
1327 }
1328 
1329 
1330 void Assembler::LoadStorePair(const CPURegister& rt,
1331  const CPURegister& rt2,
1332  const MemOperand& addr,
1333  LoadStorePairOp op) {
1334  // 'rt' and 'rt2' can only be aliased for stores.
1335  ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
1336  ASSERT(AreSameSizeAndType(rt, rt2));
1337 
1338  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1339  ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
1340 
1341  Instr addrmodeop;
1342  if (addr.IsImmediateOffset()) {
1343  addrmodeop = LoadStorePairOffsetFixed;
1344  } else {
1345  // Pre-index and post-index modes.
1346  ASSERT(!rt.Is(addr.base()));
1347  ASSERT(!rt2.Is(addr.base()));
1348  ASSERT(addr.offset() != 0);
1349  if (addr.IsPreIndex()) {
1350  addrmodeop = LoadStorePairPreIndexFixed;
1351  } else {
1352  ASSERT(addr.IsPostIndex());
1353  addrmodeop = LoadStorePairPostIndexFixed;
1354  }
1355  }
1356  Emit(addrmodeop | memop);
1357 }
1358 
1359 
1360 void Assembler::ldnp(const CPURegister& rt,
1361  const CPURegister& rt2,
1362  const MemOperand& src) {
1363  LoadStorePairNonTemporal(rt, rt2, src,
1364  LoadPairNonTemporalOpFor(rt, rt2));
1365 }
1366 
1367 
1368 void Assembler::stnp(const CPURegister& rt,
1369  const CPURegister& rt2,
1370  const MemOperand& dst) {
1371  LoadStorePairNonTemporal(rt, rt2, dst,
1372  StorePairNonTemporalOpFor(rt, rt2));
1373 }
1374 
1375 
1376 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
1377  const CPURegister& rt2,
1378  const MemOperand& addr,
1379  LoadStorePairNonTemporalOp op) {
1380  ASSERT(!rt.Is(rt2));
1381  ASSERT(AreSameSizeAndType(rt, rt2));
1382  ASSERT(addr.IsImmediateOffset());
1383 
1384  LSDataSize size = CalcLSPairDataSize(
1385  static_cast<LoadStorePairOp>(op & LoadStorePairMask));
1386  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1387  ImmLSPair(addr.offset(), size));
1388 }
1389 
1390 
1391 // Memory instructions.
1392 void Assembler::ldrb(const Register& rt, const MemOperand& src) {
1393  LoadStore(rt, src, LDRB_w);
1394 }
1395 
1396 
1397 void Assembler::strb(const Register& rt, const MemOperand& dst) {
1398  LoadStore(rt, dst, STRB_w);
1399 }
1400 
1401 
1402 void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
1403  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
1404 }
1405 
1406 
1407 void Assembler::ldrh(const Register& rt, const MemOperand& src) {
1408  LoadStore(rt, src, LDRH_w);
1409 }
1410 
1411 
1412 void Assembler::strh(const Register& rt, const MemOperand& dst) {
1413  LoadStore(rt, dst, STRH_w);
1414 }
1415 
1416 
1417 void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
1418  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
1419 }
1420 
1421 
1422 void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
1423  LoadStore(rt, src, LoadOpFor(rt));
1424 }
1425 
1426 
1427 void Assembler::str(const CPURegister& rt, const MemOperand& src) {
1428  LoadStore(rt, src, StoreOpFor(rt));
1429 }
1430 
1431 
1432 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
1433  ASSERT(rt.Is64Bits());
1434  LoadStore(rt, src, LDRSW_x);
1435 }
1436 
1437 
1438 void Assembler::ldr(const Register& rt, uint64_t imm) {
1439  // TODO(all): Constant pool may be garbage collected. Hence we cannot store
1440  // arbitrary values in them. Manually move it for now. Fix
1441  // MacroAssembler::Fmov when this is implemented.
1442  UNIMPLEMENTED();
1443 }
1444 
1445 
1446 void Assembler::ldr(const FPRegister& ft, double imm) {
1447  // TODO(all): Constant pool may be garbage collected. Hence we cannot store
1448  // arbitrary values in them. Manually move it for now. Fix
1449  // MacroAssembler::Fmov when this is implemented.
1450  UNIMPLEMENTED();
1451 }
1452 
1453 
1454 void Assembler::ldr(const FPRegister& ft, float imm) {
1455  // TODO(all): Constant pool may be garbage collected. Hence we cannot store
1456  // arbitrary values in them. Manually move it for now. Fix
1457  // MacroAssembler::Fmov when this is implemented.
1458  UNIMPLEMENTED();
1459 }
1460 
1461 
1462 void Assembler::mov(const Register& rd, const Register& rm) {
1463  // Moves involving the stack pointer are encoded as add immediate with
1464  // second operand of zero. Otherwise, orr with first operand zr is
1465  // used.
1466  if (rd.IsSP() || rm.IsSP()) {
1467  add(rd, rm, 0);
1468  } else {
1469  orr(rd, AppropriateZeroRegFor(rd), rm);
1470  }
1471 }
1472 
1473 
1474 void Assembler::mvn(const Register& rd, const Operand& operand) {
1475  orn(rd, AppropriateZeroRegFor(rd), operand);
1476 }
1477 
1478 
1479 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
1480  ASSERT(rt.Is64Bits());
1481  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
1482 }
1483 
1484 
1485 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
1486  ASSERT(rt.Is64Bits());
1487  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
1488 }
1489 
1490 
1491 void Assembler::hint(SystemHint code) {
1492  Emit(HINT | ImmHint(code) | Rt(xzr));
1493 }
1494 
1495 
1496 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
1497  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
1498 }
1499 
1500 
1501 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
1502  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
1503 }
1504 
1505 
1506 void Assembler::isb() {
1507  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
1508 }
1509 
1510 
1511 void Assembler::fmov(FPRegister fd, double imm) {
1512  ASSERT(fd.Is64Bits());
1513  ASSERT(IsImmFP64(imm));
1514  Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
1515 }
1516 
1517 
1518 void Assembler::fmov(FPRegister fd, float imm) {
1519  ASSERT(fd.Is32Bits());
1520  ASSERT(IsImmFP32(imm));
1521  Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
1522 }
1523 
1524 
1525 void Assembler::fmov(Register rd, FPRegister fn) {
1526  ASSERT(rd.SizeInBits() == fn.SizeInBits());
1527  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
1528  Emit(op | Rd(rd) | Rn(fn));
1529 }
1530 
1531 
1532 void Assembler::fmov(FPRegister fd, Register rn) {
1533  ASSERT(fd.SizeInBits() == rn.SizeInBits());
1534  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
1535  Emit(op | Rd(fd) | Rn(rn));
1536 }
1537 
1538 
1539 void Assembler::fmov(FPRegister fd, FPRegister fn) {
1540  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1541  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
1542 }
1543 
1544 
1545 void Assembler::fadd(const FPRegister& fd,
1546  const FPRegister& fn,
1547  const FPRegister& fm) {
1548  FPDataProcessing2Source(fd, fn, fm, FADD);
1549 }
1550 
1551 
1552 void Assembler::fsub(const FPRegister& fd,
1553  const FPRegister& fn,
1554  const FPRegister& fm) {
1555  FPDataProcessing2Source(fd, fn, fm, FSUB);
1556 }
1557 
1558 
1559 void Assembler::fmul(const FPRegister& fd,
1560  const FPRegister& fn,
1561  const FPRegister& fm) {
1562  FPDataProcessing2Source(fd, fn, fm, FMUL);
1563 }
1564 
1565 
1566 void Assembler::fmadd(const FPRegister& fd,
1567  const FPRegister& fn,
1568  const FPRegister& fm,
1569  const FPRegister& fa) {
1570  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
1571 }
1572 
1573 
1574 void Assembler::fmsub(const FPRegister& fd,
1575  const FPRegister& fn,
1576  const FPRegister& fm,
1577  const FPRegister& fa) {
1578  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
1579 }
1580 
1581 
1582 void Assembler::fnmadd(const FPRegister& fd,
1583  const FPRegister& fn,
1584  const FPRegister& fm,
1585  const FPRegister& fa) {
1586  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
1587 }
1588 
1589 
1590 void Assembler::fnmsub(const FPRegister& fd,
1591  const FPRegister& fn,
1592  const FPRegister& fm,
1593  const FPRegister& fa) {
1594  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
1595 }
1596 
1597 
1598 void Assembler::fdiv(const FPRegister& fd,
1599  const FPRegister& fn,
1600  const FPRegister& fm) {
1601  FPDataProcessing2Source(fd, fn, fm, FDIV);
1602 }
1603 
1604 
1605 void Assembler::fmax(const FPRegister& fd,
1606  const FPRegister& fn,
1607  const FPRegister& fm) {
1608  FPDataProcessing2Source(fd, fn, fm, FMAX);
1609 }
1610 
1611 
1612 void Assembler::fmaxnm(const FPRegister& fd,
1613  const FPRegister& fn,
1614  const FPRegister& fm) {
1615  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
1616 }
1617 
1618 
1619 void Assembler::fmin(const FPRegister& fd,
1620  const FPRegister& fn,
1621  const FPRegister& fm) {
1622  FPDataProcessing2Source(fd, fn, fm, FMIN);
1623 }
1624 
1625 
1626 void Assembler::fminnm(const FPRegister& fd,
1627  const FPRegister& fn,
1628  const FPRegister& fm) {
1629  FPDataProcessing2Source(fd, fn, fm, FMINNM);
1630 }
1631 
1632 
1633 void Assembler::fabs(const FPRegister& fd,
1634  const FPRegister& fn) {
1635  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1636  FPDataProcessing1Source(fd, fn, FABS);
1637 }
1638 
1639 
1640 void Assembler::fneg(const FPRegister& fd,
1641  const FPRegister& fn) {
1642  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1643  FPDataProcessing1Source(fd, fn, FNEG);
1644 }
1645 
1646 
1647 void Assembler::fsqrt(const FPRegister& fd,
1648  const FPRegister& fn) {
1649  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1650  FPDataProcessing1Source(fd, fn, FSQRT);
1651 }
1652 
1653 
1654 void Assembler::frinta(const FPRegister& fd,
1655  const FPRegister& fn) {
1656  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1657  FPDataProcessing1Source(fd, fn, FRINTA);
1658 }
1659 
1660 
1661 void Assembler::frintn(const FPRegister& fd,
1662  const FPRegister& fn) {
1663  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1664  FPDataProcessing1Source(fd, fn, FRINTN);
1665 }
1666 
1667 
1668 void Assembler::frintz(const FPRegister& fd,
1669  const FPRegister& fn) {
1670  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1671  FPDataProcessing1Source(fd, fn, FRINTZ);
1672 }
1673 
1674 
1675 void Assembler::fcmp(const FPRegister& fn,
1676  const FPRegister& fm) {
1677  ASSERT(fn.SizeInBits() == fm.SizeInBits());
1678  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
1679 }
1680 
1681 
1682 void Assembler::fcmp(const FPRegister& fn,
1683  double value) {
1684  USE(value);
1685  // Although the fcmp instruction can strictly only take an immediate value of
1686  // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
1687  // affect the result of the comparison.
1688  ASSERT(value == 0.0);
1689  Emit(FPType(fn) | FCMP_zero | Rn(fn));
1690 }
1691 
1692 
1693 void Assembler::fccmp(const FPRegister& fn,
1694  const FPRegister& fm,
1695  StatusFlags nzcv,
1696  Condition cond) {
1697  ASSERT(fn.SizeInBits() == fm.SizeInBits());
1698  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
1699 }
1700 
1701 
1702 void Assembler::fcsel(const FPRegister& fd,
1703  const FPRegister& fn,
1704  const FPRegister& fm,
1705  Condition cond) {
1706  ASSERT(fd.SizeInBits() == fn.SizeInBits());
1707  ASSERT(fd.SizeInBits() == fm.SizeInBits());
1708  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
1709 }
1710 
1711 
1712 void Assembler::FPConvertToInt(const Register& rd,
1713  const FPRegister& fn,
1714  FPIntegerConvertOp op) {
1715  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
1716 }
1717 
1718 
1719 void Assembler::fcvt(const FPRegister& fd,
1720  const FPRegister& fn) {
1721  if (fd.Is64Bits()) {
1722  // Convert float to double.
1723  ASSERT(fn.Is32Bits());
1724  FPDataProcessing1Source(fd, fn, FCVT_ds);
1725  } else {
1726  // Convert double to float.
1727  ASSERT(fn.Is64Bits());
1728  FPDataProcessing1Source(fd, fn, FCVT_sd);
1729  }
1730 }
1731 
1732 
1733 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
1734  FPConvertToInt(rd, fn, FCVTAU);
1735 }
1736 
1737 
1738 void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
1739  FPConvertToInt(rd, fn, FCVTAS);
1740 }
1741 
1742 
1743 void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
1744  FPConvertToInt(rd, fn, FCVTMU);
1745 }
1746 
1747 
1748 void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
1749  FPConvertToInt(rd, fn, FCVTMS);
1750 }
1751 
1752 
1753 void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
1754  FPConvertToInt(rd, fn, FCVTNU);
1755 }
1756 
1757 
1758 void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
1759  FPConvertToInt(rd, fn, FCVTNS);
1760 }
1761 
1762 
1763 void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
1764  FPConvertToInt(rd, fn, FCVTZU);
1765 }
1766 
1767 
1768 void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
1769  FPConvertToInt(rd, fn, FCVTZS);
1770 }
1771 
1772 
1773 void Assembler::scvtf(const FPRegister& fd,
1774  const Register& rn,
1775  unsigned fbits) {
1776  if (fbits == 0) {
1777  Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
1778  } else {
1779  Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
1780  Rd(fd));
1781  }
1782 }
1783 
1784 
1785 void Assembler::ucvtf(const FPRegister& fd,
1786  const Register& rn,
1787  unsigned fbits) {
1788  if (fbits == 0) {
1789  Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
1790  } else {
1791  Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
1792  Rd(fd));
1793  }
1794 }
1795 
1796 
1797 // Note:
1798 // Below, a difference in case for the same letter indicates a
1799 // negated bit.
1800 // If b is 1, then B is 0.
1801 Instr Assembler::ImmFP32(float imm) {
1802  ASSERT(IsImmFP32(imm));
1803  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
1804  uint32_t bits = float_to_rawbits(imm);
1805  // bit7: a000.0000
1806  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
1807  // bit6: 0b00.0000
1808  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
1809  // bit5_to_0: 00cd.efgh
1810  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
1811 
1812  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
1813 }
1814 
1815 
1816 Instr Assembler::ImmFP64(double imm) {
1817  ASSERT(IsImmFP64(imm));
1818  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
1819  // 0000.0000.0000.0000.0000.0000.0000.0000
1820  uint64_t bits = double_to_rawbits(imm);
1821  // bit7: a000.0000
1822  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
1823  // bit6: 0b00.0000
1824  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
1825  // bit5_to_0: 00cd.efgh
1826  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
1827 
1828  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
1829 }
1830 
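// Worked example (added for clarity, not part of the original file):
// ImmFP64(1.0): double 1.0 has raw bits 0x3ff0000000000000.
//   a (bit 63)          = 0        -> bit7      = 0x00
//   b (bit 61)          = 1        -> bit6      = 0x40
//   cdefgh (bits 53-48) = 0b110000 -> bit5_to_0 = 0x30
// The resulting 8-bit immediate is 0x70, the encoding used by 'fmov dN, #1.0'.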
1831 
1832 // Code generation helpers.
1833 void Assembler::MoveWide(const Register& rd,
1834  uint64_t imm,
1835  int shift,
1836  MoveWideImmediateOp mov_op) {
1837  if (shift >= 0) {
1838  // Explicit shift specified.
1839  ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
1840  ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
1841  shift /= 16;
1842  } else {
1843  // Calculate a new immediate and shift combination to encode the immediate
1844  // argument.
1845  shift = 0;
1846  if ((imm & ~0xffffUL) == 0) {
1847  // Nothing to do.
1848  } else if ((imm & ~(0xffffUL << 16)) == 0) {
1849  imm >>= 16;
1850  shift = 1;
1851  } else if ((imm & ~(0xffffUL << 32)) == 0) {
1852  ASSERT(rd.Is64Bits());
1853  imm >>= 32;
1854  shift = 2;
1855  } else if ((imm & ~(0xffffUL << 48)) == 0) {
1856  ASSERT(rd.Is64Bits());
1857  imm >>= 48;
1858  shift = 3;
1859  }
1860  }
1861 
1862  ASSERT(is_uint16(imm));
1863 
1864  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
1865  Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
1866 }
1867 
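// Worked example (added for clarity, not part of the original file):
// MoveWide(x0, 0x1234ULL << 16, -1, MOVZ) takes the 'shift < 0' path above:
// (imm & ~(0xffffUL << 16)) == 0, so imm becomes 0x1234 and shift becomes 1,
// and the emitted instruction is 'movz x0, #0x1234, lsl #16'. An immediate
// with more than one non-zero halfword fails the is_uint16() assert and has
// to be synthesised by the macro assembler with a movz/movk sequence instead.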
1868 
1869 void Assembler::AddSub(const Register& rd,
1870  const Register& rn,
1871  const Operand& operand,
1872  FlagsUpdate S,
1873  AddSubOp op) {
1874  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1875  ASSERT(!operand.NeedsRelocation());
1876  if (operand.IsImmediate()) {
1877  int64_t immediate = operand.immediate();
1878  ASSERT(IsImmAddSub(immediate));
1879  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
1880  Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
1881  ImmAddSub(immediate) | dest_reg | RnSP(rn));
1882  } else if (operand.IsShiftedRegister()) {
1883  ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
1884  ASSERT(operand.shift() != ROR);
1885 
1886  // For instructions of the form:
1887  // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
1888  // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
1889  // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
1890  // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
1891  // or their 64-bit register equivalents, convert the operand from shifted to
1892  // extended register mode, and emit an add/sub extended instruction.
1893  if (rn.IsSP() || rd.IsSP()) {
1894  ASSERT(!(rd.IsSP() && (S == SetFlags)));
1895  DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
1896  AddSubExtendedFixed | op);
1897  } else {
1898  DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
1899  }
1900  } else {
1901  ASSERT(operand.IsExtendedRegister());
1902  DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
1903  }
1904 }
1905 
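// Examples of the operand paths above (added for clarity, not part of the
// original file):
//   add(x0, x1, Operand(0x123));       // Immediate form (AddSubImmediateFixed).
//   add(x0, x1, Operand(x2, LSL, 4));  // Shifted-register form.
//   add(x0, csp, Operand(x2));         // rn is the stack pointer: the shifted
//                                      // operand is converted to extended form.
//   add(x0, x1, Operand(w2, UXTW));    // Explicit extended-register form.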
1906 
1907 void Assembler::AddSubWithCarry(const Register& rd,
1908  const Register& rn,
1909  const Operand& operand,
1910  FlagsUpdate S,
1911  AddSubWithCarryOp op) {
1912  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1913  ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
1914  ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
1915  ASSERT(!operand.NeedsRelocation());
1916  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
1917 }
1918 
1919 
1920 void Assembler::hlt(int code) {
1921  ASSERT(is_uint16(code));
1922  Emit(HLT | ImmException(code));
1923 }
1924 
1925 
1926 void Assembler::brk(int code) {
1927  ASSERT(is_uint16(code));
1928  Emit(BRK | ImmException(code));
1929 }
1930 
1931 
1932 void Assembler::debug(const char* message, uint32_t code, Instr params) {
1933 #ifdef USE_SIMULATOR
1934  // Don't generate simulator specific code if we are building a snapshot, which
1935  // might be run on real hardware.
1936  if (!Serializer::enabled()) {
1937 #ifdef DEBUG
1938  Serializer::TooLateToEnableNow();
1939 #endif
1940  // The arguments to the debug marker need to be contiguous in memory, so
1941  // make sure we don't try to emit pools.
1942  BlockPoolsScope scope(this);
1943 
1944  Label start;
1945  bind(&start);
1946 
1947  // Refer to instructions-arm64.h for a description of the marker and its
1948  // arguments.
1949  hlt(kImmExceptionIsDebug);
1950  ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
1951  dc32(code);
1952  ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
1953  dc32(params);
1954  ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
1955  EmitStringData(message);
1956  hlt(kImmExceptionIsUnreachable);
1957 
1958  return;
1959  }
1960  // Fall through if Serializer is enabled.
1961 #endif
1962 
1963  if (params & BREAK) {
1964  hlt(kImmExceptionIsDebug);
1965  }
1966 }
1967 
1968 
1969 void Assembler::Logical(const Register& rd,
1970  const Register& rn,
1971  const Operand& operand,
1972  LogicalOp op) {
1973  ASSERT(rd.SizeInBits() == rn.SizeInBits());
1974  ASSERT(!operand.NeedsRelocation());
1975  if (operand.IsImmediate()) {
1976  int64_t immediate = operand.immediate();
1977  unsigned reg_size = rd.SizeInBits();
1978 
1979  ASSERT(immediate != 0);
1980  ASSERT(immediate != -1);
1981  ASSERT(rd.Is64Bits() || is_uint32(immediate));
1982 
1983  // If the operation is NOT, invert the operation and immediate.
1984  if ((op & NOT) == NOT) {
1985  op = static_cast<LogicalOp>(op & ~NOT);
1986  immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
1987  }
1988 
1989  unsigned n, imm_s, imm_r;
1990  if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
1991  // Immediate can be encoded in the instruction.
1992  LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
1993  } else {
1994  // This case is handled in the macro assembler.
1995  UNREACHABLE();
1996  }
1997  } else {
1998  ASSERT(operand.IsShiftedRegister());
1999  ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
2000  Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
2001  DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
2002  }
2003 }
2004 
2005 
2006 void Assembler::LogicalImmediate(const Register& rd,
2007  const Register& rn,
2008  unsigned n,
2009  unsigned imm_s,
2010  unsigned imm_r,
2011  LogicalOp op) {
2012  unsigned reg_size = rd.SizeInBits();
2013  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
2014  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
2015  ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
2016  Rn(rn));
2017 }
2018 
2019 
2020 void Assembler::ConditionalCompare(const Register& rn,
2021  const Operand& operand,
2022  StatusFlags nzcv,
2023  Condition cond,
2024  ConditionalCompareOp op) {
2025  Instr ccmpop;
2026  ASSERT(!operand.NeedsRelocation());
2027  if (operand.IsImmediate()) {
2028  int64_t immediate = operand.immediate();
2029  ASSERT(IsImmConditionalCompare(immediate));
2030  ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
2031  } else {
2032  ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
2033  ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
2034  }
2035  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
2036 }
2037 
2038 
2039 void Assembler::DataProcessing1Source(const Register& rd,
2040  const Register& rn,
2041  DataProcessing1SourceOp op) {
2042  ASSERT(rd.SizeInBits() == rn.SizeInBits());
2043  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
2044 }
2045 
2046 
2047 void Assembler::FPDataProcessing1Source(const FPRegister& fd,
2048  const FPRegister& fn,
2049  FPDataProcessing1SourceOp op) {
2050  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
2051 }
2052 
2053 
2054 void Assembler::FPDataProcessing2Source(const FPRegister& fd,
2055  const FPRegister& fn,
2056  const FPRegister& fm,
2057  FPDataProcessing2SourceOp op) {
2058  ASSERT(fd.SizeInBits() == fn.SizeInBits());
2059  ASSERT(fd.SizeInBits() == fm.SizeInBits());
2060  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
2061 }
2062 
2063 
2064 void Assembler::FPDataProcessing3Source(const FPRegister& fd,
2065  const FPRegister& fn,
2066  const FPRegister& fm,
2067  const FPRegister& fa,
2068  FPDataProcessing3SourceOp op) {
2069  ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
2070  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
2071 }
2072 
2073 
2074 void Assembler::EmitShift(const Register& rd,
2075  const Register& rn,
2076  Shift shift,
2077  unsigned shift_amount) {
2078  switch (shift) {
2079  case LSL:
2080  lsl(rd, rn, shift_amount);
2081  break;
2082  case LSR:
2083  lsr(rd, rn, shift_amount);
2084  break;
2085  case ASR:
2086  asr(rd, rn, shift_amount);
2087  break;
2088  case ROR:
2089  ror(rd, rn, shift_amount);
2090  break;
2091  default:
2092  UNREACHABLE();
2093  }
2094 }
2095 
2096 
2097 void Assembler::EmitExtendShift(const Register& rd,
2098  const Register& rn,
2099  Extend extend,
2100  unsigned left_shift) {
2101  ASSERT(rd.SizeInBits() >= rn.SizeInBits());
2102  unsigned reg_size = rd.SizeInBits();
2103  // Use the correct size of register.
2104  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
2105  // Bits extracted are high_bit:0.
2106  unsigned high_bit = (8 << (extend & 0x3)) - 1;
2107  // Number of bits left in the result that are not introduced by the shift.
2108  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
2109 
2110  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
2111  switch (extend) {
2112  case UXTB:
2113  case UXTH:
2114  case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
2115  case SXTB:
2116  case SXTH:
2117  case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
2118  case UXTX:
2119  case SXTX: {
2120  ASSERT(rn.SizeInBits() == kXRegSizeInBits);
2121  // Nothing to extend. Just shift.
2122  lsl(rd, rn_, left_shift);
2123  break;
2124  }
2125  default: UNREACHABLE();
2126  }
2127  } else {
2128  // No need to extend as the extended bits would be shifted away.
2129  lsl(rd, rn_, left_shift);
2130  }
2131 }
2132 
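// Editor's note (illustrative example, not part of the original source;
// assumes the usual Extend encoding where UXTB == 0, so high_bit == 7):
// EmitExtendShift(w0, w1, UXTB, 2) computes non_shift_bits = (32 - 2) & 31 = 30,
// which is greater than high_bit, so it emits ubfm(w0, w1, 30, 7) -- the
// "ubfiz w0, w1, #2, #8" alias -- leaving w0 == (w1 & 0xff) << 2. If the left
// shift were large enough to discard all extended bits, the else branch would
// emit a plain lsl instead.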
2133 
2134 void Assembler::DataProcShiftedRegister(const Register& rd,
2135  const Register& rn,
2136  const Operand& operand,
2137  FlagsUpdate S,
2138  Instr op) {
2139  ASSERT(operand.IsShiftedRegister());
2140  ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
2141  ASSERT(!operand.NeedsRelocation());
2142  Emit(SF(rd) | op | Flags(S) |
2143  ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
2144  Rm(operand.reg()) | Rn(rn) | Rd(rd));
2145 }
2146 
2147 
2148 void Assembler::DataProcExtendedRegister(const Register& rd,
2149  const Register& rn,
2150  const Operand& operand,
2151  FlagsUpdate S,
2152  Instr op) {
2153  ASSERT(!operand.NeedsRelocation());
2154  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
2155  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
2156  ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
2157  dest_reg | RnSP(rn));
2158 }
2159 
2160 
2161 bool Assembler::IsImmAddSub(int64_t immediate) {
2162  return is_uint12(immediate) ||
2163  (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
2164 }
2165 
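// Editor's note (illustrative example, not part of the original source):
// Add/sub immediates are an unsigned 12-bit value, optionally shifted left by
// 12 bits, so:
//   IsImmAddSub(0xabc)    -> true   (fits in 12 bits)
//   IsImmAddSub(0xabc000) -> true   (a 12-bit value shifted left by 12)
//   IsImmAddSub(0xabcd)   -> false  (needs bits in both halves)
// Immediates like the last one would typically be synthesized by the macro
// assembler instead.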
2166 void Assembler::LoadStore(const CPURegister& rt,
2167  const MemOperand& addr,
2168  LoadStoreOp op) {
2169  Instr memop = op | Rt(rt) | RnSP(addr.base());
2170  ptrdiff_t offset = addr.offset();
2171 
2172  if (addr.IsImmediateOffset()) {
2173  LSDataSize size = CalcLSDataSize(op);
2174  if (IsImmLSScaled(offset, size)) {
2175  // Use the scaled addressing mode.
2176  Emit(LoadStoreUnsignedOffsetFixed | memop |
2177  ImmLSUnsigned(offset >> size));
2178  } else if (IsImmLSUnscaled(offset)) {
2179  // Use the unscaled addressing mode.
2180  Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
2181  } else {
2182  // This case is handled in the macro assembler.
2183  UNREACHABLE();
2184  }
2185  } else if (addr.IsRegisterOffset()) {
2186  Extend ext = addr.extend();
2187  Shift shift = addr.shift();
2188  unsigned shift_amount = addr.shift_amount();
2189 
2190  // LSL is encoded in the option field as UXTX.
2191  if (shift == LSL) {
2192  ext = UXTX;
2193  }
2194 
2195  // Shifts are encoded in one bit, indicating a left shift by the memory
2196  // access size.
2197  ASSERT((shift_amount == 0) ||
2198  (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
2199  Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
2200  ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
2201  } else {
2202  // Pre-index and post-index modes.
2203  ASSERT(!rt.Is(addr.base()));
2204  if (IsImmLSUnscaled(offset)) {
2205  if (addr.IsPreIndex()) {
2206  Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
2207  } else {
2208  ASSERT(addr.IsPostIndex());
2209  Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
2210  }
2211  } else {
2212  // This case is handled in the macro assembler.
2213  UNREACHABLE();
2214  }
2215  }
2216 }
2217 
2218 
2219 bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
2220  return is_int9(offset);
2221 }
2222 
2223 
2224 bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
2225  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
2226  return offset_is_size_multiple && is_uint12(offset >> size);
2227 }
2228 
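// Editor's note (illustrative example, not part of the original source):
// For an X-register load or store (LSDataSize == 3, i.e. 8-byte accesses):
//   offset == 16    -> scaled form, ImmLSUnsigned(16 >> 3) == 2
//   offset == -8    -> unscaled form (fits in the signed 9-bit immediate)
//   offset == 12    -> unscaled form (in range, but not a multiple of 8)
//   offset == 65536 -> neither form; handled in the macro assembler (see above)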
2229 
2230 void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
2231  ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
2232  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
2233  // constant pool. It should not be emitted.
2234  ASSERT(!rt.Is(xzr));
2235  Emit(LDR_x_lit |
2236  ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
2237  Rt(rt));
2238 }
2239 
2240 
2241 void Assembler::LoadRelocatedValue(const CPURegister& rt,
2242  const Operand& operand,
2243  LoadLiteralOp op) {
2244  int64_t imm = operand.immediate();
2245  ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
2246  RecordRelocInfo(operand.rmode(), imm);
2247  BlockConstPoolFor(1);
2248  Emit(op | ImmLLiteral(0) | Rt(rt));
2249 }
2250 
2251 
2252 // Test if a given value can be encoded in the immediate field of a logical
2253 // instruction.
2254 // If it can be encoded, the function returns true, and values pointed to by n,
2255 // imm_s and imm_r are updated with immediates encoded in the format required
2256 // by the corresponding fields in the logical instruction.
2257 // If it can not be encoded, the function returns false, and the values pointed
2258 // to by n, imm_s and imm_r are undefined.
2259 bool Assembler::IsImmLogical(uint64_t value,
2260  unsigned width,
2261  unsigned* n,
2262  unsigned* imm_s,
2263  unsigned* imm_r) {
2264  ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
2265  ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
2266 
2267  // Logical immediates are encoded using parameters n, imm_s and imm_r using
2268  // the following table:
2269  //
2270  // N imms immr size S R
2271  // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
2272  // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
2273  // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
2274  // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
2275  // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
2276  // 0 11110s xxxxxr 2 UInt(s) UInt(r)
2277  // (s bits must not be all set)
2278  //
2279  // A pattern is constructed of size bits, where the least significant S+1
2280  // bits are set. The pattern is rotated right by R, and repeated across a
2281  // 32 or 64-bit value, depending on destination register width.
2282  //
2283  // To test if an arbitrary immediate can be encoded using this scheme, an
2284  // iterative algorithm is used.
2285  //
2286  // TODO(mcapewel) This code does not consider using X/W register overlap to
2287  // support 64-bit immediates where the top 32-bits are zero, and the bottom
2288  // 32-bits are an encodable logical immediate.
2289 
2290  // 1. If the value has all set or all clear bits, it can't be encoded.
2291  if ((value == 0) || (value == 0xffffffffffffffffUL) ||
2292  ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
2293  return false;
2294  }
2295 
2296  unsigned lead_zero = CountLeadingZeros(value, width);
2297  unsigned lead_one = CountLeadingZeros(~value, width);
2298  unsigned trail_zero = CountTrailingZeros(value, width);
2299  unsigned trail_one = CountTrailingZeros(~value, width);
2300  unsigned set_bits = CountSetBits(value, width);
2301 
2302  // The fixed bits in the immediate s field.
2303  // If width == 64 (X reg), start at 0xFFFFFF80.
2304  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
2305  // widths won't be executed.
2306  int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
2307  int imm_s_mask = 0x3F;
2308 
2309  for (;;) {
2310  // 2. If the value is two bits wide, it can be encoded.
2311  if (width == 2) {
2312  *n = 0;
2313  *imm_s = 0x3C;
2314  *imm_r = (value & 3) - 1;
2315  return true;
2316  }
2317 
2318  *n = (width == 64) ? 1 : 0;
2319  *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
2320  if ((lead_zero + set_bits) == width) {
2321  *imm_r = 0;
2322  } else {
2323  *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
2324  }
2325 
2326  // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
2327  // the bit width of the value, it can be encoded.
2328  if (lead_zero + trail_zero + set_bits == width) {
2329  return true;
2330  }
2331 
2332  // 4. If the sum of leading ones, trailing ones and unset bits in the
2333  // value is equal to the bit width of the value, it can be encoded.
2334  if (lead_one + trail_one + (width - set_bits) == width) {
2335  return true;
2336  }
2337 
2338  // 5. If the most-significant half of the bitwise value is equal to the
2339  // least-significant half, return to step 2 using the least-significant
2340  // half of the value.
2341  uint64_t mask = (1UL << (width >> 1)) - 1;
2342  if ((value & mask) == ((value >> (width >> 1)) & mask)) {
2343  width >>= 1;
2344  set_bits >>= 1;
2345  imm_s_fixed >>= 1;
2346  continue;
2347  }
2348 
2349  // 6. Otherwise, the value can't be encoded.
2350  return false;
2351  }
2352 }
2353 
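// Editor's note (illustrative worked example, not part of the original source):
// Consider value == 0x00ff00ff00ff00ff with width == 64. The 64-bit halves are
// equal, as are the 32-bit halves, so the loop halves the width down to 16 with
// set_bits == 8. At that point lead_zero + trail_zero + set_bits == 8 + 0 + 8
// == 16 == width, so the value is encodable as:
//   n == 0, imm_s == 0x27 (0b100111: 16-bit element, 8 set bits), imm_r == 0
// which is the encoding used by, e.g., "orr x0, xzr, #0x00ff00ff00ff00ff".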
2354 
2355 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
2356  return is_uint5(immediate);
2357 }
2358 
2359 
2360 bool Assembler::IsImmFP32(float imm) {
2361  // Valid values will have the form:
2362  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
2363  uint32_t bits = float_to_rawbits(imm);
2364  // bits[19..0] are cleared.
2365  if ((bits & 0x7ffff) != 0) {
2366  return false;
2367  }
2368 
2369  // bits[29..25] are all set or all cleared.
2370  uint32_t b_pattern = (bits >> 16) & 0x3e00;
2371  if (b_pattern != 0 && b_pattern != 0x3e00) {
2372  return false;
2373  }
2374 
2375  // bit[30] and bit[29] are opposite.
2376  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
2377  return false;
2378  }
2379 
2380  return true;
2381 }
2382 
2383 
2384 bool Assembler::IsImmFP64(double imm) {
2385  // Valid values will have the form:
2386  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
2387  // 0000.0000.0000.0000.0000.0000.0000.0000
2388  uint64_t bits = double_to_rawbits(imm);
2389  // bits[47..0] are cleared.
2390  if ((bits & 0xffffffffffffL) != 0) {
2391  return false;
2392  }
2393 
2394  // bits[61..54] are all set or all cleared.
2395  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
2396  if (b_pattern != 0 && b_pattern != 0x3fc0) {
2397  return false;
2398  }
2399 
2400  // bit[62] and bit[61] are opposite.
2401  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
2402  return false;
2403  }
2404 
2405  return true;
2406 }
2407 
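// Editor's note (not part of the original source): IsImmFP32 and IsImmFP64
// accept exactly the immediates representable in the 8-bit ImmFP field, i.e.
// values of the form +/- n/16 * 2^r with n an integer in [16, 31] and r in
// [-3, 4] (magnitudes from 0.125 to 31.0). Other constants have to be
// materialized some other way, typically via a literal load.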
2408 
2409 void Assembler::GrowBuffer() {
2410  if (!own_buffer_) FATAL("external code buffer is too small");
2411 
2412  // Compute new buffer size.
2413  CodeDesc desc; // the new buffer
2414  if (buffer_size_ < 4 * KB) {
2415  desc.buffer_size = 4 * KB;
2416  } else if (buffer_size_ < 1 * MB) {
2417  desc.buffer_size = 2 * buffer_size_;
2418  } else {
2419  desc.buffer_size = buffer_size_ + 1 * MB;
2420  }
2421  CHECK_GT(desc.buffer_size, 0); // No overflow.
2422 
2423  byte* buffer = reinterpret_cast<byte*>(buffer_);
2424 
2425  // Set up new buffer.
2426  desc.buffer = NewArray<byte>(desc.buffer_size);
2427 
2428  desc.instr_size = pc_offset();
2429  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
2430 
2431  // Copy the data.
2432  intptr_t pc_delta = desc.buffer - buffer;
2433  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
2434  (buffer + buffer_size_);
2435  memmove(desc.buffer, buffer, desc.instr_size);
2436  memmove(reloc_info_writer.pos() + rc_delta,
2437  reloc_info_writer.pos(), desc.reloc_size);
2438 
2439  // Switch buffers.
2441  buffer_ = desc.buffer;
2442  buffer_size_ = desc.buffer_size;
2443  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
2444  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2445  reloc_info_writer.last_pc() + pc_delta);
2446 
2447  // None of our relocation types are pc relative pointing outside the code
2448  // buffer nor pc absolute pointing inside the code buffer, so there is no need
2449  // to relocate any emitted relocation entries.
2450 
2451  // Relocate pending relocation entries.
2452  for (int i = 0; i < num_pending_reloc_info_; i++) {
2453  RelocInfo& rinfo = pending_reloc_info_[i];
2454  ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2455  rinfo.rmode() != RelocInfo::POSITION);
2456  if (rinfo.rmode() != RelocInfo::JS_RETURN) {
2457  rinfo.set_pc(rinfo.pc() + pc_delta);
2458  }
2459  }
2460 }
2461 
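// Editor's note (illustrative example, not part of the original source):
// The growth policy above means, for example:
//   buffer_size_ == 3 KB    -> new size 4 KB   (minimum size)
//   buffer_size_ == 256 KB  -> new size 512 KB (doubled while below 1 MB)
//   buffer_size_ == 1.5 MB  -> new size 2.5 MB (grows by 1 MB from then on)
// Both the instruction stream (at the start of the buffer) and the relocation
// information (written backwards from the end of the buffer) are moved into
// the new allocation.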
2462 
2463 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2464  // We do not try to reuse pool constants.
2465  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
2466  if (((rmode >= RelocInfo::JS_RETURN) &&
2467  (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
2468  (rmode == RelocInfo::CONST_POOL) ||
2469  (rmode == RelocInfo::VENEER_POOL)) {
2470  // Adjust code for new modes.
2471  ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2472  || RelocInfo::IsJSReturn(rmode)
2473  || RelocInfo::IsComment(rmode)
2474  || RelocInfo::IsPosition(rmode)
2475  || RelocInfo::IsConstPool(rmode)
2476  || RelocInfo::IsVeneerPool(rmode));
2477  // These modes do not need an entry in the constant pool.
2478  } else {
2479  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
2480  if (num_pending_reloc_info_ == 0) {
2481  first_const_pool_use_ = pc_offset();
2482  }
2483  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
2484  // Make sure the constant pool is not emitted in place of the next
2485  // instruction for which we just recorded relocation info.
2486  BlockConstPoolFor(1);
2487  }
2488 
2489  if (!RelocInfo::IsNone(rmode)) {
2490  // Don't record external references unless the heap will be serialized.
2491  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2492 #ifdef DEBUG
2493  if (!Serializer::enabled()) {
2494  Serializer::TooLateToEnableNow();
2495  }
2496 #endif
2497  if (!Serializer::enabled() && !emit_debug_code()) {
2498  return;
2499  }
2500  }
2501  ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2502  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2503  RelocInfo reloc_info_with_ast_id(
2504  reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
2505  ClearRecordedAstId();
2506  reloc_info_writer.Write(&reloc_info_with_ast_id);
2507  } else {
2508  reloc_info_writer.Write(&rinfo);
2509  }
2510  }
2511 }
2512 
2513 
2514 void Assembler::BlockConstPoolFor(int instructions) {
2515  int pc_limit = pc_offset() + instructions * kInstructionSize;
2516  if (no_const_pool_before_ < pc_limit) {
2517  // If there are some pending entries, the constant pool cannot be blocked
2518  // further than first_const_pool_use_ + kMaxDistToConstPool
2519  ASSERT((num_pending_reloc_info_ == 0) ||
2520  (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
2521  no_const_pool_before_ = pc_limit;
2522  }
2523 
2524  if (next_constant_pool_check_ < no_const_pool_before_) {
2525  next_constant_pool_check_ = no_const_pool_before_;
2526  }
2527 }
2528 
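// Editor's note (illustrative sketch, not part of the original source):
// Callers use BlockConstPoolFor, or a BlockPoolsScope as in Assembler::debug
// above, to keep a fixed-length instruction sequence contiguous so that no
// pool is emitted in the middle of it:
//   {
//     BlockPoolsScope scope(this);  // blocks constant and veneer pools
//     // ... emit the instructions that must stay contiguous ...
//   }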
2529 
2530 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
2531  // Some short sequences of instructions mustn't be broken up by constant pool
2532  // emission; such sequences are protected by calls to BlockConstPoolFor and
2533  // BlockConstPoolScope.
2534  if (is_const_pool_blocked()) {
2535  // Something is wrong if emission is forced and blocked at the same time.
2536  ASSERT(!force_emit);
2537  return;
2538  }
2539 
2540  // There is nothing to do if there are no pending constant pool entries.
2541  if (num_pending_reloc_info_ == 0) {
2542  // Calculate the offset of the next check.
2543  next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
2544  return;
2545  }
2546 
2547  // We emit a constant pool when:
2548  // * requested to do so by parameter force_emit (e.g. after each function).
2549  // * the distance to the first instruction accessing the constant pool is
2550  // kAvgDistToConstPool or more.
2551  // * no jump is required and the distance to the first instruction accessing
2552  // the constant pool is at least kMaxDistToConstPool / 2.
2553  ASSERT(first_const_pool_use_ >= 0);
2554  int dist = pc_offset() - first_const_pool_use_;
2555  if (!force_emit && dist < kAvgDistToConstPool &&
2556  (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
2557  return;
2558  }
2559 
2560  int jump_instr = require_jump ? kInstructionSize : 0;
2561  int size_pool_marker = kInstructionSize;
2562  int size_pool_guard = kInstructionSize;
2563  int pool_size = jump_instr + size_pool_marker + size_pool_guard +
2564  num_pending_reloc_info_ * kPointerSize;
2565  int needed_space = pool_size + kGap;
2566 
2567  // Emit veneers for branches that would go out of range during emission of the
2568  // constant pool.
2569  CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
2570 
2571  Label size_check;
2572  bind(&size_check);
2573 
2574  // Check that the code buffer is large enough before emitting the constant
2575  // pool (this includes the jump over the pool, the constant pool marker, the
2576  // constant pool guard, and the gap to the relocation information).
2577  while (buffer_space() <= needed_space) {
2578  GrowBuffer();
2579  }
2580 
2581  {
2582  // Block recursive calls to CheckConstPool and protect from veneer pools.
2583  BlockPoolsScope block_pools(this);
2584  RecordComment("[ Constant Pool");
2585  RecordConstPool(pool_size);
2586 
2587  // Emit jump over constant pool if necessary.
2588  Label after_pool;
2589  if (require_jump) {
2590  b(&after_pool);
2591  }
2592 
2593  // Emit a constant pool header. The header has two goals:
2594  // 1) Encode the size of the constant pool, for use by the disassembler.
2595  // 2) Terminate the program, to try to prevent execution from accidentally
2596  // flowing into the constant pool.
2597  // The header is therefore made of two arm64 instructions:
2598  // ldr xzr, #<size of the constant pool in 32-bit words>
2599  // blr xzr
2600  // If executed the code will likely segfault and lr will point to the
2601  // beginning of the constant pool.
2602  // TODO(all): currently each relocated constant is 64 bits, consider adding
2603  // support for 32-bit entries.
2604  ConstantPoolMarker(2 * num_pending_reloc_info_);
2606 
2607  // Emit constant pool entries.
2608  for (int i = 0; i < num_pending_reloc_info_; i++) {
2609  RelocInfo& rinfo = pending_reloc_info_[i];
2610  ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2611  rinfo.rmode() != RelocInfo::POSITION &&
2612  rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
2613  rinfo.rmode() != RelocInfo::CONST_POOL &&
2614  rinfo.rmode() != RelocInfo::VENEER_POOL);
2615 
2616  Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
2617  // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
2618  ASSERT(instr->IsLdrLiteral() &&
2619  instr->ImmLLiteral() == 0);
2620 
2621  instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
2622  dc64(rinfo.data());
2623  }
2624 
2625  num_pending_reloc_info_ = 0;
2626  first_const_pool_use_ = -1;
2627 
2628  RecordComment("]");
2629 
2630  if (after_pool.is_linked()) {
2631  bind(&after_pool);
2632  }
2633  }
2634 
2635  // Since a constant pool was just emitted, move the check offset forward by
2636  // the standard interval.
2637  next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
2638 
2639  ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
2640  static_cast<unsigned>(pool_size));
2641 }
2642 
2643 
2644 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
2645  // Account for the branch around the veneers and the guard.
2646  int protection_offset = 2 * kInstructionSize;
2647  return pc_offset() > max_reachable_pc - margin - protection_offset -
2648  static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
2649 }
2650 
2651 
2652 void Assembler::RecordVeneerPool(int location_offset, int size) {
2653 #ifdef ENABLE_DEBUGGER_SUPPORT
2654  RelocInfo rinfo(buffer_ + location_offset,
2655  RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
2656  NULL);
2657  reloc_info_writer.Write(&rinfo);
2658 #endif
2659 }
2660 
2661 
2662 void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
2663  BlockPoolsScope scope(this);
2664  RecordComment("[ Veneers");
2665 
2666  // The exact size of the veneer pool must be recorded (see the comment at the
2667  // declaration site of RecordConstPool()), but computing the number of
2668  // veneers that will be generated is not obvious. So instead we remember the
2669  // current position and will record the size after the pool has been
2670  // generated.
2671  Label size_check;
2672  bind(&size_check);
2673  int veneer_pool_relocinfo_loc = pc_offset();
2674 
2675  Label end;
2676  if (need_protection) {
2677  b(&end);
2678  }
2679 
2680  EmitVeneersGuard();
2681 
2682  Label veneer_size_check;
2683 
2684  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
2685 
2686  it = unresolved_branches_.begin();
2687  while (it != unresolved_branches_.end()) {
2688  if (force_emit || ShouldEmitVeneer(it->first, margin)) {
2689  Instruction* branch = InstructionAt(it->second.pc_offset_);
2690  Label* label = it->second.label_;
2691 
2692 #ifdef DEBUG
2693  bind(&veneer_size_check);
2694 #endif
2695  // Patch the branch to point to the current position, and emit a branch
2696  // to the label.
2697  Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
2698  RemoveBranchFromLabelLinkChain(branch, label, veneer);
2699  branch->SetImmPCOffsetTarget(veneer);
2700  b(label);
2701 #ifdef DEBUG
2702  ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
2703  static_cast<uint64_t>(kMaxVeneerCodeSize));
2704  veneer_size_check.Unuse();
2705 #endif
2706 
2707  it_to_delete = it++;
2708  unresolved_branches_.erase(it_to_delete);
2709  } else {
2710  ++it;
2711  }
2712  }
2713 
2714  // Record the veneer pool size.
2715  int pool_size = SizeOfCodeGeneratedSince(&size_check);
2716  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
2717 
2718  if (unresolved_branches_.empty()) {
2719  next_veneer_pool_check_ = kMaxInt;
2720  } else {
2721  next_veneer_pool_check_ =
2722  unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
2723  }
2724 
2725  bind(&end);
2726 
2727  RecordComment("]");
2728 }
2729 
2730 
2731 void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
2732  int margin) {
2733  // There is nothing to do if there are no pending veneer pool entries.
2734  if (unresolved_branches_.empty()) {
2735  ASSERT(next_veneer_pool_check_ == kMaxInt);
2736  return;
2737  }
2738 
2739  ASSERT(unresolved_branches_first_limit() > pc_offset());
2740 
2741  // Some short sequences of instructions mustn't be broken up by veneer pool
2742  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
2743  // BlockVeneerPoolScope.
2744  if (is_veneer_pool_blocked()) {
2745  ASSERT(!force_emit);
2746  return;
2747  }
2748 
2749  if (!require_jump) {
2750  // Prefer emitting veneers protected by an existing instruction.
2751  margin *= kVeneerNoProtectionFactor;
2752  }
2753  if (force_emit || ShouldEmitVeneers(margin)) {
2754  EmitVeneers(force_emit, require_jump, margin);
2755  } else {
2756  next_veneer_pool_check_ =
2757  unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
2758  }
2759 }
2760 
2761 
2762 void Assembler::RecordComment(const char* msg) {
2763  if (FLAG_code_comments) {
2764  CheckBuffer();
2765  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2766  }
2767 }
2768 
2769 
2770 int Assembler::buffer_space() const {
2771  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
2772 }
2773 
2774 
2775 void Assembler::RecordJSReturn() {
2776  positions_recorder()->WriteRecordedPositions();
2777  CheckBuffer();
2778  RecordRelocInfo(RelocInfo::JS_RETURN);
2779 }
2780 
2781 
2782 void Assembler::RecordDebugBreakSlot() {
2783  positions_recorder()->WriteRecordedPositions();
2784  CheckBuffer();
2785  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2786 }
2787 
2788 
2789 void Assembler::RecordConstPool(int size) {
2790  // We only need this for debugger support, to correctly compute offsets in the
2791  // code.
2792 #ifdef ENABLE_DEBUGGER_SUPPORT
2793  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
2794 #endif
2795 }
2796 
2797 
2798 MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
2799  // No out-of-line constant pool support.
2800  UNREACHABLE();
2801  return NULL;
2802 }
2803 
2804 
2805 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2806  // No out-of-line constant pool support.
2807  UNREACHABLE();
2808 }
2809 
2810 
2811 } } // namespace v8::internal
2812 
2813 #endif // V8_TARGET_ARCH_ARM64