v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
assembler-arm.cc
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "v8.h"

#if V8_TARGET_ARCH_ARM

#include "arm/assembler-arm-inl.h"
#include "macro-assembler.h"
#include "serialize.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) {
    answer |= 1u << ARMv7;
  }
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) {
    answer |= 1u << VFP3 | 1u << ARMv7;
  }
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) {
    answer |= 1u << VFP32DREGS;
  }
#endif  // CAN_USE_VFP32DREGS
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}
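
// Editorial sketch, not part of the original source: the mask returned above
// is consumed with plain bit tests against the CpuFeature enum values
// (ARMv7, VFP3, ...). A caller might do, hypothetically:
//
//   unsigned features = CpuFeaturesImpliedByCompiler();
//   if (features & (1u << VFP3)) {
//     // VFPv3 (and therefore ARMv7) code may be baked into the snapshot.
//   }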


const char* DwVfpRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < NumAllocatableRegisters());
  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
         kNumReservedRegisters - 1);
  if (index >= kDoubleRegZero.code())
    index += kNumReservedRegisters;

  return VFPRegisters::Name(index, true);
}


void CpuFeatures::Probe() {
  uint64_t standard_features = static_cast<unsigned>(
      OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    printf(" ");
    PrintFeatures();
    return;
  }

#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
    supported_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }
  if (FLAG_enable_neon) {
    supported_ |= 1u << NEON;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (FLAG_enable_sudiv) {
    supported_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (FLAG_enable_movw_movt) {
    supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  if (FLAG_enable_32dregs) {
    supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }

  if (FLAG_enable_unaligned_accesses) {
    supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

#else  // __arm__
  // Probe for additional features not already known to be available.
  CPU cpu;
  if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
    found_by_runtime_probing_only_ |= 1u << NEON;
  }

  if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
      && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

  // Use movw/movt for QUALCOMM ARMv7 cores.
  if (cpu.implementer() == CPU::QUALCOMM &&
      cpu.architecture() >= 7 &&
      FLAG_enable_movw_movt) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == CPU::ARM &&
      (cpu.part() == CPU::ARM_CORTEX_A5 ||
       cpu.part() == CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }

  supported_ |= found_by_runtime_probing_only_;
#endif

  // Assert that VFP3 implies ARMv7.
  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}


void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_test = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#ifdef __arm__

# ifdef ARM_TEST
  arm_test = " test";
# endif
# if defined __ARM_NEON__
  arm_fpu = " neon";
# elif defined CAN_USE_VFP3_INSTRUCTIONS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp2";
# endif
# if (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
# endif
  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";

#else  // __arm__

  arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
#  if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
#  else
  arm_fpu = " vfp3-d16";
#  endif
# else
  arm_fpu = " vfp2";
# endif
# if USE_EABI_HARDFLOAT == 1
  arm_float_abi = "hard";
# else
  arm_float_abi = "softfp";
# endif

#endif  // __arm__

  printf("target%s %s%s%s %s\n",
         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
}


void CpuFeatures::PrintFeatures() {
  printf(
    "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
    "MOVW_MOVT_IMMEDIATE_LOADS=%d",
    CpuFeatures::IsSupported(ARMv7),
    CpuFeatures::IsSupported(VFP3),
    CpuFeatures::IsSupported(VFP32DREGS),
    CpuFeatures::IsSupported(NEON),
    CpuFeatures::IsSupported(SUDIV),
    CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
  bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction, or is an
  // out of line constant pool entry.  These only occur if
  // FLAG_enable_ool_constant_pool is true.
  return FLAG_enable_ool_constant_pool;
}


bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_ool_constant_pool) {
    return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
  } else {
    return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
  }
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));

  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;

  if ((shift_op == ROR) && (shift_imm == 0)) {
    // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
    // RRX as ROR #0 (See below).
    shift_op = LSL;
  } else if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}
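
// Editorial sketch, not part of the original source: this constructor builds
// immediate-shift operands such as "r1, LSL #2". Hypothetical usage:
//
//   add(r0, r0, Operand(r1, LSL, 2));  // r0 = r0 + (r1 << 2)
//   mov(r0, Operand(r1, RRX, 0));      // rotate right with extend, see above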


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}


NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  ASSERT((am == Offset) || (am == PostIndex));
  rn_ = rn;
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}


void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:
      align_ = 0;
      break;
    case 64:
      align_ = 1;
      break;
    case 128:
      align_ = 2;
      break;
    case 256:
      align_ = 3;
      break;
    default:
      UNREACHABLE();
      align_ = 0;
      break;
  }
}
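
// Editorial note, not part of the original source: the NEON @align hint is
// given in bits, so (for example) NeonMemOperand(r0, Offset, 128) requests
// 128-bit alignment and is encoded with align_ == 2, matching the switch
// above.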


NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  switch (registers_count) {
    case 1:
      type_ = nlt_1;
      break;
    case 2:
      type_ = nlt_2;
      break;
    case 3:
      type_ = nlt_3;
      break;
    case 4:
      type_ = nlt_4;
      break;
    default:
      UNREACHABLE();
      type_ = nlt_1;
      break;
  }
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
        kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// ldr rd, [pp, #offset]
const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  constant_pool_available_ = !FLAG_enable_ool_constant_pool;
  constant_pool_full_ = false;
  ClearRecordedAstId();
}


Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
}


void Assembler::GetCode(CodeDesc* desc) {
  if (!FLAG_enable_ool_constant_pool) {
    // Emit constant pool if necessary.
    CheckConstPool(true, false);
    ASSERT(num_pending_32_bit_reloc_info_ == 0);
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}


bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes.
  return ((instr & kImm24Mask) << 8) >> 6;
}


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  offset <<= 2;
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCMask) == kLdrPCPattern;
}


bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp +/- offset_12].
  return (instr & kLdrPpMask) == kLdrPpPattern;
}


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}


bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;
}


bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return GetRn(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The linked labels form a link chain by making the branch offset
// in the instruction stream point to the previous branch
// instruction using the same label.
//
// The link chain is terminated by a branch offset pointing to the
// same position.
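
// Editorial walk-through, not part of the original source. Assuming three
// forward branches to one unbound label L:
//
//   pc =  0:  b L    // offset points at pc 0 itself -> end of chain
//   pc =  8:  b L    // offset points back at pc 0
//   pc = 16:  b L    // offset points back at pc 8; L->pos() == 16
//
// bind(L) then walks 16 -> 8 -> 0 via next()/target_at and patches each
// branch to the bind position with target_at_put.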

int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}


void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    ASSERT(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    ASSERT(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}


// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8,
                         NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
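
// Editorial worked example, not part of the original source: for
// imm32 == 0x3FC the loop above succeeds at rot == 15, because rotating
// 0x3FC left by 30 bits (i.e. right by 2) gives 0xFF. The operand is thus
// encoded as immed_8 == 0xFF with rotate_imm == 15, which the CPU decodes
// back as 0xFF ROR 30 == 0x3FC.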


// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return Serializer::enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


static bool use_mov_immediate_load(const Operand& x,
                                   const Assembler* assembler) {
  if (assembler != NULL && !assembler->can_use_constant_pool()) {
    // If there is no constant pool available, we must use an mov immediate.
    // TODO(rmcilroy): enable ARMv6 support.
    ASSERT(CpuFeatures::IsSupported(ARMv7));
    return true;
  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
             (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
    return true;
  } else if (x.must_output_reloc_info(assembler)) {
    // Prefer constant pool if data is likely to be patched.
    return false;
  } else {
    // Otherwise, use immediate load if movw / movt is available.
    return CpuFeatures::IsSupported(ARMv7);
  }
}


bool Operand::is_single_instruction(const Assembler* assembler,
                                    Instr instr) const {
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required. For a mov instruction not setting the
    // condition code additional instruction conventions can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      return !use_mov_immediate_load(*this, assembler);
    } else {
      // If this is not a mov or mvn instruction there will always be an
      // additional instruction - either mov or ldr. The mov might actually be
      // two instructions (movw followed by movt), so including the actual
      // instruction two or three instructions will be generated.
      return false;
    }
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return true;
  }
}


void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  if (use_mov_immediate_load(x, this)) {
    Register target = rd.code() == pc.code() ? ip : rd;
    // TODO(rmcilroy): add ARMv6 support for immediate loads.
    ASSERT(CpuFeatures::IsSupported(ARMv7));
    if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
      // Make sure the movw/movt doesn't get separated.
      BlockConstPoolFor(2);
    }
    emit(cond | 0x30*B20 | target.code()*B12 |
         EncodeMovwImmediate(x.imm32_ & 0xffff));
    movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    ASSERT(can_use_constant_pool());
    ConstantPoolAddEntry(rinfo);
    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
  }
}
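
// Editorial sketch, not part of the original source: on the movw/movt path
// above, an immediate such as x.imm32_ == 0x12345678 is emitted as
//
//   movw target, #0x5678   // low half, via the raw emit() above
//   movt target, #0x1234   // high half
//
// while the constant-pool path instead emits a single ldr whose offset is
// fixed up when the pool is dumped.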


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}


void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset; the constructors make sure that both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // First entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolFor(1);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
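
// Editorial note, not part of the original source: the returned offset is
// relative to pc + 8 (kPcLoadDelta, the ARM pipeline prefetch offset), and
// b()/bl() below shift it right by 2 into the 24-bit immediate field.
// Hypothetical usage: b(branch_offset(&L, false), eq);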


// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}


void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}


void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}


void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}


// Data-processing instructions.

void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}


void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}


void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}


void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}


void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions.
    // If the label is not linked, then start a new link chain by linking it to
    // itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions. They will load the
    // destination register with the position of the label from the beginning
    // of the code.
    //
    // The link will be extracted from the first instruction and the destination
    // register from the second.
    //   For ARMv7:
    //      link
    //      mov dst, dst
    //   For ARMv6:
    //      link
    //      mov dst, dst
    //      mov dst, dst
    //
    // When the label gets bound: target_at extracts the link and target_at_put
    // patches the instructions.
    ASSERT(is_uint24(link));
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      nop(dst.code());
    }
  }
}


void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  // May use movw if supported, but on unsupported platforms will try to use
  // equivalent rotated immed_8 value and other tricks before falling back to a
  // constant pool load.
  mov(reg, Operand(immediate), LeaveCC, cond);
}


void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}


void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}


// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(IsEnabled(SUDIV));
  emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
       src2.code()*B8 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}


// Saturating instructions.

// Unsigned saturate.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}


// Bitfield manipulation instructions.

// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
//   ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
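
// Editorial worked example, not part of the original source:
// ubfx(r0, r1, 4, 8) extracts bits 11:4 of r1 into r0, so for
// r1 == 0xABCD the result is (0xABCD >> 4) & 0xFF == 0xBC.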


// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
//   sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
//   bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}


// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
//   bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}


void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond ) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  ASSERT(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}


void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  ASSERT(src2.shift_op() == ASR);
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}


void Assembler::uxtb(Register dst,
                     const Operand& src,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src.rm().is(pc));
  ASSERT(!src.rm().is(no_reg));
  ASSERT(src.rs().is(no_reg));
  ASSERT((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  ASSERT((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}


void Assembler::uxtab(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ == 0) ||
         (src2.shift_imm_ == 8) ||
         (src2.shift_imm_ == 16) ||
         (src2.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  ASSERT((src2.shift_op() == ROR) ||
         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}


void Assembler::uxtb16(Register dst,
                       const Operand& src,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src.rm().is(pc));
  ASSERT(!src.rm().is(no_reg));
  ASSERT(src.rs().is(no_reg));
  ASSERT((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  ASSERT((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1848 
1849 
1850 // Status register access instructions.
1851 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1852  ASSERT(!dst.is(pc));
1853  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1854 }
1855 
1856 
1857 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1858  Condition cond) {
1859  ASSERT(fields >= B16 && fields < B20); // at least one field set
1860  Instr instr;
1861  if (!src.rm_.is_valid()) {
1862  // Immediate.
1863  uint32_t rotate_imm;
1864  uint32_t immed_8;
1865  if (src.must_output_reloc_info(this) ||
1866  !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1867  // Immediate operand cannot be encoded, load it first to register ip.
1868  move_32_bit_immediate(ip, src);
1869  msr(fields, Operand(ip), cond);
1870  return;
1871  }
1872  instr = I | rotate_imm*B8 | immed_8;
1873  } else {
1874  ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
1875  instr = src.rm_.code();
1876  }
1877  emit(cond | instr | B24 | B21 | fields | 15*B12);
1878 }
1879 
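// Illustrative sketch of the core test fits_shifter() applies above; the
// helper name and exact shape are assumptions, not V8 API. An ARM Operand2
// immediate is encodable iff rotating the 32-bit value left by some even
// amount brings it into 8 bits; the encoding stores half that amount.
static bool FitsRotatedImm8(uint32_t imm32,
                            uint32_t* rotate_imm,
                            uint32_t* immed_8) {
  for (uint32_t rot = 0; rot < 32; rot += 2) {
    // Rotating left by rot undoes a right-rotation of the 8-bit payload.
    uint32_t v = (imm32 << rot) | (imm32 >> ((32 - rot) & 31));
    if (v <= 0xFF) {
      *rotate_imm = rot / 2;  // 4-bit field, in units of 2 bits.
      *immed_8 = v;
      return true;
    }
  }
  return false;  // msr above then loads the value via ip instead.
}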
1880 
1881 // Load/Store instructions.
1882 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1883  if (dst.is(pc)) {
1884  positions_recorder()->WriteRecordedPositions();
1885  }
1886  addrmod2(cond | B26 | L, dst, src);
1887 }
1888 
1889 
1890 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1891  addrmod2(cond | B26, src, dst);
1892 }
1893 
1894 
1895 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1896  addrmod2(cond | B26 | B | L, dst, src);
1897 }
1898 
1899 
1900 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1901  addrmod2(cond | B26 | B, src, dst);
1902 }
1903 
1904 
1905 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1906  addrmod3(cond | L | B7 | H | B4, dst, src);
1907 }
1908 
1909 
1910 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1911  addrmod3(cond | B7 | H | B4, src, dst);
1912 }
1913 
1914 
1915 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1916  addrmod3(cond | L | B7 | S6 | B4, dst, src);
1917 }
1918 
1919 
1920 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1921  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1922 }
1923 
1924 
1925 void Assembler::ldrd(Register dst1, Register dst2,
1926  const MemOperand& src, Condition cond) {
1927  ASSERT(CpuFeatures::IsSupported(ARMv7));
1928  ASSERT(src.rm().is(no_reg));
1929  ASSERT(!dst1.is(lr)); // r14.
1930  ASSERT_EQ(0, dst1.code() % 2);
1931  ASSERT_EQ(dst1.code() + 1, dst2.code());
1932  addrmod3(cond | B7 | B6 | B4, dst1, src);
1933 }
1934 
1935 
1936 void Assembler::strd(Register src1, Register src2,
1937  const MemOperand& dst, Condition cond) {
1938  ASSERT(dst.rm().is(no_reg));
1939  ASSERT(!src1.is(lr)); // r14.
1940  ASSERT_EQ(0, src1.code() % 2);
1941  ASSERT_EQ(src1.code() + 1, src2.code());
1942  ASSERT(CpuFeatures::IsSupported(ARMv7));
1943  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
1944 }
1945 
1946 
1947 // Preload instructions.
1948 void Assembler::pld(const MemOperand& address) {
1949  // Instruction details available in ARM DDI 0406C.b, A8.8.128.
1950  // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
1951  // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
1952  ASSERT(address.rm().is(no_reg));
1953  ASSERT(address.am() == Offset);
1954  int U = B23;
1955  int offset = address.offset();
1956  if (offset < 0) {
1957  offset = -offset;
1958  U = 0;
1959  }
1960  ASSERT(offset < 4096);
1961  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
1962  0xf*B12 | offset);
1963 }
1964 
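// Worked example: the offset's sign is folded into the U bit (B23). For
// pld [r1, #-8] the code above emits offset = 8 with U = 0, producing
// 1111 0101 0101 0001 1111 0000 0000 1000 = 0xF551F008.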
1965 
1966 // Load/Store multiple instructions.
1967 void Assembler::ldm(BlockAddrMode am,
1968  Register base,
1969  RegList dst,
1970  Condition cond) {
1971  // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable.
1972  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
1973 
1974  addrmod4(cond | B27 | am | L, base, dst);
1975 
1976  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1977  if (cond == al && (dst & pc.bit()) != 0) {
1978  // There is a slight chance that the ldm instruction was actually a call,
1979  // in which case it would be wrong to return into the constant pool; we
1980  // recognize this case by checking if the emission of the pool was blocked
1981  // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1982  // the case, we emit a jump over the pool.
1983  CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
1984  }
1985 }
1986 
1987 
1988 void Assembler::stm(BlockAddrMode am,
1989  Register base,
1990  RegList src,
1991  Condition cond) {
1992  addrmod4(cond | B27 | am, base, src);
1993 }
1994 
1995 
1996 // Exception-generating instructions and debugging support.
1997 // Stops with a non-negative code less than kNumOfWatchedStops support
1998 // enabling/disabling and a counter feature. See simulator-arm.h .
1999 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
2000 #ifndef __arm__
2001  ASSERT(code >= kDefaultStopCode);
2002  {
2003  // The Simulator will handle the stop instruction and get the message
2004  // address. It expects to find the address just after the svc instruction.
2005  BlockConstPoolScope block_const_pool(this);
2006  if (code >= 0) {
2007  svc(kStopCode + code, cond);
2008  } else {
2009  svc(kStopCode + kMaxStopCode, cond);
2010  }
2011  emit(reinterpret_cast<Instr>(msg));
2012  }
2013 #else // def __arm__
2014  if (cond != al) {
2015  Label skip;
2016  b(&skip, NegateCondition(cond));
2017  bkpt(0);
2018  bind(&skip);
2019  } else {
2020  bkpt(0);
2021  }
2022 #endif // def __arm__
2023 }
2024 
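// Note on the simulator path above: the message pointer is emitted as the
// word immediately following the svc, which is why the constant pool is
// blocked around the pair; a pool emitted between them would be read as
// the message address.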
2025 
2026 void Assembler::bkpt(uint32_t imm16) { // v5 and above
2027  ASSERT(is_uint16(imm16));
2028  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
2029 }
2030 
2031 
2032 void Assembler::svc(uint32_t imm24, Condition cond) {
2033  ASSERT(is_uint24(imm24));
2034  emit(cond | 15*B24 | imm24);
2035 }
2036 
2037 
2038 // Coprocessor instructions.
2039 void Assembler::cdp(Coprocessor coproc,
2040  int opcode_1,
2041  CRegister crd,
2042  CRegister crn,
2043  CRegister crm,
2044  int opcode_2,
2045  Condition cond) {
2046  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
2047  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2048  crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
2049 }
2050 
2051 
2052 void Assembler::cdp2(Coprocessor coproc,
2053  int opcode_1,
2054  CRegister crd,
2055  CRegister crn,
2056  CRegister crm,
2057  int opcode_2) { // v5 and above
2058  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
2059 }
2060 
2061 
2062 void Assembler::mcr(Coprocessor coproc,
2063  int opcode_1,
2064  Register rd,
2065  CRegister crn,
2066  CRegister crm,
2067  int opcode_2,
2068  Condition cond) {
2069  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2070  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2071  rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2072 }
2073 
2074 
2075 void Assembler::mcr2(Coprocessor coproc,
2076  int opcode_1,
2077  Register rd,
2078  CRegister crn,
2079  CRegister crm,
2080  int opcode_2) { // v5 and above
2081  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2082 }
2083 
2084 
2085 void Assembler::mrc(Coprocessor coproc,
2086  int opcode_1,
2087  Register rd,
2088  CRegister crn,
2089  CRegister crm,
2090  int opcode_2,
2091  Condition cond) {
2092  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
2093  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2094  rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2095 }
2096 
2097 
2098 void Assembler::mrc2(Coprocessor coproc,
2099  int opcode_1,
2100  Register rd,
2101  CRegister crn,
2102  CRegister crm,
2103  int opcode_2) { // v5 and above
2104  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
2105 }
2106 
2107 
2108 void Assembler::ldc(Coprocessor coproc,
2109  CRegister crd,
2110  const MemOperand& src,
2111  LFlag l,
2112  Condition cond) {
2113  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2114 }
2115 
2116 
2117 void Assembler::ldc(Coprocessor coproc,
2118  CRegister crd,
2119  Register rn,
2120  int option,
2121  LFlag l,
2122  Condition cond) {
2123  // Unindexed addressing.
2124  ASSERT(is_uint8(option));
2125  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2126  coproc*B8 | (option & 255));
2127 }
2128 
2129 
2130 void Assembler::ldc2(Coprocessor coproc,
2131  CRegister crd,
2132  const MemOperand& src,
2133  LFlag l) { // v5 and above
2134  ldc(coproc, crd, src, l, kSpecialCondition);
2135 }
2136 
2137 
2138 void Assembler::ldc2(Coprocessor coproc,
2139  CRegister crd,
2140  Register rn,
2141  int option,
2142  LFlag l) { // v5 and above
2143  ldc(coproc, crd, rn, option, l, kSpecialCondition);
2144 }
2145 
2146 
2147 // Support for VFP.
2148 
2149 void Assembler::vldr(const DwVfpRegister dst,
2150  const Register base,
2151  int offset,
2152  const Condition cond) {
2153  // Ddst = MEM(Rbase + offset).
2154  // Instruction details available in ARM DDI 0406C.b, A8-924.
2155  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2156  // Vd(15-12) | 1011(11-8) | (offset/4)
2157  int u = 1;
2158  if (offset < 0) {
2159  offset = -offset;
2160  u = 0;
2161  }
2162  int vd, d;
2163  dst.split_code(&vd, &d);
2164 
2165  ASSERT(offset >= 0);
2166  if ((offset % 4) == 0 && (offset / 4) < 256) {
2167  emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2168  0xB*B8 | ((offset / 4) & 255));
2169  } else {
2170  // Larger offsets must be handled by computing the correct address
2171  // in the ip register.
2172  ASSERT(!base.is(ip));
2173  if (u == 1) {
2174  add(ip, base, Operand(offset));
2175  } else {
2176  sub(ip, base, Operand(offset));
2177  }
2178  emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
2179  }
2180 }
2181 
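// Worked example: the immediate form above needs a word-aligned offset
// below 1024, since imm8 holds offset/4. vldr d0, [r2, #1020] encodes
// directly (imm8 = 255), while offsets like 1024 or 6 take the fallback
// path through ip.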
2182 
2183 void Assembler::vldr(const DwVfpRegister dst,
2184  const MemOperand& operand,
2185  const Condition cond) {
2186  ASSERT(!operand.rm().is_valid());
2187  ASSERT(operand.am_ == Offset);
2188  vldr(dst, operand.rn(), operand.offset(), cond);
2189 }
2190 
2191 
2192 void Assembler::vldr(const SwVfpRegister dst,
2193  const Register base,
2194  int offset,
2195  const Condition cond) {
2196  // Sdst = MEM(Rbase + offset).
2197  // Instruction details available in ARM DDI 0406A, A8-628.
2198  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
2199  // Vdst(15-12) | 1010(11-8) | (offset/4)
2200  int u = 1;
2201  if (offset < 0) {
2202  offset = -offset;
2203  u = 0;
2204  }
2205  int sd, d;
2206  dst.split_code(&sd, &d);
2207  ASSERT(offset >= 0);
2208 
2209  if ((offset % 4) == 0 && (offset / 4) < 256) {
2210  emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2211  0xA*B8 | ((offset / 4) & 255));
2212  } else {
2213  // Larger offsets must be handled by computing the correct address
2214  // in the ip register.
2215  ASSERT(!base.is(ip));
2216  if (u == 1) {
2217  add(ip, base, Operand(offset));
2218  } else {
2219  sub(ip, base, Operand(offset));
2220  }
2221  emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2222  }
2223 }
2224 
2225 
2226 void Assembler::vldr(const SwVfpRegister dst,
2227  const MemOperand& operand,
2228  const Condition cond) {
2229  ASSERT(!operand.rm().is_valid());
2230  ASSERT(operand.am_ == Offset);
2231  vldr(dst, operand.rn(), operand.offset(), cond);
2232 }
2233 
2234 
2235 void Assembler::vstr(const DwVfpRegister src,
2236  const Register base,
2237  int offset,
2238  const Condition cond) {
2239  // MEM(Rbase + offset) = Dsrc.
2240  // Instruction details available in ARM DDI 0406C.b, A8-1082.
2241  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2242  // Vd(15-12) | 1011(11-8) | (offset/4)
2243  int u = 1;
2244  if (offset < 0) {
2245  offset = -offset;
2246  u = 0;
2247  }
2248  ASSERT(offset >= 0);
2249  int vd, d;
2250  src.split_code(&vd, &d);
2251 
2252  if ((offset % 4) == 0 && (offset / 4) < 256) {
2253  emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2254  ((offset / 4) & 255));
2255  } else {
2256  // Larger offsets must be handled by computing the correct address
2257  // in the ip register.
2258  ASSERT(!base.is(ip));
2259  if (u == 1) {
2260  add(ip, base, Operand(offset));
2261  } else {
2262  sub(ip, base, Operand(offset));
2263  }
2264  emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
2265  }
2266 }
2267 
2268 
2269 void Assembler::vstr(const DwVfpRegister src,
2270  const MemOperand& operand,
2271  const Condition cond) {
2272  ASSERT(!operand.rm().is_valid());
2273  ASSERT(operand.am_ == Offset);
2274  vstr(src, operand.rn(), operand.offset(), cond);
2275 }
2276 
2277 
2278 void Assembler::vstr(const SwVfpRegister src,
2279  const Register base,
2280  int offset,
2281  const Condition cond) {
2282  // MEM(Rbase + offset) = Ssrc.
2283  // Instruction details available in ARM DDI 0406A, A8-786.
2284  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
2285  // Vdst(15-12) | 1010(11-8) | (offset/4)
2286  int u = 1;
2287  if (offset < 0) {
2288  offset = -offset;
2289  u = 0;
2290  }
2291  int sd, d;
2292  src.split_code(&sd, &d);
2293  ASSERT(offset >= 0);
2294  if ((offset % 4) == 0 && (offset / 4) < 256) {
2295  emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2296  0xA*B8 | ((offset / 4) & 255));
2297  } else {
2298  // Larger offsets must be handled by computing the correct address
2299  // in the ip register.
2300  ASSERT(!base.is(ip));
2301  if (u == 1) {
2302  add(ip, base, Operand(offset));
2303  } else {
2304  sub(ip, base, Operand(offset));
2305  }
2306  emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2307  }
2308 }
2309 
2310 
2311 void Assembler::vstr(const SwVfpRegister src,
2312  const MemOperand& operand,
2313  const Condition cond) {
2314  ASSERT(!operand.rm().is_valid());
2315  ASSERT(operand.am_ == Offset);
2316  vstr(src, operand.rn(), operand.offset(), cond);
2317 }
2318 
2319 
2320 void Assembler::vldm(BlockAddrMode am,
2321  Register base,
2322  DwVfpRegister first,
2323  DwVfpRegister last,
2324  Condition cond) {
2325  // Instruction details available in ARM DDI 0406C.b, A8-922.
2326  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2327  // first(15-12) | 1011(11-8) | (count * 2)
2328  ASSERT_LE(first.code(), last.code());
2329  ASSERT(am == ia || am == ia_w || am == db_w);
2330  ASSERT(!base.is(pc));
2331 
2332  int sd, d;
2333  first.split_code(&sd, &d);
2334  int count = last.code() - first.code() + 1;
2335  ASSERT(count <= 16);
2336  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2337  0xB*B8 | count*2);
2338 }
2339 
2340 
2341 void Assembler::vstm(BlockAddrMode am,
2342  Register base,
2343  DwVfpRegister first,
2344  DwVfpRegister last,
2345  Condition cond) {
2346  // Instruction details available in ARM DDI 0406C.b, A8-1080.
2347  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2348  // first(15-12) | 1011(11-8) | (count * 2)
2349  ASSERT_LE(first.code(), last.code());
2350  ASSERT(am == ia || am == ia_w || am == db_w);
2351  ASSERT(!base.is(pc));
2352 
2353  int sd, d;
2354  first.split_code(&sd, &d);
2355  int count = last.code() - first.code() + 1;
2356  ASSERT(count <= 16);
2357  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2358  0xB*B8 | count*2);
2359 }
2360 
2361 void Assembler::vldm(BlockAddrMode am,
2362  Register base,
2363  SwVfpRegister first,
2364  SwVfpRegister last,
2365  Condition cond) {
2366  // Instruction details available in ARM DDI 0406A, A8-626.
2367  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2368  // first(15-12) | 1010(11-8) | count
2369  ASSERT_LE(first.code(), last.code());
2370  ASSERT(am == ia || am == ia_w || am == db_w);
2371  ASSERT(!base.is(pc));
2372 
2373  int sd, d;
2374  first.split_code(&sd, &d);
2375  int count = last.code() - first.code() + 1;
2376  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2377  0xA*B8 | count);
2378 }
2379 
2380 
2381 void Assembler::vstm(BlockAddrMode am,
2382  Register base,
2383  SwVfpRegister first,
2384  SwVfpRegister last,
2385  Condition cond) {
2386  // Instruction details available in ARM DDI 0406A, A8-784.
2387  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2388  // first(15-12) | 1010(11-8) | count
2389  ASSERT_LE(first.code(), last.code());
2390  ASSERT(am == ia || am == ia_w || am == db_w);
2391  ASSERT(!base.is(pc));
2392 
2393  int sd, d;
2394  first.split_code(&sd, &d);
2395  int count = last.code() - first.code() + 1;
2396  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2397  0xA*B8 | count);
2398 }
2399 
2400 
2401 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2402  uint64_t i;
2403  OS::MemCopy(&i, &d, 8);
2404 
2405  *lo = i & 0xffffffff;
2406  *hi = i >> 32;
2407 }
2408 
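// OS::MemCopy is V8's memcpy wrapper; in standalone code the same
// well-defined type pun would be (a sketch, assuming <string.h>):
//   uint64_t i;
//   memcpy(&i, &d, sizeof(i));  // avoids strict-aliasing UB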
2409 
2410 // Only works for little endian floating point formats.
2411 // We don't support VFP on the mixed endian floating point platform.
2412 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
2414 
2415  // VMOV can accept an immediate of the form:
2416  //
2417  // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2418  //
2419  // The immediate is encoded using an 8-bit quantity, composed of two
2420  // 4-bit fields. For an 8-bit immediate of the form:
2421  //
2422  // [abcdefgh]
2423  //
2424  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2425  // created of the form:
2426  //
2427  // [aBbbbbbb,bbcdefgh,00000000,00000000,
2428  // 00000000,00000000,00000000,00000000]
2429  //
2430  // where B = ~b.
2431  //
2432 
2433  uint32_t lo, hi;
2434  DoubleAsTwoUInt32(d, &lo, &hi);
2435 
2436  // The most obvious constraint is the long block of zeroes.
2437  if ((lo != 0) || ((hi & 0xffff) != 0)) {
2438  return false;
2439  }
2440 
2441  // Bits 62:55 must be all clear or all set.
2442  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2443  return false;
2444  }
2445 
2446  // Bit 63 must be NOT bit 62.
2447  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2448  return false;
2449  }
2450 
2451  // Create the encoded immediate in the form:
2452  // [00000000,0000abcd,00000000,0000efgh]
2453  *encoding = (hi >> 16) & 0xf; // Low nybble.
2454  *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2455  *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
2456 
2457  return true;
2458 }
2459 
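// Worked example: for d = 1.0, hi = 0x3FF00000 and lo = 0, so the
// zero-block and bit checks above pass and *encoding becomes 0x70000,
// i.e. imm4H = 0x7 and imm4L = 0x0, matching ARM's imm8 = 0x70 for
// vmov.f64 dN, #1.0.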
2460 
2461 void Assembler::vmov(const DwVfpRegister dst,
2462  double imm,
2463  const Register scratch) {
2464  uint32_t enc;
2465  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
2466  // The double can be encoded in the instruction.
2467  //
2468  // Dd = immediate
2469  // Instruction details available in ARM DDI 0406C.b, A8-936.
2470  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2471  // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2472  int vd, d;
2473  dst.split_code(&vd, &d);
2474  emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
2475  } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
2476  // TODO(jfb) Temporarily turned off until we have constant blinding or
2477  // some equivalent mitigation: an attacker can otherwise control
2478  // generated data which also happens to be executable, a Very Bad
2479  // Thing indeed.
2480  // Blinding gets tricky because we don't have xor, we probably
2481  // need to add/subtract without losing precision, which requires a
2482  // cookie value that Lithium is probably better positioned to
2483  // choose.
2484  // We could also add a few peepholes here like detecting 0.0 and
2485  // -0.0 and doing a vmov from the sequestered d14, forcing denorms
2486  // to zero (we set flush-to-zero), and normalizing NaN values.
2487  // We could also detect redundant values.
2488  // The code could also randomize the order of values, though
2489  // that's tricky because vldr has a limited reach. Furthermore
2490  // it breaks load locality.
2491  RelocInfo rinfo(pc_, imm);
2492  ConstantPoolAddEntry(rinfo);
2493  vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
2494  } else {
2495  // Synthesise the double from ARM immediates.
2496  uint32_t lo, hi;
2497  DoubleAsTwoUInt32(imm, &lo, &hi);
2498 
2499  if (scratch.is(no_reg)) {
2500  if (dst.code() < 16) {
2501  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
2502  // Move the low part of the double into the lower of the corresponding S
2503  // registers of D register dst.
2504  mov(ip, Operand(lo));
2505  vmov(loc.low(), ip);
2506 
2507  // Move the high part of the double into the higher of the
2508  // corresponding S registers of D register dst.
2509  mov(ip, Operand(hi));
2510  vmov(loc.high(), ip);
2511  } else {
2512  // D16-D31 do not have S registers, so move the low and high parts
2513  // directly to the D register using vmov.32.
2514  // Note: This may be slower, so we only do this when we have to.
2515  mov(ip, Operand(lo));
2516  vmov(dst, VmovIndexLo, ip);
2517  mov(ip, Operand(hi));
2518  vmov(dst, VmovIndexHi, ip);
2519  }
2520  } else {
2521  // Move the low and high parts of the double to a D register in one
2522  // instruction.
2523  mov(ip, Operand(lo));
2524  mov(scratch, Operand(hi));
2525  vmov(dst, ip, scratch);
2526  }
2527  }
2528 }
2529 
2530 
2531 void Assembler::vmov(const SwVfpRegister dst,
2532  const SwVfpRegister src,
2533  const Condition cond) {
2534  // Sd = Sm
2535  // Instruction details available in ARM DDI 0406B, A8-642.
2536  int sd, d, sm, m;
2537  dst.split_code(&sd, &d);
2538  src.split_code(&sm, &m);
2539  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2540 }
2541 
2542 
2543 void Assembler::vmov(const DwVfpRegister dst,
2544  const DwVfpRegister src,
2545  const Condition cond) {
2546  // Dd = Dm
2547  // Instruction details available in ARM DDI 0406C.b, A8-938.
2548  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2549  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2550  int vd, d;
2551  dst.split_code(&vd, &d);
2552  int vm, m;
2553  src.split_code(&vm, &m);
2554  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
2555  vm);
2556 }
2557 
2558 
2559 void Assembler::vmov(const DwVfpRegister dst,
2560  const VmovIndex index,
2561  const Register src,
2562  const Condition cond) {
2563  // Dd[index] = Rt
2564  // Instruction details available in ARM DDI 0406C.b, A8-940.
2565  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2566  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2567  ASSERT(index.index == 0 || index.index == 1);
2568  int vd, d;
2569  dst.split_code(&vd, &d);
2570  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
2571  d*B7 | B4);
2572 }
2573 
2574 
2575 void Assembler::vmov(const Register dst,
2576  const VmovIndex index,
2577  const DwVfpRegister src,
2578  const Condition cond) {
2579  // Rt = Dd[index]
2580  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2581  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2582  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2583  ASSERT(index.index == 0 || index.index == 1);
2584  int vn, n;
2585  src.split_code(&vn, &n);
2586  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2587  0xB*B8 | n*B7 | B4);
2588 }
2589 
2590 
2591 void Assembler::vmov(const DwVfpRegister dst,
2592  const Register src1,
2593  const Register src2,
2594  const Condition cond) {
2595  // Dm = <Rt,Rt2>.
2596  // Instruction details available in ARM DDI 0406C.b, A8-948.
2597  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2598  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2599  ASSERT(!src1.is(pc) && !src2.is(pc));
2600  int vm, m;
2601  dst.split_code(&vm, &m);
2602  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2603  src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2604 }
2605 
2606 
2607 void Assembler::vmov(const Register dst1,
2608  const Register dst2,
2609  const DwVfpRegister src,
2610  const Condition cond) {
2611  // <Rt,Rt2> = Dm.
2612  // Instruction details available in ARM DDI 0406C.b, A8-948.
2613  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2614  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2615  ASSERT(!dst1.is(pc) && !dst2.is(pc));
2616  int vm, m;
2617  src.split_code(&vm, &m);
2618  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2619  dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
2620 }
2621 
2622 
2623 void Assembler::vmov(const SwVfpRegister dst,
2624  const Register src,
2625  const Condition cond) {
2626  // Sn = Rt.
2627  // Instruction details available in ARM DDI 0406A, A8-642.
2628  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2629  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2630  ASSERT(!src.is(pc));
2631  int sn, n;
2632  dst.split_code(&sn, &n);
2633  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
2634 }
2635 
2636 
2637 void Assembler::vmov(const Register dst,
2638  const SwVfpRegister src,
2639  const Condition cond) {
2640  // Rt = Sn.
2641  // Instruction details available in ARM DDI 0406A, A8-642.
2642  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2643  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2644  ASSERT(!dst.is(pc));
2645  int sn, n;
2646  src.split_code(&sn, &n);
2647  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2648 }
2649 
2650 
2651 // Type of data to read from or write to VFP register.
2652 // Used as specifier in generic vcvt instruction.
2653 enum VFPType { S32, U32, F32, F64 };
2654 
2655 
2656 static bool IsSignedVFPType(VFPType type) {
2657  switch (type) {
2658  case S32:
2659  return true;
2660  case U32:
2661  return false;
2662  default:
2663  UNREACHABLE();
2664  return false;
2665  }
2666 }
2667 
2668 
2669 static bool IsIntegerVFPType(VFPType type) {
2670  switch (type) {
2671  case S32:
2672  case U32:
2673  return true;
2674  case F32:
2675  case F64:
2676  return false;
2677  default:
2678  UNREACHABLE();
2679  return false;
2680  }
2681 }
2682 
2683 
2684 static bool IsDoubleVFPType(VFPType type) {
2685  switch (type) {
2686  case F32:
2687  return false;
2688  case F64:
2689  return true;
2690  default:
2691  UNREACHABLE();
2692  return false;
2693  }
2694 }
2695 
2696 
2697 // Split five bit reg_code based on size of reg_type.
2698 // 32-bit register codes are Vm:M
2699 // 64-bit register codes are M:Vm
2700 // where Vm is four bits, and M is a single bit.
2701 static void SplitRegCode(VFPType reg_type,
2702  int reg_code,
2703  int* vm,
2704  int* m) {
2705  ASSERT((reg_code >= 0) && (reg_code <= 31));
2706  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2707  // 32 bit type.
2708  *m = reg_code & 0x1;
2709  *vm = reg_code >> 1;
2710  } else {
2711  // 64 bit type.
2712  *m = (reg_code & 0x10) >> 4;
2713  *vm = reg_code & 0x0F;
2714  }
2715 }
2716 
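// Worked example: s17 (32-bit) splits as vm = 17 >> 1 = 8, m = 1,
// encoded Vm:M; d17 (64-bit) splits as vm = 17 & 0xF = 1,
// m = (17 & 0x10) >> 4 = 1, encoded M:Vm.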
2717 
2718 // Encode vcvt.src_type.dst_type instruction.
2719 static Instr EncodeVCVT(const VFPType dst_type,
2720  const int dst_code,
2721  const VFPType src_type,
2722  const int src_code,
2723  VFPConversionMode mode,
2724  const Condition cond) {
2725  ASSERT(src_type != dst_type);
2726  int D, Vd, M, Vm;
2727  SplitRegCode(src_type, src_code, &Vm, &M);
2728  SplitRegCode(dst_type, dst_code, &Vd, &D);
2729 
2730  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2731  // Conversion between IEEE floating point and 32-bit integer.
2732  // Instruction details available in ARM DDI 0406B, A8.6.295.
2733  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2734  // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2735  ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2736 
2737  int sz, opc2, op;
2738 
2739  if (IsIntegerVFPType(dst_type)) {
2740  opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2741  sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2742  op = mode;
2743  } else {
2744  ASSERT(IsIntegerVFPType(src_type));
2745  opc2 = 0x0;
2746  sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2747  op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2748  }
2749 
2750  return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2751  Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2752  } else {
2753  // Conversion between IEEE double and single precision.
2754  // Instruction details available in ARM DDI 0406B, A8.6.298.
2755  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2756  // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2757  int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2758  return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2759  Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2760  }
2761 }
2762 
2763 
2764 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2765  const SwVfpRegister src,
2766  VFPConversionMode mode,
2767  const Condition cond) {
2768  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2769 }
2770 
2771 
2772 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2773  const SwVfpRegister src,
2774  VFPConversionMode mode,
2775  const Condition cond) {
2776  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2777 }
2778 
2779 
2780 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2781  const SwVfpRegister src,
2782  VFPConversionMode mode,
2783  const Condition cond) {
2784  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2785 }
2786 
2787 
2788 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2789  const DwVfpRegister src,
2790  VFPConversionMode mode,
2791  const Condition cond) {
2792  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2793 }
2794 
2795 
2796 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2797  const DwVfpRegister src,
2798  VFPConversionMode mode,
2799  const Condition cond) {
2800  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2801 }
2802 
2803 
2804 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2805  const SwVfpRegister src,
2806  VFPConversionMode mode,
2807  const Condition cond) {
2808  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2809 }
2810 
2811 
2812 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2813  const DwVfpRegister src,
2814  VFPConversionMode mode,
2815  const Condition cond) {
2816  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2817 }
2818 
2819 
2820 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2821  int fraction_bits,
2822  const Condition cond) {
2823  // Instruction details available in ARM DDI 0406C.b, A8-874.
2824  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
2825  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
2826  ASSERT(fraction_bits > 0 && fraction_bits <= 32);
2827  ASSERT(CpuFeatures::IsSupported(VFP3));
2828  int vd, d;
2829  dst.split_code(&vd, &d);
2830  int i = ((32 - fraction_bits) >> 4) & 1;
2831  int imm4 = (32 - fraction_bits) & 0xf;
2832  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
2833  vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
2834 }
2835 
2836 
2837 void Assembler::vneg(const DwVfpRegister dst,
2838  const DwVfpRegister src,
2839  const Condition cond) {
2840  // Instruction details available in ARM DDI 0406C.b, A8-968.
2841  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
2842  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2843  int vd, d;
2844  dst.split_code(&vd, &d);
2845  int vm, m;
2846  src.split_code(&vm, &m);
2847 
2848  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
2849  m*B5 | vm);
2850 }
2851 
2852 
2853 void Assembler::vabs(const DwVfpRegister dst,
2854  const DwVfpRegister src,
2855  const Condition cond) {
2856  // Instruction details available in ARM DDI 0406C.b, A8-524.
2857  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2858  // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2859  int vd, d;
2860  dst.split_code(&vd, &d);
2861  int vm, m;
2862  src.split_code(&vm, &m);
2863  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
2864  m*B5 | vm);
2865 }
2866 
2867 
2868 void Assembler::vadd(const DwVfpRegister dst,
2869  const DwVfpRegister src1,
2870  const DwVfpRegister src2,
2871  const Condition cond) {
2872  // Dd = vadd(Dn, Dm) double precision floating point addition.
2873  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2874  // Instruction details available in ARM DDI 0406C.b, A8-830.
2875  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2876  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2877  int vd, d;
2878  dst.split_code(&vd, &d);
2879  int vn, n;
2880  src1.split_code(&vn, &n);
2881  int vm, m;
2882  src2.split_code(&vm, &m);
2883  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2884  n*B7 | m*B5 | vm);
2885 }
2886 
2887 
2888 void Assembler::vsub(const DwVfpRegister dst,
2889  const DwVfpRegister src1,
2890  const DwVfpRegister src2,
2891  const Condition cond) {
2892  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2893  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2894  // Instruction details available in ARM DDI 0406C.b, A8-1086.
2895  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2896  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2897  int vd, d;
2898  dst.split_code(&vd, &d);
2899  int vn, n;
2900  src1.split_code(&vn, &n);
2901  int vm, m;
2902  src2.split_code(&vm, &m);
2903  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2904  n*B7 | B6 | m*B5 | vm);
2905 }
2906 
2907 
2908 void Assembler::vmul(const DwVfpRegister dst,
2909  const DwVfpRegister src1,
2910  const DwVfpRegister src2,
2911  const Condition cond) {
2912  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
2913  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2914  // Instruction details available in ARM DDI 0406C.b, A8-960.
2915  // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
2916  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2917  int vd, d;
2918  dst.split_code(&vd, &d);
2919  int vn, n;
2920  src1.split_code(&vn, &n);
2921  int vm, m;
2922  src2.split_code(&vm, &m);
2923  emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2924  n*B7 | m*B5 | vm);
2925 }
2926 
2927 
2928 void Assembler::vmla(const DwVfpRegister dst,
2929  const DwVfpRegister src1,
2930  const DwVfpRegister src2,
2931  const Condition cond) {
2932  // Instruction details available in ARM DDI 0406C.b, A8-932.
2933  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
2934  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
2935  int vd, d;
2936  dst.split_code(&vd, &d);
2937  int vn, n;
2938  src1.split_code(&vn, &n);
2939  int vm, m;
2940  src2.split_code(&vm, &m);
2941  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
2942  vm);
2943 }
2944 
2945 
2946 void Assembler::vmls(const DwVfpRegister dst,
2947  const DwVfpRegister src1,
2948  const DwVfpRegister src2,
2949  const Condition cond) {
2950  // Instruction details available in ARM DDI 0406C.b, A8-932.
2951  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
2952  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
2953  int vd, d;
2954  dst.split_code(&vd, &d);
2955  int vn, n;
2956  src1.split_code(&vn, &n);
2957  int vm, m;
2958  src2.split_code(&vm, &m);
2959  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
2960  m*B5 | vm);
2961 }
2962 
2963 
2964 void Assembler::vdiv(const DwVfpRegister dst,
2965  const DwVfpRegister src1,
2966  const DwVfpRegister src2,
2967  const Condition cond) {
2968  // Dd = vdiv(Dn, Dm) double precision floating point division.
2969  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2970  // Instruction details available in ARM DDI 0406C.b, A8-882.
2971  // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
2972  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2973  int vd, d;
2974  dst.split_code(&vd, &d);
2975  int vn, n;
2976  src1.split_code(&vn, &n);
2977  int vm, m;
2978  src2.split_code(&vm, &m);
2979  emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
2980  vm);
2981 }
2982 
2983 
2984 void Assembler::vcmp(const DwVfpRegister src1,
2985  const DwVfpRegister src2,
2986  const Condition cond) {
2987  // vcmp(Dd, Dm) double precision floating point comparison.
2988  // Instruction details available in ARM DDI 0406C.b, A8-864.
2989  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
2990  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2991  int vd, d;
2992  src1.split_code(&vd, &d);
2993  int vm, m;
2994  src2.split_code(&vm, &m);
2995  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
2996  m*B5 | vm);
2997 }
2998 
2999 
3000 void Assembler::vcmp(const DwVfpRegister src1,
3001  const double src2,
3002  const Condition cond) {
3003  // vcmp(Dd, #0.0) double precision floating point comparison.
3004  // Instruction details available in ARM DDI 0406C.b, A8-864.
3005  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
3006  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
3007  ASSERT(src2 == 0.0);
3008  int vd, d;
3009  src1.split_code(&vd, &d);
3010  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
3011 }
3012 
3013 
3014 void Assembler::vmsr(Register dst, Condition cond) {
3015  // Instruction details available in ARM DDI 0406A, A8-652.
3016  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
3017  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3018  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
3019  dst.code()*B12 | 0xA*B8 | B4);
3020 }
3021 
3022 
3023 void Assembler::vmrs(Register dst, Condition cond) {
3024  // Instruction details available in ARM DDI 0406A, A8-652.
3025  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
3026  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3027  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
3028  dst.code()*B12 | 0xA*B8 | B4);
3029 }
3030 
3031 
3032 void Assembler::vsqrt(const DwVfpRegister dst,
3033  const DwVfpRegister src,
3034  const Condition cond) {
3035  // Instruction details available in ARM DDI 0406C.b, A8-1058.
3036  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
3037  // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
3038  int vd, d;
3039  dst.split_code(&vd, &d);
3040  int vm, m;
3041  src.split_code(&vm, &m);
3042  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
3043  m*B5 | vm);
3044 }
3045 
3046 
3047 // Support for NEON.
3048 
3049 void Assembler::vld1(NeonSize size,
3050  const NeonListOperand& dst,
3051  const NeonMemOperand& src) {
3052  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
3053  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
3054  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3055  ASSERT(CpuFeatures::IsSupported(NEON));
3056  int vd, d;
3057  dst.base().split_code(&vd, &d);
3058  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
3059  dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
3060 }
3061 
3062 
3063 void Assembler::vst1(NeonSize size,
3064  const NeonListOperand& src,
3065  const NeonMemOperand& dst) {
3066  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
3067  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
3068  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3069  ASSERT(CpuFeatures::IsSupported(NEON));
3070  int vd, d;
3071  src.base().split_code(&vd, &d);
3072  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
3073  size*B6 | dst.align()*B4 | dst.rm().code());
3074 }
3075 
3076 
3077 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
3078  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
3079  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
3080  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
3081  ASSERT(CpuFeatures::IsSupported(NEON));
3082  int vd, d;
3083  dst.split_code(&vd, &d);
3084  int vm, m;
3085  src.split_code(&vm, &m);
3086  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
3087  (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
3088 }
3089 
3090 
3091 // Pseudo instructions.
3092 void Assembler::nop(int type) {
3093  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
3094  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
3095  // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
3096  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
3097  // a type.
3098  ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3099  emit(al | 13*B21 | type*B12 | type);
3100 }
3101 
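// Worked example: nop(0) emits al | 13*B21 = 0xE1A00000, which is exactly
// "mov r0, r0"; nop(1) emits 0xE1A01001, "mov r1, r1", so the type value
// survives a round trip through IsNop() below.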
3102 
3103 bool Assembler::IsMovT(Instr instr) {
3104  instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3105  ((kNumRegisters-1)*B12) | // mask out register
3106  EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3107  return instr == 0x34*B20;
3108 }
3109 
3110 
3111 bool Assembler::IsMovW(Instr instr) {
3112  instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3113  ((kNumRegisters-1)*B12) | // mask out destination
3114  EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3115  return instr == 0x30*B20;
3116 }
3117 
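// Worked example: masking off the condition, Rd and the movw-style
// immediate leaves only bits 27-20. "movw r0, #0x1234" (0xE3001234)
// reduces to 0x30*B20 and "movt r0, #0x1234" (0xE3401234) to 0x34*B20,
// the two opcodes tested above.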
3118 
3119 bool Assembler::IsNop(Instr instr, int type) {
3120  ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3121  // Check for mov rx, rx where x = type.
3122  return instr == (al | 13*B21 | type*B12 | type);
3123 }
3124 
3125 
3126 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
3127  uint32_t dummy1;
3128  uint32_t dummy2;
3129  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
3130 }
3131 
3132 
3133 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3134  return is_uint12(abs(imm32));
3135 }
3136 
3137 
3138 // Debugging.
3139 void Assembler::RecordJSReturn() {
3140  positions_recorder()->WriteRecordedPositions();
3141  CheckBuffer();
3142  RecordRelocInfo(RelocInfo::JS_RETURN);
3143 }
3144 
3145 
3146 void Assembler::RecordDebugBreakSlot() {
3147  positions_recorder()->WriteRecordedPositions();
3148  CheckBuffer();
3149  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
3150 }
3151 
3152 
3153 void Assembler::RecordComment(const char* msg) {
3154  if (FLAG_code_comments) {
3155  CheckBuffer();
3156  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
3157  }
3158 }
3159 
3160 
3161 void Assembler::RecordConstPool(int size) {
3162  // We only need this for debugger support, to correctly compute offsets in the
3163  // code.
3164 #ifdef ENABLE_DEBUGGER_SUPPORT
3165  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
3166 #endif
3167 }
3168 
3169 
3170 void Assembler::GrowBuffer() {
3171  if (!own_buffer_) FATAL("external code buffer is too small");
3172 
3173  // Compute new buffer size.
3174  CodeDesc desc; // the new buffer
3175  if (buffer_size_ < 4*KB) {
3176  desc.buffer_size = 4*KB;
3177  } else if (buffer_size_ < 1*MB) {
3178  desc.buffer_size = 2*buffer_size_;
3179  } else {
3180  desc.buffer_size = buffer_size_ + 1*MB;
3181  }
3182  CHECK_GT(desc.buffer_size, 0); // no overflow
3183 
3184  // Set up new buffer.
3185  desc.buffer = NewArray<byte>(desc.buffer_size);
3186 
3187  desc.instr_size = pc_offset();
3188  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
3189 
3190  // Copy the data.
3191  int pc_delta = desc.buffer - buffer_;
3192  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
3193  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
3194  OS::MemMove(reloc_info_writer.pos() + rc_delta,
3195  reloc_info_writer.pos(), desc.reloc_size);
3196 
3197  // Switch buffers.
3198  DeleteArray(buffer_);
3199  buffer_ = desc.buffer;
3200  buffer_size_ = desc.buffer_size;
3201  pc_ += pc_delta;
3202  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
3203  reloc_info_writer.last_pc() + pc_delta);
3204 
3205  // None of our relocation types are pc relative pointing outside the code
3206  // buffer nor pc absolute pointing inside the code buffer, so there is no need
3207  // to relocate any emitted relocation entries.
3208 
3209  // Relocate pending relocation entries.
3210  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3211  RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3212  ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
3213  rinfo.rmode() != RelocInfo::POSITION);
3214  if (rinfo.rmode() != RelocInfo::JS_RETURN) {
3215  rinfo.set_pc(rinfo.pc() + pc_delta);
3216  }
3217  }
3218  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3219  RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3220  ASSERT(rinfo.rmode() == RelocInfo::NONE64);
3221  rinfo.set_pc(rinfo.pc() + pc_delta);
3222  }
3223  constant_pool_builder_.Relocate(pc_delta);
3224 }
3225 
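// Worked example of the growth policy above: a 2KB buffer grows to the
// 4KB floor, 256KB doubles to 512KB, and 2MB grows linearly to 3MB,
// bounding reallocation cost for large code objects.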
3226 
3227 void Assembler::db(uint8_t data) {
3228  // No relocation info should be pending while using db. db is used
3229  // to write pure data with no pointers and the constant pool should
3230  // be emitted before using db.
3231  ASSERT(num_pending_32_bit_reloc_info_ == 0);
3232  ASSERT(num_pending_64_bit_reloc_info_ == 0);
3233  CheckBuffer();
3234  *reinterpret_cast<uint8_t*>(pc_) = data;
3235  pc_ += sizeof(uint8_t);
3236 }
3237 
3238 
3239 void Assembler::dd(uint32_t data) {
3240  // No relocation info should be pending while using dd. dd is used
3241  // to write pure data with no pointers and the constant pool should
3242  // be emitted before using dd.
3243  ASSERT(num_pending_32_bit_reloc_info_ == 0);
3244  ASSERT(num_pending_64_bit_reloc_info_ == 0);
3245  CheckBuffer();
3246  *reinterpret_cast<uint32_t*>(pc_) = data;
3247  pc_ += sizeof(uint32_t);
3248 }
3249 
3250 
3251 void Assembler::emit_code_stub_address(Code* stub) {
3252  CheckBuffer();
3253  *reinterpret_cast<uint32_t*>(pc_) =
3254  reinterpret_cast<uint32_t>(stub->instruction_start());
3255  pc_ += sizeof(uint32_t);
3256 }
3257 
3258 
3259 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
3260  RelocInfo rinfo(pc_, rmode, data, NULL);
3261  RecordRelocInfo(rinfo);
3262 }
3263 
3264 
3265 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
3266  if (!RelocInfo::IsNone(rinfo.rmode())) {
3267  // Don't record external references unless the heap will be serialized.
3268  if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
3269 #ifdef DEBUG
3270  if (!Serializer::enabled()) {
3271  Serializer::TooLateToEnableNow();
3272  }
3273 #endif
3274  if (!Serializer::enabled() && !emit_debug_code()) {
3275  return;
3276  }
3277  }
3278  ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
3279  if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
3280  RelocInfo reloc_info_with_ast_id(rinfo.pc(),
3281  rinfo.rmode(),
3282  RecordedAstId().ToInt(),
3283  NULL);
3284  ClearRecordedAstId();
3285  reloc_info_writer.Write(&reloc_info_with_ast_id);
3286  } else {
3287  reloc_info_writer.Write(&rinfo);
3288  }
3289  }
3290 }
3291 
3292 
3293 void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
3294  if (FLAG_enable_ool_constant_pool) {
3295  constant_pool_builder_.AddEntry(this, rinfo);
3296  } else {
3297  if (rinfo.rmode() == RelocInfo::NONE64) {
3298  ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
3299  if (num_pending_64_bit_reloc_info_ == 0) {
3300  first_const_pool_64_use_ = pc_offset();
3301  }
3302  pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
3303  } else {
3304  ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
3305  if (num_pending_32_bit_reloc_info_ == 0) {
3306  first_const_pool_32_use_ = pc_offset();
3307  }
3308  pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
3309  }
3310  // Make sure the constant pool is not emitted in place of the next
3311  // instruction for which we just recorded relocation info.
3312  BlockConstPoolFor(1);
3313  }
3314 }
3315 
3316 
3317 void Assembler::BlockConstPoolFor(int instructions) {
3318  if (FLAG_enable_ool_constant_pool) {
3319  // Should be a no-op if using an out-of-line constant pool.
3320  ASSERT(num_pending_32_bit_reloc_info_ == 0);
3321  ASSERT(num_pending_64_bit_reloc_info_ == 0);
3322  return;
3323  }
3324 
3325  int pc_limit = pc_offset() + instructions * kInstrSize;
3326  if (no_const_pool_before_ < pc_limit) {
3327  // Max pool start (if we need a jump and an alignment).
3328 #ifdef DEBUG
3329  int start = pc_limit + kInstrSize + 2 * kPointerSize;
3330  ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
3331  (start - first_const_pool_32_use_ +
3332  num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
3333  ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
3334  (start - first_const_pool_64_use_ < kMaxDistToFPPool));
3335 #endif
3336  no_const_pool_before_ = pc_limit;
3337  }
3338 
3339  if (next_buffer_check_ < no_const_pool_before_) {
3340  next_buffer_check_ = no_const_pool_before_;
3341  }
3342 }
3343 
3344 
3345 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
3346  if (FLAG_enable_ool_constant_pool) {
3347  // Should be a no-op if using an out-of-line constant pool.
3348  ASSERT(num_pending_32_bit_reloc_info_ == 0);
3349  ASSERT(num_pending_64_bit_reloc_info_ == 0);
3350  return;
3351  }
3352 
3353  // Some short sequences of instructions mustn't be broken up by constant pool
3354  // emission; such sequences are protected by calls to BlockConstPoolFor and
3355  // BlockConstPoolScope.
3356  if (is_const_pool_blocked()) {
3357  // Something is wrong if emission is forced and blocked at the same time.
3358  ASSERT(!force_emit);
3359  return;
3360  }
3361 
3362  // There is nothing to do if there are no pending constant pool entries.
3363  if ((num_pending_32_bit_reloc_info_ == 0) &&
3364  (num_pending_64_bit_reloc_info_ == 0)) {
3365  // Calculate the offset of the next check.
3366  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3367  return;
3368  }
3369 
3370  // Check that the code buffer is large enough before emitting the constant
3371  // pool (include the jump over the pool and the constant pool marker and
3372  // the gap to the relocation information).
3373  int jump_instr = require_jump ? kInstrSize : 0;
3374  int size_up_to_marker = jump_instr + kInstrSize;
3375  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
3376  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
3377  bool require_64_bit_align = false;
3378  if (has_fp_values) {
3379  require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
3380  if (require_64_bit_align) {
3381  size_after_marker += kInstrSize;
3382  }
3383  size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
3384  }
3385 
3386  int size = size_up_to_marker + size_after_marker;
3387 
3388  // We emit a constant pool when:
3389  // * requested to do so by parameter force_emit (e.g. after each function).
3390  // * the distance from the first instruction accessing the constant pool to
3391  // any of the constant pool entries will exceed its limit the next
3392  // time the pool is checked. This is overly restrictive, but we don't emit
3393  // constant pool entries in-order so it's conservatively correct.
3394  // * the instruction doesn't require a jump after itself to jump over the
3395  // constant pool, and we're getting close to running out of range.
3396  if (!force_emit) {
3397  ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
3398  bool need_emit = false;
3399  if (has_fp_values) {
3400  int dist64 = pc_offset() +
3401  size -
3402  num_pending_32_bit_reloc_info_ * kPointerSize -
3403  first_const_pool_64_use_;
3404  if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
3405  (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
3406  need_emit = true;
3407  }
3408  }
3409  int dist32 =
3410  pc_offset() + size - first_const_pool_32_use_;
3411  if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
3412  (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
3413  need_emit = true;
3414  }
3415  if (!need_emit) return;
3416  }
3417 
3418  int needed_space = size + kGap;
3419  while (buffer_space() <= needed_space) GrowBuffer();
3420 
3421  {
3422  // Block recursive calls to CheckConstPool.
3423  BlockConstPoolScope block_const_pool(this);
3424  RecordComment("[ Constant Pool");
3425  RecordConstPool(size);
3426 
3427  // Emit jump over constant pool if necessary.
3428  Label after_pool;
3429  if (require_jump) {
3430  b(&after_pool);
3431  }
3432 
3433  // Put down constant pool marker "Undefined instruction".
3434  // The data size helps disassembly know what to print.
3435  emit(kConstantPoolMarker |
3436  EncodeConstantPoolLength(size_after_marker / kPointerSize));
3437 
3438  if (require_64_bit_align) {
3439  emit(kConstantPoolMarker);
3440  }
3441 
3442  // Emit 64-bit constant pool entries first: their range is smaller than
3443  // 32-bit entries.
3444  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3445  RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3446 
3447  ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
3448 
3449  Instr instr = instr_at(rinfo.pc());
3450  // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
3451  ASSERT((IsVldrDPcImmediateOffset(instr) &&
3452  GetVldrDRegisterImmediateOffset(instr) == 0));
3453 
3454  int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3455  ASSERT(is_uint10(delta));
3456 
3457  bool found = false;
3458  uint64_t value = rinfo.raw_data64();
3459  for (int j = 0; j < i; j++) {
3460  RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
3461  if (value == rinfo2.raw_data64()) {
3462  found = true;
3463  ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
3464  Instr instr2 = instr_at(rinfo2.pc());
3465  ASSERT(IsVldrDPcImmediateOffset(instr2));
3466  delta = GetVldrDRegisterImmediateOffset(instr2);
3467  delta += rinfo2.pc() - rinfo.pc();
3468  break;
3469  }
3470  }
3471 
3472  instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
3473 
3474  if (!found) {
3475  uint64_t uint_data = rinfo.raw_data64();
3476  emit(uint_data & 0xFFFFFFFF);
3477  emit(uint_data >> 32);
3478  }
3479  }
3480 
3481  // Emit 32-bit constant pool entries.
3482  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3483  RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3484  ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
3485  rinfo.rmode() != RelocInfo::POSITION &&
3486  rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
3487  rinfo.rmode() != RelocInfo::CONST_POOL &&
3488  rinfo.rmode() != RelocInfo::NONE64);
3489 
3490  Instr instr = instr_at(rinfo.pc());
3491 
3492  // 64-bit loads shouldn't get here.
3493  ASSERT(!IsVldrDPcImmediateOffset(instr));
3494 
3495  if (IsLdrPcImmediateOffset(instr) &&
3496  GetLdrRegisterImmediateOffset(instr) == 0) {
3497  int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3498  ASSERT(is_uint12(delta));
3499  // 0 is the smallest delta:
3500  // ldr rd, [pc, #0]
3501  // constant pool marker
3502  // data
3503 
3504  bool found = false;
3505  if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
3506  for (int j = 0; j < i; j++) {
3507  RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
3508 
3509  if ((rinfo2.data() == rinfo.data()) &&
3510  (rinfo2.rmode() == rinfo.rmode())) {
3511  Instr instr2 = instr_at(rinfo2.pc());
3512  if (IsLdrPcImmediateOffset(instr2)) {
3513  delta = GetLdrRegisterImmediateOffset(instr2);
3514  delta += rinfo2.pc() - rinfo.pc();
3515  found = true;
3516  break;
3517  }
3518  }
3519  }
3520  }
3521 
3522  instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
3523 
3524  if (!found) {
3525  emit(rinfo.data());
3526  }
3527  } else {
3528  ASSERT(IsMovW(instr));
3529  }
3530  }
3531 
3532  num_pending_32_bit_reloc_info_ = 0;
3533  num_pending_64_bit_reloc_info_ = 0;
3534  first_const_pool_32_use_ = -1;
3535  first_const_pool_64_use_ = -1;
3536 
3537  RecordComment("]");
3538 
3539  if (after_pool.is_linked()) {
3540  bind(&after_pool);
3541  }
3542  }
3543 
3544  // Since a constant pool was just emitted, move the check offset forward by
3545  // the standard interval.
3546  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3547 }
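// [Editor's note] Putting the pieces together, a pool emitted by
// CheckConstPool has this shape (sketch, assuming require_jump):
//
//       b      after_pool
//       <marker>               ; udf instruction encoding pool length in words
//      [<marker>]              ; optional 64-bit alignment padding
//       <64-bit entries ...>   ; first: vldr reach is only 1020 bytes
//       <32-bit entries ...>   ; ldr reach is 4095 bytes
//   after_pool: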
3548 
3549 
3550 MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
3551  ASSERT(FLAG_enable_ool_constant_pool);
3552  return constant_pool_builder_.Allocate(heap);
3553 }
3554 
3555 
3556 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
3557  ASSERT(FLAG_enable_ool_constant_pool);
3558  constant_pool_builder_.Populate(this, constant_pool);
3559 }
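// [Editor's note] With FLAG_enable_ool_constant_pool the constants live in a
// separate ConstantPoolArray heap object instead of inline in the code, and
// loads are emitted against the pool pointer register pp rather than pc.
// A hypothetical caller sketch of the two-phase API above (the allocation
// handling is an assumption, not taken from this file):
//
//   Object* pool_obj;
//   if (assm->AllocateConstantPool(heap)->ToObject(&pool_obj)) {
//     assm->PopulateConstantPool(ConstantPoolArray::cast(pool_obj));
//   }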
3560 
3561 
3562 ConstantPoolBuilder::ConstantPoolBuilder()
3563  : entries_(),
3564  merged_indexes_(),
3565  count_of_64bit_(0),
3566  count_of_code_ptr_(0),
3567  count_of_heap_ptr_(0),
3568  count_of_32bit_(0) { }
3569 
3570 
3571 bool ConstantPoolBuilder::IsEmpty() {
3572  return entries_.size() == 0;
3573 }
3574 
3575 
3576 bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
3577  return rmode == RelocInfo::NONE64;
3578 }
3579 
3580 
3581 bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
3582  return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
3583 }
3584 
3585 
3586 bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
3587  return RelocInfo::IsCodeTarget(rmode);
3588 }
3589 
3590 
3591 bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
3592  return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
3593 }
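// [Editor's note] The four predicates above partition every RelocInfo mode
// into the four sections of a ConstantPoolArray, in the same order Populate()
// walks them below:
//
//   NONE64                      -> 64-bit entry   (int64 section, first)
//   code targets                -> code pointer   (code_ptr section)
//   other GC reloc modes        -> heap pointer   (heap_ptr section)
//   remaining non-GC modes      -> 32-bit entry   (int32 section, last)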
3594 
3595 
3596 void ConstantPoolBuilder::AddEntry(Assembler* assm,
3597  const RelocInfo& rinfo) {
3598  RelocInfo::Mode rmode = rinfo.rmode();
3599  ASSERT(rmode != RelocInfo::COMMENT &&
3600  rmode != RelocInfo::POSITION &&
3601  rmode != RelocInfo::STATEMENT_POSITION &&
3602  rmode != RelocInfo::CONST_POOL);
3603 
3604 
3605  // Try to merge entries which won't be patched.
3606  int merged_index = -1;
3607  if (RelocInfo::IsNone(rmode) ||
3608  (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
3609  size_t i;
3610  std::vector<RelocInfo>::const_iterator it;
3611  for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
3612  if (RelocInfo::IsEqual(rinfo, *it)) {
3613  merged_index = i;
3614  break;
3615  }
3616  }
3617  }
3618 
3619  entries_.push_back(rinfo);
3620  merged_indexes_.push_back(merged_index);
3621 
3622  if (merged_index == -1) {
3623  // Not merged, so update the appropriate count.
3624  if (Is64BitEntry(rmode)) {
3625  count_of_64bit_++;
3626  } else if (Is32BitEntry(rmode)) {
3627  count_of_32bit_++;
3628  } else if (IsCodePtrEntry(rmode)) {
3629  count_of_code_ptr_++;
3630  } else {
3631  ASSERT(IsHeapPtrEntry(rmode));
3632  count_of_heap_ptr_++;
3633  }
3634  }
3635 
3636  // Check if we still have room for another entry given Arm's ldr and vldr
3637  // immediate offset range.
3638  if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
3639  count_of_code_ptr_,
3640  count_of_heap_ptr_,
3641  count_of_32bit_))) &&
3642  is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) {
3643  assm->set_constant_pool_full();
3644  }
3645 }
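// [Editor's note] The range check above exists because pp-relative loads have
// the same immediate limits as the pc-relative forms:
//
//   ldr  rd, [pp, #imm]   ; imm is 12 bits -> whole array must stay < 4 KB
//   vldr dd, [pp, #imm]   ; imm is 8 bits scaled by 4 (<= 1020 bytes), and
//                         ;   the 64-bit section sits first in the array
//
// Once another entry could push an offset out of range,
// set_constant_pool_full() tells the assembler that no further entries may be
// added to this pool.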
3646 
3647 
3648 void ConstantPoolBuilder::Relocate(int pc_delta) {
3649  for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
3650  rinfo != entries_.end(); rinfo++) {
3651  ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
3652  rinfo->set_pc(rinfo->pc() + pc_delta);
3653  }
3654 }
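// [Editor's note] Relocate() exists because entries_ records absolute buffer
// addresses: when the assembler reallocates its code buffer (presumably from
// Assembler::GrowBuffer, which knows the pc_delta), every recorded pc must
// shift by the same amount as the code it points into.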
3655 
3656 
3657 MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
3658  if (IsEmpty()) {
3659  return heap->empty_constant_pool_array();
3660  } else {
3661  return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
3662  count_of_heap_ptr_, count_of_32bit_);
3663  }
3664 }
3665 
3666 
3667 void ConstantPoolBuilder::Populate(Assembler* assm,
3668  ConstantPoolArray* constant_pool) {
3669  ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
3670  ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
3671  ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
3672  ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
3673  ASSERT(entries_.size() == merged_indexes_.size());
3674 
3675  int index_64bit = 0;
3676  int index_code_ptr = count_of_64bit_;
3677  int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
3678  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
3679 
3680  size_t i;
3681  std::vector<RelocInfo>::const_iterator rinfo;
3682  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
3683  RelocInfo::Mode rmode = rinfo->rmode();
3684 
3685  // Update constant pool if necessary and get the entry's offset.
3686  int offset;
3687  if (merged_indexes_[i] == -1) {
3688  if (Is64BitEntry(rmode)) {
3689  offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
3690  constant_pool->set(index_64bit++, rinfo->data64());
3691  } else if (Is32BitEntry(rmode)) {
3692  offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
3693  constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
3694  } else if (IsCodePtrEntry(rmode)) {
3695  offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
3696  kHeapObjectTag;
3697  constant_pool->set(index_code_ptr++,
3698  reinterpret_cast<Object *>(rinfo->data()));
3699  } else {
3700  ASSERT(IsHeapPtrEntry(rmode));
3701  offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
3702  kHeapObjectTag;
3703  constant_pool->set(index_heap_ptr++,
3704  reinterpret_cast<Object *>(rinfo->data()));
3705  }
3706  merged_indexes_[i] = offset; // Stash offset for merged entries.
3707  } else {
3708  size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
3709  ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
3710  offset = merged_indexes_[merged_index];
3711  }
3712 
3713  // Patch vldr/ldr instruction with correct offset.
3714  Instr instr = assm->instr_at(rinfo->pc());
3715  if (Is64BitEntry(rmode)) {
3716  // Instruction to patch must be 'vldr rd, [pp, #0]'.
3717  ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
3718  Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
3719  ASSERT(is_uint10(offset));
3720  assm->instr_at_put(rinfo->pc(),
3721  Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
3722  } else {
3723  // Instruction to patch must be 'ldr rd, [pp, #0]'.
3724  ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
3725  Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
3726  ASSERT(is_uint12(offset));
3727  assm->instr_at_put(rinfo->pc(),
3728  Assembler::SetLdrRegisterImmediateOffset(instr, offset));
3729  }
3730  }
3731 
3732  ASSERT((index_64bit == count_of_64bit_) &&
3733  (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
3734  (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
3735  (index_32bit == (index_heap_ptr + count_of_32bit_)));
3736 }
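// [Editor's note] On the kHeapObjectTag subtraction above: pp holds a tagged
// pointer to the ConstantPoolArray (low bit set), so for an element at byte
// offset F from the object's start the load must use F - kHeapObjectTag:
//
//   entry address = pp - kHeapObjectTag + OffsetOfElementAt(i)
//                 = pp + (OffsetOfElementAt(i) - kHeapObjectTag)
//
// Merged entries reuse the stashed offset of their first occurrence, so all
// duplicates patch to the same slot.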
3737 
3738 
3739 } } // namespace v8::internal
3740 
3741 #endif // V8_TARGET_ARCH_ARM