v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
assembler-arm.cc
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 #include "v8.h"
38 
39 #if defined(V8_TARGET_ARCH_ARM)
40 
41 #include "arm/assembler-arm-inl.h"
42 #include "serialize.h"
43 
44 namespace v8 {
45 namespace internal {
46 
47 #ifdef DEBUG
48 bool CpuFeatures::initialized_ = false;
49 #endif
50 unsigned CpuFeatures::supported_ = 0;
51 unsigned CpuFeatures::found_by_runtime_probing_ = 0;
52 
53 
54 // Get the CPU features enabled by the build. For cross compilation the
55 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
56 // can be defined to enable ARMv7 and VFPv3 instructions when building the
57 // snapshot.
58 static unsigned CpuFeaturesImpliedByCompiler() {
59  unsigned answer = 0;
60 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
61  answer |= 1u << ARMv7;
62 #endif // CAN_USE_ARMV7_INSTRUCTIONS
63 #ifdef CAN_USE_VFP3_INSTRUCTIONS
64  answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
65 #endif // CAN_USE_VFP3_INSTRUCTIONS
66 #ifdef CAN_USE_VFP2_INSTRUCTIONS
67  answer |= 1u << VFP2;
68 #endif // CAN_USE_VFP2_INSTRUCTIONS
69 
70 #ifdef __arm__
71  // If the compiler is allowed to use VFP then we can use VFP too in our code
72  // generation even when generating snapshots. ARMv7 and hardware floating
73  // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
74 #if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
75  && !defined(__SOFTFP__)
76  answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
77 #endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
78  // && !defined(__SOFTFP__)
79 #endif // __arm__
80  if (answer & (1u << ARMv7)) {
81  answer |= 1u << UNALIGNED_ACCESSES;
82  }
83 
84  return answer;
85 }
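// For example, a cross-compile snapshot build that defines only
// CAN_USE_VFP3_INSTRUCTIONS returns
//   1u << VFP3 | 1u << VFP2 | 1u << ARMv7 | 1u << UNALIGNED_ACCESSES,
// since VFPv3 implies VFP2 and ARMv7 here, and ARMv7 in turn implies
// support for unaligned accesses.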
86 
87 
88 void CpuFeatures::Probe() {
89  unsigned standard_features = static_cast<unsigned>(
90  OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
91  ASSERT(supported_ == 0 || supported_ == standard_features);
92 #ifdef DEBUG
93  initialized_ = true;
94 #endif
95 
96  // Get the features implied by the OS and the compiler settings. This is the
97  // minimal set of features which is also allowed for generated code in the
98  // snapshot.
99  supported_ |= standard_features;
100 
101  if (Serializer::enabled()) {
102  // No probing for features if we might serialize (generate snapshot).
103  return;
104  }
105 
106 #ifndef __arm__
107  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
108  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
109  if (FLAG_enable_vfp3) {
110  supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
111  }
112  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
113  if (FLAG_enable_armv7) {
114  supported_ |= 1u << ARMv7;
115  }
116 
117  if (FLAG_enable_sudiv) {
118  supported_ |= 1u << SUDIV;
119  }
120 
121  if (FLAG_enable_movw_movt) {
122  supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
123  }
124 #else // __arm__
125  // Probe for additional features not already known to be available.
126  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
127  // This implementation also sets the VFP flags if runtime
128  // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
129  // 0406B, page A1-6.
130  found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
131  } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
132  found_by_runtime_probing_ |= 1u << VFP2;
133  }
134 
135  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
136  found_by_runtime_probing_ |= 1u << ARMv7;
137  }
138 
139  if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
140  found_by_runtime_probing_ |= 1u << SUDIV;
141  }
142 
143  if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
144  found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
145  }
146 
147  if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
148  OS::ArmCpuHasFeature(ARMv7)) {
149  found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
150  }
151 
152  supported_ |= found_by_runtime_probing_;
153 #endif
154 
155  // Assert that VFP3 implies VFP2 and ARMv7.
156  ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
157 }
158 
159 
160 // -----------------------------------------------------------------------------
161 // Implementation of RelocInfo
162 
163 const int RelocInfo::kApplyMask = 0;
164 
165 
166 bool RelocInfo::IsCodedSpecially() {
167  // The deserializer needs to know whether a pointer is specially coded. Being
168  // specially coded on ARM means that it is a movw/movt instruction. We don't
169  // generate those yet.
170  return false;
171 }
172 
173 
174 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
175  // Patch the code at the current address with the supplied instructions.
176  Instr* pc = reinterpret_cast<Instr*>(pc_);
177  Instr* instr = reinterpret_cast<Instr*>(instructions);
178  for (int i = 0; i < instruction_count; i++) {
179  *(pc + i) = *(instr + i);
180  }
181 
182  // Indicate that code has changed.
183  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
184 }
185 
186 
187 // Patch the code at the current PC with a call to the target address.
188 // Additional guard instructions can be added if required.
189 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
190  // Patch the code at the current address with a call to the target.
191  UNIMPLEMENTED();
192 }
193 
194 
195 // -----------------------------------------------------------------------------
196 // Implementation of Operand and MemOperand
197 // See assembler-arm-inl.h for inlined constructors
198 
199 Operand::Operand(Handle<Object> handle) {
200  rm_ = no_reg;
201  // Verify all Objects referred to by code are NOT in new space.
202  Object* obj = *handle;
203  ASSERT(!HEAP->InNewSpace(obj));
204  if (obj->IsHeapObject()) {
205  imm32_ = reinterpret_cast<intptr_t>(handle.location());
206  rmode_ = RelocInfo::EMBEDDED_OBJECT;
207  } else {
208  // no relocation needed
209  imm32_ = reinterpret_cast<intptr_t>(obj);
210  rmode_ = RelocInfo::NONE;
211  }
212 }
213 
214 
215 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
216  ASSERT(is_uint5(shift_imm));
217  ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
218  rm_ = rm;
219  rs_ = no_reg;
220  shift_op_ = shift_op;
221  shift_imm_ = shift_imm & 31;
222  if (shift_op == RRX) {
223  // encoded as ROR with shift_imm == 0
224  ASSERT(shift_imm == 0);
225  shift_op_ = ROR;
226  shift_imm_ = 0;
227  }
228 }
229 
230 
231 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
232  ASSERT(shift_op != RRX);
233  rm_ = rm;
234  rs_ = no_reg;
235  shift_op_ = shift_op;
236  rs_ = rs;
237 }
238 
239 
240 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
241  rn_ = rn;
242  rm_ = no_reg;
243  offset_ = offset;
244  am_ = am;
245 }
246 
247 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
248  rn_ = rn;
249  rm_ = rm;
250  shift_op_ = LSL;
251  shift_imm_ = 0;
252  am_ = am;
253 }
254 
255 
256 MemOperand::MemOperand(Register rn, Register rm,
257  ShiftOp shift_op, int shift_imm, AddrMode am) {
258  ASSERT(is_uint5(shift_imm));
259  rn_ = rn;
260  rm_ = rm;
261  shift_op_ = shift_op;
262  shift_imm_ = shift_imm & 31;
263  am_ = am;
264 }
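// Illustrative uses of the three constructors above (hypothetical calls):
//   MemOperand(fp, 8)            encodes [fp, #+8]         (immediate offset)
//   MemOperand(r0, r1)           encodes [r0, +r1]         (register offset)
//   MemOperand(r0, r1, LSL, 2)   encodes [r0, +r1, LSL #2] (scaled register)
// The AddrMode argument selects Offset, PreIndex or PostIndex addressing.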
265 
266 
267 // -----------------------------------------------------------------------------
268 // Specific instructions, constants, and masks.
269 
270 // add(sp, sp, 4) instruction (aka Pop())
271 const Instr kPopInstruction =
272  al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
273  kRegister_sp_Code * B12;
274 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
275 // register r is not encoded.
276 const Instr kPushRegPattern =
277  al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
278 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
279 // register r is not encoded.
280 const Instr kPopRegPattern =
281  al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
282 // mov lr, pc
283 const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
284 // ldr rd, [pc, #offset]
285 const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
286 const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
287 // blxcc rm
288 const Instr kBlxRegMask =
289  15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
290 const Instr kBlxRegPattern =
291  B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
292 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
293 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
294 const Instr kMovMvnPattern = 0xd * B21;
295 const Instr kMovMvnFlip = B22;
296 const Instr kMovLeaveCCMask = 0xdff * B16;
297 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
298 const Instr kMovwMask = 0xff * B20;
299 const Instr kMovwPattern = 0x30 * B20;
300 const Instr kMovwLeaveCCFlip = 0x5 * B21;
301 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
302 const Instr kCmpCmnPattern = 0x15 * B20;
303 const Instr kCmpCmnFlip = B21;
304 const Instr kAddSubFlip = 0x6 * B21;
305 const Instr kAndBicFlip = 0xe * B21;
306 
307 // A mask for the Rd register for push, pop, ldr, str instructions.
308 const Instr kLdrRegFpOffsetPattern =
309  al | B26 | L | Offset | kRegister_fp_Code * B16;
310 const Instr kStrRegFpOffsetPattern =
311  al | B26 | Offset | kRegister_fp_Code * B16;
312 const Instr kLdrRegFpNegOffsetPattern =
313  al | B26 | L | NegOffset | kRegister_fp_Code * B16;
314 const Instr kStrRegFpNegOffsetPattern =
315  al | B26 | NegOffset | kRegister_fp_Code * B16;
316 const Instr kLdrStrInstrTypeMask = 0xffff0000;
317 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
318 const Instr kLdrStrOffsetMask = 0x00000fff;
319 
320 
321 // Spare buffer.
322 static const int kMinimalBufferSize = 4*KB;
323 
324 
325 Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
326  : AssemblerBase(arg_isolate),
327  recorded_ast_id_(TypeFeedbackId::None()),
328  positions_recorder_(this),
329  emit_debug_code_(FLAG_debug_code),
330  predictable_code_size_(false) {
331  if (buffer == NULL) {
332  // Do our own buffer management.
333  if (buffer_size <= kMinimalBufferSize) {
334  buffer_size = kMinimalBufferSize;
335 
336  if (isolate()->assembler_spare_buffer() != NULL) {
337  buffer = isolate()->assembler_spare_buffer();
338  isolate()->set_assembler_spare_buffer(NULL);
339  }
340  }
341  if (buffer == NULL) {
342  buffer_ = NewArray<byte>(buffer_size);
343  } else {
344  buffer_ = static_cast<byte*>(buffer);
345  }
346  buffer_size_ = buffer_size;
347  own_buffer_ = true;
348 
349  } else {
350  // Use externally provided buffer instead.
351  ASSERT(buffer_size > 0);
352  buffer_ = static_cast<byte*>(buffer);
353  buffer_size_ = buffer_size;
354  own_buffer_ = false;
355  }
356 
357  // Set up buffer pointers.
358  ASSERT(buffer_ != NULL);
359  pc_ = buffer_;
360  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
361  num_pending_reloc_info_ = 0;
362  next_buffer_check_ = 0;
363  const_pool_blocked_nesting_ = 0;
364  no_const_pool_before_ = 0;
365  first_const_pool_use_ = -1;
366  last_bound_pos_ = 0;
367  ClearRecordedAstId();
368 }
369 
370 
371 Assembler::~Assembler() {
372  ASSERT(const_pool_blocked_nesting_ == 0);
373  if (own_buffer_) {
374  if (isolate()->assembler_spare_buffer() == NULL &&
375  buffer_size_ == kMinimalBufferSize) {
376  isolate()->set_assembler_spare_buffer(buffer_);
377  } else {
378  DeleteArray(buffer_);
379  }
380  }
381 }
382 
383 
384 void Assembler::GetCode(CodeDesc* desc) {
385  // Emit constant pool if necessary.
386  CheckConstPool(true, false);
387  ASSERT(num_pending_reloc_info_ == 0);
388 
389  // Set up code descriptor.
390  desc->buffer = buffer_;
391  desc->buffer_size = buffer_size_;
392  desc->instr_size = pc_offset();
393  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
394 }
395 
396 
397 void Assembler::Align(int m) {
398  ASSERT(m >= 4 && IsPowerOf2(m));
399  while ((pc_offset() & (m - 1)) != 0) {
400  nop();
401  }
402 }
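// For example, with pc_offset() == 12, Align(8) emits a single nop so that
// the next instruction is emitted at offset 16.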
403 
404 
405 void Assembler::CodeTargetAlign() {
406  // Preferred alignment of jump targets on some ARM chips.
407  Align(8);
408 }
409 
410 
411 Condition Assembler::GetCondition(Instr instr) {
412  return Instruction::ConditionField(instr);
413 }
414 
415 
416 bool Assembler::IsBranch(Instr instr) {
417  return (instr & (B27 | B25)) == (B27 | B25);
418 }
419 
420 
421 int Assembler::GetBranchOffset(Instr instr) {
422  ASSERT(IsBranch(instr));
423  // Take the jump offset in the lower 24 bits, sign extend it and multiply
424  // it by 4 to get the offset in bytes.
425  return ((instr & kImm24Mask) << 8) >> 6;
426 }
427 
428 
429 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
430  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
431 }
432 
433 
434 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
435  ASSERT(IsLdrRegisterImmediate(instr));
436  bool positive = (instr & B23) == B23;
437  int offset = instr & kOff12Mask; // Zero extended offset.
438  return positive ? offset : -offset;
439 }
440 
441 
442 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
443  ASSERT(IsLdrRegisterImmediate(instr));
444  bool positive = offset >= 0;
445  if (!positive) offset = -offset;
446  ASSERT(is_uint12(offset));
447  // Set bit indicating whether the offset should be added.
448  instr = (instr & ~B23) | (positive ? B23 : 0);
449  // Set the actual offset.
450  return (instr & ~kOff12Mask) | offset;
451 }
452 
453 
454 bool Assembler::IsStrRegisterImmediate(Instr instr) {
455  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
456 }
457 
458 
459 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
460  ASSERT(IsStrRegisterImmediate(instr));
461  bool positive = offset >= 0;
462  if (!positive) offset = -offset;
463  ASSERT(is_uint12(offset));
464  // Set bit indicating whether the offset should be added.
465  instr = (instr & ~B23) | (positive ? B23 : 0);
466  // Set the actual offset.
467  return (instr & ~kOff12Mask) | offset;
468 }
469 
470 
471 bool Assembler::IsAddRegisterImmediate(Instr instr) {
472  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
473 }
474 
475 
476 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
477  ASSERT(IsAddRegisterImmediate(instr));
478  ASSERT(offset >= 0);
479  ASSERT(is_uint12(offset));
480  // Set the offset.
481  return (instr & ~kOff12Mask) | offset;
482 }
483 
484 
485 Register Assembler::GetRd(Instr instr) {
486  Register reg;
487  reg.code_ = Instruction::RdValue(instr);
488  return reg;
489 }
490 
491 
492 Register Assembler::GetRn(Instr instr) {
493  Register reg;
494  reg.code_ = Instruction::RnValue(instr);
495  return reg;
496 }
497 
498 
499 Register Assembler::GetRm(Instr instr) {
500  Register reg;
501  reg.code_ = Instruction::RmValue(instr);
502  return reg;
503 }
504 
505 
506 bool Assembler::IsPush(Instr instr) {
507  return ((instr & ~kRdMask) == kPushRegPattern);
508 }
509 
510 
511 bool Assembler::IsPop(Instr instr) {
512  return ((instr & ~kRdMask) == kPopRegPattern);
513 }
514 
515 
516 bool Assembler::IsStrRegFpOffset(Instr instr) {
517  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
518 }
519 
520 
521 bool Assembler::IsLdrRegFpOffset(Instr instr) {
522  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
523 }
524 
525 
526 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
527  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
528 }
529 
530 
531 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
532  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
533 }
534 
535 
536 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
537  // Check the instruction is indeed a
538  // ldr<cond> <Rd>, [pc +/- offset_12].
539  return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
540 }
541 
542 
543 bool Assembler::IsTstImmediate(Instr instr) {
544  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
545  (I | TST | S);
546 }
547 
548 
549 bool Assembler::IsCmpRegister(Instr instr) {
550  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
551  (CMP | S);
552 }
553 
554 
555 bool Assembler::IsCmpImmediate(Instr instr) {
556  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
557  (I | CMP | S);
558 }
559 
560 
561 Register Assembler::GetCmpImmediateRegister(Instr instr) {
562  ASSERT(IsCmpImmediate(instr));
563  return GetRn(instr);
564 }
565 
566 
567 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
568  ASSERT(IsCmpImmediate(instr));
569  return instr & kOff12Mask;
570 }
571 
572 // Labels refer to positions in the (to be) generated code.
573 // There are bound, linked, and unused labels.
574 //
575 // Bound labels refer to known positions in the already
576 // generated code. pos() is the position the label refers to.
577 //
578 // Linked labels refer to unknown positions in the code
579 // to be generated; pos() is the position of the last
580 // instruction using the label.
581 
582 
583 // The link chain is terminated by a negative code position (must be aligned)
584 const int kEndOfChain = -4;
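// For example, three not-yet-bound uses of one label at code offsets 0, 8
// and 20 form the chain 20 -> 8 -> 0 -> kEndOfChain: each linked instruction
// encodes the position of the previous use until bind_to() walks the chain
// and patches in the real target position.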
585 
586 
587 int Assembler::target_at(int pos) {
588  Instr instr = instr_at(pos);
589  if ((instr & ~kImm24Mask) == 0) {
590  // Emitted label constant, not part of a branch.
591  return instr - (Code::kHeaderSize - kHeapObjectTag);
592  }
593  ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
594  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
595  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
596  ((instr & B24) != 0)) {
597  // blx uses bit 24 to encode bit 2 of imm26
598  imm26 += 2;
599  }
600  return pos + kPcLoadDelta + imm26;
601 }
602 
603 
604 void Assembler::target_at_put(int pos, int target_pos) {
605  Instr instr = instr_at(pos);
606  if ((instr & ~kImm24Mask) == 0) {
607  ASSERT(target_pos == kEndOfChain || target_pos >= 0);
608  // Emitted label constant, not part of a branch.
609  // Make label relative to Code* of generated Code object.
610  instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
611  return;
612  }
613  int imm26 = target_pos - (pos + kPcLoadDelta);
614  ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
615  if (Instruction::ConditionField(instr) == kSpecialCondition) {
616  // blx uses bit 24 to encode bit 2 of imm26
617  ASSERT((imm26 & 1) == 0);
618  instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
619  } else {
620  ASSERT((imm26 & 3) == 0);
621  instr &= ~kImm24Mask;
622  }
623  int imm24 = imm26 >> 2;
624  ASSERT(is_int24(imm24));
625  instr_at_put(pos, instr | (imm24 & kImm24Mask));
626 }
627 
628 
629 void Assembler::print(Label* L) {
630  if (L->is_unused()) {
631  PrintF("unused label\n");
632  } else if (L->is_bound()) {
633  PrintF("bound label to %d\n", L->pos());
634  } else if (L->is_linked()) {
635  Label l = *L;
636  PrintF("unbound label");
637  while (l.is_linked()) {
638  PrintF("@ %d ", l.pos());
639  Instr instr = instr_at(l.pos());
640  if ((instr & ~kImm24Mask) == 0) {
641  PrintF("value\n");
642  } else {
643  ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
644  Condition cond = Instruction::ConditionField(instr);
645  const char* b;
646  const char* c;
647  if (cond == kSpecialCondition) {
648  b = "blx";
649  c = "";
650  } else {
651  if ((instr & B24) != 0)
652  b = "bl";
653  else
654  b = "b";
655 
656  switch (cond) {
657  case eq: c = "eq"; break;
658  case ne: c = "ne"; break;
659  case hs: c = "hs"; break;
660  case lo: c = "lo"; break;
661  case mi: c = "mi"; break;
662  case pl: c = "pl"; break;
663  case vs: c = "vs"; break;
664  case vc: c = "vc"; break;
665  case hi: c = "hi"; break;
666  case ls: c = "ls"; break;
667  case ge: c = "ge"; break;
668  case lt: c = "lt"; break;
669  case gt: c = "gt"; break;
670  case le: c = "le"; break;
671  case al: c = ""; break;
672  default:
673  c = "";
674  UNREACHABLE();
675  }
676  }
677  PrintF("%s%s\n", b, c);
678  }
679  next(&l);
680  }
681  } else {
682  PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
683  }
684 }
685 
686 
687 void Assembler::bind_to(Label* L, int pos) {
688  ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
689  while (L->is_linked()) {
690  int fixup_pos = L->pos();
691  next(L); // call next before overwriting link with target at fixup_pos
692  target_at_put(fixup_pos, pos);
693  }
694  L->bind_to(pos);
695 
696  // Keep track of the last bound label so we don't eliminate any instructions
697  // before a bound label.
698  if (pos > last_bound_pos_)
699  last_bound_pos_ = pos;
700 }
701 
702 
703 void Assembler::link_to(Label* L, Label* appendix) {
704  if (appendix->is_linked()) {
705  if (L->is_linked()) {
706  // Append appendix to L's list.
707  int fixup_pos;
708  int link = L->pos();
709  do {
710  fixup_pos = link;
711  link = target_at(fixup_pos);
712  } while (link > 0);
713  ASSERT(link == kEndOfChain);
714  target_at_put(fixup_pos, appendix->pos());
715  } else {
716  // L is empty, simply use appendix.
717  *L = *appendix;
718  }
719  }
720  appendix->Unuse(); // appendix should not be used anymore
721 }
722 
723 
724 void Assembler::bind(Label* L) {
725  ASSERT(!L->is_bound()); // label can only be bound once
726  bind_to(L, pc_offset());
727 }
728 
729 
730 void Assembler::next(Label* L) {
731  ASSERT(L->is_linked());
732  int link = target_at(L->pos());
733  if (link == kEndOfChain) {
734  L->Unuse();
735  } else {
736  ASSERT(link >= 0);
737  L->link_to(link);
738  }
739 }
740 
741 
742 // Low-level code emission routines depending on the addressing mode.
743 // If this returns true then you have to use the rotate_imm and immed_8
744 // that it returns, because it may have already changed the instruction
745 // to match them!
746 static bool fits_shifter(uint32_t imm32,
747  uint32_t* rotate_imm,
748  uint32_t* immed_8,
749  Instr* instr) {
750  // imm32 must be unsigned.
751  for (int rot = 0; rot < 16; rot++) {
752  uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
753  if ((imm8 <= 0xff)) {
754  *rotate_imm = rot;
755  *immed_8 = imm8;
756  return true;
757  }
758  }
759  // If the opcode is one with a complementary version and the complementary
760  // immediate fits, change the opcode.
761  if (instr != NULL) {
762  if ((*instr & kMovMvnMask) == kMovMvnPattern) {
763  if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
764  *instr ^= kMovMvnFlip;
765  return true;
766  } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
767  if (CpuFeatures::IsSupported(ARMv7)) {
768  if (imm32 < 0x10000) {
769  *instr ^= kMovwLeaveCCFlip;
770  *instr |= EncodeMovwImmediate(imm32);
771  *rotate_imm = *immed_8 = 0; // Not used for movw.
772  return true;
773  }
774  }
775  }
776  } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
777  if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
778  *instr ^= kCmpCmnFlip;
779  return true;
780  }
781  } else {
782  Instr alu_insn = (*instr & kALUMask);
783  if (alu_insn == ADD ||
784  alu_insn == SUB) {
785  if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
786  *instr ^= kAddSubFlip;
787  return true;
788  }
789  } else if (alu_insn == AND ||
790  alu_insn == BIC) {
791  if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
792  *instr ^= kAndBicFlip;
793  return true;
794  }
795  }
796  }
797  }
798  return false;
799 }
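// Worked example: imm32 == 0xff000000 fits at rot == 4, since rotating left
// by 2*4 == 8 bits gives imm8 == 0xff; the operand is then encoded as
// immed_8 == 0xff with rotate_imm == 4 (0xff ror 8 == 0xff000000). A value
// like 0x00001234 spans more than 8 bits under every even rotation, so it
// does not fit and must come from the constant pool or a movw/movt pair.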
800 
801 
802 // We have to use the temporary register for things that can be relocated even
803 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
804 // space. There is no guarantee that the relocated location can be similarly
805 // encoded.
806 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
807  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
808 #ifdef DEBUG
809  if (!Serializer::enabled()) {
810  Serializer::TooLateToEnableNow();
811  }
812 #endif // def DEBUG
813  if (assembler != NULL && assembler->predictable_code_size()) return true;
814  return Serializer::enabled();
815  } else if (rmode_ == RelocInfo::NONE) {
816  return false;
817  }
818  return true;
819 }
820 
821 
822 static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
823  if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
824  return true;
825  }
826  if (x.must_output_reloc_info(assembler)) {
827  return false;
828  }
829  return CpuFeatures::IsSupported(ARMv7);
830 }
831 
832 
833 bool Operand::is_single_instruction(const Assembler* assembler,
834  Instr instr) const {
835  if (rm_.is_valid()) return true;
836  uint32_t dummy1, dummy2;
837  if (must_output_reloc_info(assembler) ||
838  !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
839  // The immediate operand cannot be encoded as a shifter operand, or use of
840  // constant pool is required. For a mov instruction not setting the
841  // condition code additional instruction conventions can be used.
842  if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
843  return !use_movw_movt(*this, assembler);
844  } else {
845  // If this is not a mov or mvn instruction there will always be an
846  // additional instruction - either a mov or an ldr. The mov might actually
847  // be two instructions (movw followed by movt), so including the actual
848  // instruction two or three instructions will be generated.
849  return false;
850  }
851  } else {
852  // No use of constant pool and the immediate operand can be encoded as a
853  // shifter operand.
854  return true;
855  }
856 }
857 
858 
859 void Assembler::move_32_bit_immediate(Condition cond,
860  Register rd,
861  SBit s,
862  const Operand& x) {
863  if (rd.code() != pc.code() && s == LeaveCC) {
864  if (use_movw_movt(x, this)) {
865  if (x.must_output_reloc_info(this)) {
866  RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
867  // Make sure the movw/movt doesn't get separated.
868  BlockConstPoolFor(2);
869  }
870  emit(cond | 0x30*B20 | rd.code()*B12 |
871  EncodeMovwImmediate(x.imm32_ & 0xffff));
872  movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
873  return;
874  }
875  }
876 
877  RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
878  ldr(rd, MemOperand(pc, 0), cond);
879 }
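// For example, loading 0x12345678 on ARMv7 yields the pair
//   movw rd, #0x5678
//   movt rd, #0x1234
// while pre-ARMv7 targets get a pc-relative ldr from the constant pool.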
880 
881 
882 void Assembler::addrmod1(Instr instr,
883  Register rn,
884  Register rd,
885  const Operand& x) {
886  CheckBuffer();
887  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
888  if (!x.rm_.is_valid()) {
889  // Immediate.
890  uint32_t rotate_imm;
891  uint32_t immed_8;
892  if (x.must_output_reloc_info(this) ||
893  !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
894  // The immediate operand cannot be encoded as a shifter operand, so load
895  // it first to register ip and change the original instruction to use ip.
896  // However, if the original instruction is a 'mov rd, x' (not setting the
897  // condition code), then replace it with a 'ldr rd, [pc]'.
898  CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
899  Condition cond = Instruction::ConditionField(instr);
900  if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
901  move_32_bit_immediate(cond, rd, LeaveCC, x);
902  } else {
903  if ((instr & kMovMvnMask) == kMovMvnPattern) {
904  // Moves need to use a constant pool entry.
905  RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
906  ldr(ip, MemOperand(pc, 0), cond);
907  } else if (x.must_output_reloc_info(this)) {
908  // Otherwise, use most efficient form of fetching from constant pool.
909  move_32_bit_immediate(cond, ip, LeaveCC, x);
910  } else {
911  // If this is not a mov or mvn instruction we may still be able to
912  // avoid a constant pool entry by using mvn or movw.
913  mov(ip, x, LeaveCC, cond);
914  }
915  addrmod1(instr, rn, rd, Operand(ip));
916  }
917  return;
918  }
919  instr |= I | rotate_imm*B8 | immed_8;
920  } else if (!x.rs_.is_valid()) {
921  // Immediate shift.
922  instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
923  } else {
924  // Register shift.
925  ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
926  instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
927  }
928  emit(instr | rn.code()*B16 | rd.code()*B12);
929  if (rn.is(pc) || x.rm_.is(pc)) {
930  // Block constant pool emission for one instruction after reading pc.
931  BlockConstPoolFor(1);
932  }
933 }
934 
935 
936 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
937  ASSERT((instr & ~(kCondMask | B | L)) == B26);
938  int am = x.am_;
939  if (!x.rm_.is_valid()) {
940  // Immediate offset.
941  int offset_12 = x.offset_;
942  if (offset_12 < 0) {
943  offset_12 = -offset_12;
944  am ^= U;
945  }
946  if (!is_uint12(offset_12)) {
947  // Immediate offset cannot be encoded, load it first to register ip
948  // rn (and rd in a load) should never be ip, or will be trashed.
949  ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
950  mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
951  addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
952  return;
953  }
954  ASSERT(offset_12 >= 0); // no masking needed
955  instr |= offset_12;
956  } else {
957  // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
958  // offset; the constructors make sure that both shift_imm_ and shift_op_
959  // are initialized.
960  ASSERT(!x.rm_.is(pc));
961  instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
962  }
963  ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
964  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
965 }
966 
967 
968 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
969  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
970  ASSERT(x.rn_.is_valid());
971  int am = x.am_;
972  if (!x.rm_.is_valid()) {
973  // Immediate offset.
974  int offset_8 = x.offset_;
975  if (offset_8 < 0) {
976  offset_8 = -offset_8;
977  am ^= U;
978  }
979  if (!is_uint8(offset_8)) {
980  // Immediate offset cannot be encoded, load it first to register ip
981  // rn (and rd in a load) should never be ip, or will be trashed.
982  ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
983  mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
984  addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
985  return;
986  }
987  ASSERT(offset_8 >= 0); // no masking needed
988  instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
989  } else if (x.shift_imm_ != 0) {
990  // Scaled register offset not supported, load index first
991  // rn (and rd in a load) should never be ip, or will be trashed.
992  ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
993  mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
994  Instruction::ConditionField(instr));
995  addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
996  return;
997  } else {
998  // Register offset.
999  ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
1000  instr |= x.rm_.code();
1001  }
1002  ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1003  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
1004 }
1005 
1006 
1007 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
1008  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
1009  ASSERT(rl != 0);
1010  ASSERT(!rn.is(pc));
1011  emit(instr | rn.code()*B16 | rl);
1012 }
1013 
1014 
1015 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
1016  // Unindexed addressing is not encoded by this function.
1017  ASSERT_EQ((B27 | B26),
1018  (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
1019  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
1020  int am = x.am_;
1021  int offset_8 = x.offset_;
1022  ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
1023  offset_8 >>= 2;
1024  if (offset_8 < 0) {
1025  offset_8 = -offset_8;
1026  am ^= U;
1027  }
1028  ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
1029  ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
1030 
1031  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
1032  if ((am & P) == 0)
1033  am |= W;
1034 
1035  ASSERT(offset_8 >= 0); // no masking needed
1036  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
1037 }
1038 
1039 
1040 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1041  int target_pos;
1042  if (L->is_bound()) {
1043  target_pos = L->pos();
1044  } else {
1045  if (L->is_linked()) {
1046  target_pos = L->pos(); // L's link
1047  } else {
1048  target_pos = kEndOfChain;
1049  }
1050  L->link_to(pc_offset());
1051  }
1052 
1053  // Block the emission of the constant pool, since the branch instruction must
1054  // be emitted at the pc offset recorded by the label.
1055  BlockConstPoolFor(1);
1056  return target_pos - (pc_offset() + kPcLoadDelta);
1057 }
1058 
1059 
1060 void Assembler::label_at_put(Label* L, int at_offset) {
1061  int target_pos;
1062  if (L->is_bound()) {
1063  target_pos = L->pos();
1064  } else {
1065  if (L->is_linked()) {
1066  target_pos = L->pos(); // L's link
1067  } else {
1068  target_pos = kEndOfChain;
1069  }
1070  L->link_to(at_offset);
1071  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1072  }
1073 }
1074 
1075 
1076 // Branch instructions.
1077 void Assembler::b(int branch_offset, Condition cond) {
1078  ASSERT((branch_offset & 3) == 0);
1079  int imm24 = branch_offset >> 2;
1080  ASSERT(is_int24(imm24));
1081  emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1082 
1083  if (cond == al) {
1084  // Dead code is a good location to emit the constant pool.
1085  CheckConstPool(false, false);
1086  }
1087 }
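// Example: branching to an instruction 16 bytes past the branch site gives
// branch_offset == 16 - kPcLoadDelta == 8 (the ARM pc reads two instructions
// ahead), which b() encodes as imm24 == 8 >> 2 == 2.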
1088 
1089 
1090 void Assembler::bl(int branch_offset, Condition cond) {
1091  positions_recorder()->WriteRecordedPositions();
1092  ASSERT((branch_offset & 3) == 0);
1093  int imm24 = branch_offset >> 2;
1094  ASSERT(is_int24(imm24));
1095  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1096 }
1097 
1098 
1099 void Assembler::blx(int branch_offset) { // v5 and above
1100  positions_recorder()->WriteRecordedPositions();
1101  ASSERT((branch_offset & 1) == 0);
1102  int h = ((branch_offset & 2) >> 1)*B24;
1103  int imm24 = branch_offset >> 2;
1104  ASSERT(is_int24(imm24));
1105  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1106 }
1107 
1108 
1109 void Assembler::blx(Register target, Condition cond) { // v5 and above
1110  positions_recorder()->WriteRecordedPositions();
1111  ASSERT(!target.is(pc));
1112  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1113 }
1114 
1115 
1116 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
1117  positions_recorder()->WriteRecordedPositions();
1118  ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
1119  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1120 }
1121 
1122 
1123 // Data-processing instructions.
1124 
1125 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1126  SBit s, Condition cond) {
1127  addrmod1(cond | AND | s, src1, dst, src2);
1128 }
1129 
1130 
1131 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1132  SBit s, Condition cond) {
1133  addrmod1(cond | EOR | s, src1, dst, src2);
1134 }
1135 
1136 
1137 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1138  SBit s, Condition cond) {
1139  addrmod1(cond | SUB | s, src1, dst, src2);
1140 }
1141 
1142 
1143 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1144  SBit s, Condition cond) {
1145  addrmod1(cond | RSB | s, src1, dst, src2);
1146 }
1147 
1148 
1149 void Assembler::add(Register dst, Register src1, const Operand& src2,
1150  SBit s, Condition cond) {
1151  addrmod1(cond | ADD | s, src1, dst, src2);
1152 }
1153 
1154 
1155 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1156  SBit s, Condition cond) {
1157  addrmod1(cond | ADC | s, src1, dst, src2);
1158 }
1159 
1160 
1161 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1162  SBit s, Condition cond) {
1163  addrmod1(cond | SBC | s, src1, dst, src2);
1164 }
1165 
1166 
1167 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1168  SBit s, Condition cond) {
1169  addrmod1(cond | RSC | s, src1, dst, src2);
1170 }
1171 
1172 
1173 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1174  addrmod1(cond | TST | S, src1, r0, src2);
1175 }
1176 
1177 
1178 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1179  addrmod1(cond | TEQ | S, src1, r0, src2);
1180 }
1181 
1182 
1183 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1184  addrmod1(cond | CMP | S, src1, r0, src2);
1185 }
1186 
1187 
1188 void Assembler::cmp_raw_immediate(
1189  Register src, int raw_immediate, Condition cond) {
1190  ASSERT(is_uint12(raw_immediate));
1191  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1192 }
1193 
1194 
1195 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1196  addrmod1(cond | CMN | S, src1, r0, src2);
1197 }
1198 
1199 
1200 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1201  SBit s, Condition cond) {
1202  addrmod1(cond | ORR | s, src1, dst, src2);
1203 }
1204 
1205 
1206 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1207  if (dst.is(pc)) {
1208  positions_recorder()->WriteRecordedPositions();
1209  }
1210  // Don't allow nop instructions in the form mov rn, rn to be generated using
1211  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1212  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
1213  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
1214  addrmod1(cond | MOV | s, r0, dst, src);
1215 }
1216 
1217 
1218 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1219  ASSERT(immediate < 0x10000);
1220  // May use movw if supported, but on unsupported platforms will try to use
1221  // equivalent rotated immed_8 value and other tricks before falling back to a
1222  // constant pool load.
1223  mov(reg, Operand(immediate), LeaveCC, cond);
1224 }
1225 
1226 
1227 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
1228  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1229 }
1230 
1231 
1232 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1233  SBit s, Condition cond) {
1234  addrmod1(cond | BIC | s, src1, dst, src2);
1235 }
1236 
1237 
1238 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1239  addrmod1(cond | MVN | s, r0, dst, src);
1240 }
1241 
1242 
1243 // Multiply instructions.
1244 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1245  SBit s, Condition cond) {
1246  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1247  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1248  src2.code()*B8 | B7 | B4 | src1.code());
1249 }
1250 
1251 
1252 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1253  Condition cond) {
1254  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1255  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1256  src2.code()*B8 | B7 | B4 | src1.code());
1257 }
1258 
1259 
1260 void Assembler::sdiv(Register dst, Register src1, Register src2,
1261  Condition cond) {
1262  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1263  emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1264  src2.code()*B8 | B4 | src1.code());
1265 }
1266 
1267 
1268 void Assembler::mul(Register dst, Register src1, Register src2,
1269  SBit s, Condition cond) {
1270  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1271  // dst goes in bits 16-19 for this instruction!
1272  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
1273 }
1274 
1275 
1276 void Assembler::smlal(Register dstL,
1277  Register dstH,
1278  Register src1,
1279  Register src2,
1280  SBit s,
1281  Condition cond) {
1282  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1283  ASSERT(!dstL.is(dstH));
1284  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1285  src2.code()*B8 | B7 | B4 | src1.code());
1286 }
1287 
1288 
1289 void Assembler::smull(Register dstL,
1290  Register dstH,
1291  Register src1,
1292  Register src2,
1293  SBit s,
1294  Condition cond) {
1295  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1296  ASSERT(!dstL.is(dstH));
1297  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1298  src2.code()*B8 | B7 | B4 | src1.code());
1299 }
1300 
1301 
1302 void Assembler::umlal(Register dstL,
1303  Register dstH,
1304  Register src1,
1305  Register src2,
1306  SBit s,
1307  Condition cond) {
1308  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1309  ASSERT(!dstL.is(dstH));
1310  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1311  src2.code()*B8 | B7 | B4 | src1.code());
1312 }
1313 
1314 
1315 void Assembler::umull(Register dstL,
1316  Register dstH,
1317  Register src1,
1318  Register src2,
1319  SBit s,
1320  Condition cond) {
1321  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1322  ASSERT(!dstL.is(dstH));
1323  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1324  src2.code()*B8 | B7 | B4 | src1.code());
1325 }
1326 
1327 
1328 // Miscellaneous arithmetic instructions.
1329 void Assembler::clz(Register dst, Register src, Condition cond) {
1330  // v5 and above.
1331  ASSERT(!dst.is(pc) && !src.is(pc));
1332  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1333  15*B8 | CLZ | src.code());
1334 }
1335 
1336 
1337 // Saturating instructions.
1338 
1339 // Unsigned saturate.
1340 void Assembler::usat(Register dst,
1341  int satpos,
1342  const Operand& src,
1343  Condition cond) {
1344  // v6 and above.
1345  ASSERT(CpuFeatures::IsSupported(ARMv7));
1346  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
1347  ASSERT((satpos >= 0) && (satpos <= 31));
1348  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1349  ASSERT(src.rs_.is(no_reg));
1350 
1351  int sh = 0;
1352  if (src.shift_op_ == ASR) {
1353  sh = 1;
1354  }
1355 
1356  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1357  src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1358 }
1359 
1360 
1361 // Bitfield manipulation instructions.
1362 
1363 // Unsigned bit field extract.
1364 // Extracts #width adjacent bits from position #lsb in a register, and
1365 // writes them to the low bits of a destination register.
1366 // ubfx dst, src, #lsb, #width
1367 void Assembler::ubfx(Register dst,
1368  Register src,
1369  int lsb,
1370  int width,
1371  Condition cond) {
1372  // v7 and above.
1373  ASSERT(CpuFeatures::IsSupported(ARMv7));
1374  ASSERT(!dst.is(pc) && !src.is(pc));
1375  ASSERT((lsb >= 0) && (lsb <= 31));
1376  ASSERT((width >= 1) && (width <= (32 - lsb)));
1377  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1378  lsb*B7 | B6 | B4 | src.code());
1379 }
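// Example: ubfx(r0, r1, 4, 8) extracts bits 11:4 of r1 into r0, so if r1
// holds 0x00000ab0 then r0 receives 0x000000ab.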
1380 
1381 
1382 // Signed bit field extract.
1383 // Extracts #width adjacent bits from position #lsb in a register, and
1384 // writes them to the low bits of a destination register. The extracted
1385 // value is sign extended to fill the destination register.
1386 // sbfx dst, src, #lsb, #width
1387 void Assembler::sbfx(Register dst,
1388  Register src,
1389  int lsb,
1390  int width,
1391  Condition cond) {
1392  // v7 and above.
1393  ASSERT(CpuFeatures::IsSupported(ARMv7));
1394  ASSERT(!dst.is(pc) && !src.is(pc));
1395  ASSERT((lsb >= 0) && (lsb <= 31));
1396  ASSERT((width >= 1) && (width <= (32 - lsb)));
1397  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1398  lsb*B7 | B6 | B4 | src.code());
1399 }
1400 
1401 
1402 // Bit field clear.
1403 // Sets #width adjacent bits at position #lsb in the destination register
1404 // to zero, preserving the value of the other bits.
1405 // bfc dst, #lsb, #width
1406 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1407  // v7 and above.
1408  ASSERT(CpuFeatures::IsSupported(ARMv7));
1409  ASSERT(!dst.is(pc));
1410  ASSERT((lsb >= 0) && (lsb <= 31));
1411  ASSERT((width >= 1) && (width <= (32 - lsb)));
1412  int msb = lsb + width - 1;
1413  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1414 }
1415 
1416 
1417 // Bit field insert.
1418 // Inserts #width adjacent bits from the low bits of the source register
1419 // into position #lsb of the destination register.
1420 // bfi dst, src, #lsb, #width
1421 void Assembler::bfi(Register dst,
1422  Register src,
1423  int lsb,
1424  int width,
1425  Condition cond) {
1426  // v7 and above.
1427  ASSERT(CpuFeatures::IsSupported(ARMv7));
1428  ASSERT(!dst.is(pc) && !src.is(pc));
1429  ASSERT((lsb >= 0) && (lsb <= 31));
1430  ASSERT((width >= 1) && (width <= (32 - lsb)));
1431  int msb = lsb + width - 1;
1432  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1433  src.code());
1434 }
1435 
1436 
1437 // Status register access instructions.
1438 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1439  ASSERT(!dst.is(pc));
1440  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1441 }
1442 
1443 
1444 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1445  Condition cond) {
1446  ASSERT(fields >= B16 && fields < B20); // at least one field set
1447  Instr instr;
1448  if (!src.rm_.is_valid()) {
1449  // Immediate.
1450  uint32_t rotate_imm;
1451  uint32_t immed_8;
1452  if (src.must_output_reloc_info(this) ||
1453  !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
1454  // Immediate operand cannot be encoded, load it first to register ip.
1455  RecordRelocInfo(src.rmode_, src.imm32_);
1456  ldr(ip, MemOperand(pc, 0), cond);
1457  msr(fields, Operand(ip), cond);
1458  return;
1459  }
1460  instr = I | rotate_imm*B8 | immed_8;
1461  } else {
1462  ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
1463  instr = src.rm_.code();
1464  }
1465  emit(cond | instr | B24 | B21 | fields | 15*B12);
1466 }
1467 
1468 
1469 // Load/Store instructions.
1470 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1471  if (dst.is(pc)) {
1472  positions_recorder()->WriteRecordedPositions();
1473  }
1474  addrmod2(cond | B26 | L, dst, src);
1475 }
1476 
1477 
1478 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1479  addrmod2(cond | B26, src, dst);
1480 }
1481 
1482 
1483 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1484  addrmod2(cond | B26 | B | L, dst, src);
1485 }
1486 
1487 
1488 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1489  addrmod2(cond | B26 | B, src, dst);
1490 }
1491 
1492 
1493 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1494  addrmod3(cond | L | B7 | H | B4, dst, src);
1495 }
1496 
1497 
1498 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1499  addrmod3(cond | B7 | H | B4, src, dst);
1500 }
1501 
1502 
1503 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1504  addrmod3(cond | L | B7 | S6 | B4, dst, src);
1505 }
1506 
1507 
1508 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1509  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1510 }
1511 
1512 
1513 void Assembler::ldrd(Register dst1, Register dst2,
1514  const MemOperand& src, Condition cond) {
1515  ASSERT(CpuFeatures::IsEnabled(ARMv7));
1516  ASSERT(src.rm().is(no_reg));
1517  ASSERT(!dst1.is(lr)); // r14.
1518  ASSERT_EQ(0, dst1.code() % 2);
1519  ASSERT_EQ(dst1.code() + 1, dst2.code());
1520  addrmod3(cond | B7 | B6 | B4, dst1, src);
1521 }
1522 
1523 
1524 void Assembler::strd(Register src1, Register src2,
1525  const MemOperand& dst, Condition cond) {
1526  ASSERT(dst.rm().is(no_reg));
1527  ASSERT(!src1.is(lr)); // r14.
1528  ASSERT_EQ(0, src1.code() % 2);
1529  ASSERT_EQ(src1.code() + 1, src2.code());
1530  ASSERT(CpuFeatures::IsEnabled(ARMv7));
1531  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
1532 }
1533 
1534 // Load/Store multiple instructions.
1535 void Assembler::ldm(BlockAddrMode am,
1536  Register base,
1537  RegList dst,
1538  Condition cond) {
1539  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
1540  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
1541 
1542  addrmod4(cond | B27 | am | L, base, dst);
1543 
1544  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1545  if (cond == al && (dst & pc.bit()) != 0) {
1546  // There is a slight chance that the ldm instruction was actually a call,
1547  // in which case it would be wrong to return into the constant pool; we
1548  // recognize this case by checking if the emission of the pool was blocked
1549  // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1550  // the case, we emit a jump over the pool.
1551  CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
1552  }
1553 }
1554 
1555 
1556 void Assembler::stm(BlockAddrMode am,
1557  Register base,
1558  RegList src,
1559  Condition cond) {
1560  addrmod4(cond | B27 | am, base, src);
1561 }
1562 
1563 
1564 // Exception-generating instructions and debugging support.
1565 // Stops with a non-negative code less than kNumOfWatchedStops support
1566 // enabling/disabling and a counter feature. See simulator-arm.h .
1567 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
1568 #ifndef __arm__
1569  ASSERT(code >= kDefaultStopCode);
1570  {
1571  // The Simulator will handle the stop instruction and get the message
1572  // address. It expects to find the address just after the svc instruction.
1573  BlockConstPoolScope block_const_pool(this);
1574  if (code >= 0) {
1575  svc(kStopCode + code, cond);
1576  } else {
1577  svc(kStopCode + kMaxStopCode, cond);
1578  }
1579  emit(reinterpret_cast<Instr>(msg));
1580  }
1581 #else // def __arm__
1582 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
1583  if (cond != al) {
1584  Label skip;
1585  b(&skip, NegateCondition(cond));
1586  bkpt(0);
1587  bind(&skip);
1588  } else {
1589  bkpt(0);
1590  }
1591 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS
1592  svc(0x9f0001, cond);
1593 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
1594 #endif // def __arm__
1595 }
1596 
1597 
1598 void Assembler::bkpt(uint32_t imm16) { // v5 and above
1599  ASSERT(is_uint16(imm16));
1600  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
1601 }
1602 
1603 
1604 void Assembler::svc(uint32_t imm24, Condition cond) {
1605  ASSERT(is_uint24(imm24));
1606  emit(cond | 15*B24 | imm24);
1607 }
1608 
1609 
1610 // Coprocessor instructions.
1611 void Assembler::cdp(Coprocessor coproc,
1612  int opcode_1,
1613  CRegister crd,
1614  CRegister crn,
1615  CRegister crm,
1616  int opcode_2,
1617  Condition cond) {
1618  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1619  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1620  crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1621 }
1622 
1623 
1624 void Assembler::cdp2(Coprocessor coproc,
1625  int opcode_1,
1626  CRegister crd,
1627  CRegister crn,
1628  CRegister crm,
1629  int opcode_2) { // v5 and above
1630  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
1631 }
1632 
1633 
1634 void Assembler::mcr(Coprocessor coproc,
1635  int opcode_1,
1636  Register rd,
1637  CRegister crn,
1638  CRegister crm,
1639  int opcode_2,
1640  Condition cond) {
1641  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1642  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
1643  rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1644 }
1645 
1646 
1647 void Assembler::mcr2(Coprocessor coproc,
1648  int opcode_1,
1649  Register rd,
1650  CRegister crn,
1651  CRegister crm,
1652  int opcode_2) { // v5 and above
1653  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
1654 }
1655 
1656 
1657 void Assembler::mrc(Coprocessor coproc,
1658  int opcode_1,
1659  Register rd,
1660  CRegister crn,
1661  CRegister crm,
1662  int opcode_2,
1663  Condition cond) {
1664  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1665  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
1666  rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1667 }
1668 
1669 
1670 void Assembler::mrc2(Coprocessor coproc,
1671  int opcode_1,
1672  Register rd,
1673  CRegister crn,
1674  CRegister crm,
1675  int opcode_2) { // v5 and above
1676  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
1677 }
1678 
1679 
1680 void Assembler::ldc(Coprocessor coproc,
1681  CRegister crd,
1682  const MemOperand& src,
1683  LFlag l,
1684  Condition cond) {
1685  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
1686 }
1687 
1688 
1689 void Assembler::ldc(Coprocessor coproc,
1690  CRegister crd,
1691  Register rn,
1692  int option,
1693  LFlag l,
1694  Condition cond) {
1695  // Unindexed addressing.
1696  ASSERT(is_uint8(option));
1697  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
1698  coproc*B8 | (option & 255));
1699 }
1700 
1701 
1702 void Assembler::ldc2(Coprocessor coproc,
1703  CRegister crd,
1704  const MemOperand& src,
1705  LFlag l) { // v5 and above
1706  ldc(coproc, crd, src, l, kSpecialCondition);
1707 }
1708 
1709 
1710 void Assembler::ldc2(Coprocessor coproc,
1711  CRegister crd,
1712  Register rn,
1713  int option,
1714  LFlag l) { // v5 and above
1715  ldc(coproc, crd, rn, option, l, kSpecialCondition);
1716 }
1717 
1718 
1719 // Support for VFP.
1720 
1721 void Assembler::vldr(const DwVfpRegister dst,
1722  const Register base,
1723  int offset,
1724  const Condition cond) {
1725  // Ddst = MEM(Rbase + offset).
1726  // Instruction details available in ARM DDI 0406A, A8-628.
1727  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
1728  // Vdst(15-12) | 1011(11-8) | offset
1729  ASSERT(CpuFeatures::IsEnabled(VFP2));
1730  int u = 1;
1731  if (offset < 0) {
1732  offset = -offset;
1733  u = 0;
1734  }
1735 
1736  ASSERT(offset >= 0);
1737  if ((offset % 4) == 0 && (offset / 4) < 256) {
1738  emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
1739  0xB*B8 | ((offset / 4) & 255));
1740  } else {
1741  // Larger offsets must be handled by computing the correct address
1742  // in the ip register.
1743  ASSERT(!base.is(ip));
1744  if (u == 1) {
1745  add(ip, base, Operand(offset));
1746  } else {
1747  sub(ip, base, Operand(offset));
1748  }
1749  emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
1750  }
1751 }
1752 
1753 
1754 void Assembler::vldr(const DwVfpRegister dst,
1755  const MemOperand& operand,
1756  const Condition cond) {
1757  ASSERT(!operand.rm().is_valid());
1758  ASSERT(operand.am_ == Offset);
1759  vldr(dst, operand.rn(), operand.offset(), cond);
1760 }
1761 
1762 
1763 void Assembler::vldr(const SwVfpRegister dst,
1764  const Register base,
1765  int offset,
1766  const Condition cond) {
1767  // Sdst = MEM(Rbase + offset).
1768  // Instruction details available in ARM DDI 0406A, A8-628.
1769  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
1770  // Vdst(15-12) | 1010(11-8) | offset
1771  ASSERT(CpuFeatures::IsEnabled(VFP2));
1772  int u = 1;
1773  if (offset < 0) {
1774  offset = -offset;
1775  u = 0;
1776  }
1777  int sd, d;
1778  dst.split_code(&sd, &d);
1779  ASSERT(offset >= 0);
1780 
1781  if ((offset % 4) == 0 && (offset / 4) < 256) {
1782  emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
1783  0xA*B8 | ((offset / 4) & 255));
1784  } else {
1785  // Larger offsets must be handled by computing the correct address
1786  // in the ip register.
1787  ASSERT(!base.is(ip));
1788  if (u == 1) {
1789  add(ip, base, Operand(offset));
1790  } else {
1791  sub(ip, base, Operand(offset));
1792  }
1793  emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
1794  }
1795 }
1796 
1797 
1798 void Assembler::vldr(const SwVfpRegister dst,
1799  const MemOperand& operand,
1800  const Condition cond) {
1801  ASSERT(!operand.rm().is_valid());
1802  ASSERT(operand.am_ == Offset);
1803  vldr(dst, operand.rn(), operand.offset(), cond);
1804 }
1805 
1806 
1807 void Assembler::vstr(const DwVfpRegister src,
1808  const Register base,
1809  int offset,
1810  const Condition cond) {
1811  // MEM(Rbase + offset) = Dsrc.
1812  // Instruction details available in ARM DDI 0406A, A8-786.
1813  // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
1814  // Vsrc(15-12) | 1011(11-8) | (offset/4)
1815  ASSERT(CpuFeatures::IsEnabled(VFP2));
1816  int u = 1;
1817  if (offset < 0) {
1818  offset = -offset;
1819  u = 0;
1820  }
1821  ASSERT(offset >= 0);
1822  if ((offset % 4) == 0 && (offset / 4) < 256) {
1823  emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
1824  0xB*B8 | ((offset / 4) & 255));
1825  } else {
1826  // Larger offsets must be handled by computing the correct address
1827  // in the ip register.
1828  ASSERT(!base.is(ip));
1829  if (u == 1) {
1830  add(ip, base, Operand(offset));
1831  } else {
1832  sub(ip, base, Operand(offset));
1833  }
1834  emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
1835  }
1836 }
1837 
1838 
1839 void Assembler::vstr(const DwVfpRegister src,
1840  const MemOperand& operand,
1841  const Condition cond) {
1842  ASSERT(!operand.rm().is_valid());
1843  ASSERT(operand.am_ == Offset);
1844  vstr(src, operand.rn(), operand.offset(), cond);
1845 }
1846 
1847 
1848 void Assembler::vstr(const SwVfpRegister src,
1849  const Register base,
1850  int offset,
1851  const Condition cond) {
1852  // MEM(Rbase + offset) = Ssrc.
1853  // Instruction details available in ARM DDI 0406A, A8-786.
1854  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
1855  // Vsrc(15-12) | 1010(11-8) | (offset/4)
1856  ASSERT(CpuFeatures::IsEnabled(VFP2));
1857  int u = 1;
1858  if (offset < 0) {
1859  offset = -offset;
1860  u = 0;
1861  }
1862  int sd, d;
1863  src.split_code(&sd, &d);
1864  ASSERT(offset >= 0);
1865  if ((offset % 4) == 0 && (offset / 4) < 256) {
1866  emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
1867  0xA*B8 | ((offset / 4) & 255));
1868  } else {
1869  // Larger offsets must be handled by computing the correct address
1870  // in the ip register.
1871  ASSERT(!base.is(ip));
1872  if (u == 1) {
1873  add(ip, base, Operand(offset));
1874  } else {
1875  sub(ip, base, Operand(offset));
1876  }
1877  emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
1878  }
1879 }
1880 
1881 
1882 void Assembler::vstr(const SwVfpRegister src,
1883  const MemOperand& operand,
1884  const Condition cond) {
1885  ASSERT(!operand.rm().is_valid());
1886  ASSERT(operand.am_ == Offset);
1887  vstr(src, operand.rn(), operand.offset(), cond);
1888 }
1889 
1890 
1891 void Assembler::vldm(BlockAddrMode am,
1892  Register base,
1893  DwVfpRegister first,
1894  DwVfpRegister last,
1895  Condition cond) {
1896  // Instruction details available in ARM DDI 0406A, A8-626.
1897  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
1898  // first(15-12) | 1011(11-8) | (count * 2)
1899  ASSERT(CpuFeatures::IsEnabled(VFP2));
1900  ASSERT_LE(first.code(), last.code());
1901  ASSERT(am == ia || am == ia_w || am == db_w);
1902  ASSERT(!base.is(pc));
1903 
1904  int sd, d;
1905  first.split_code(&sd, &d);
1906  int count = last.code() - first.code() + 1;
1907  ASSERT(count <= 16);
1908  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
1909  0xB*B8 | count*2);
1910 }
1911 
1912 
1913 void Assembler::vstm(BlockAddrMode am,
1914  Register base,
1915  DwVfpRegister first,
1916  DwVfpRegister last,
1917  Condition cond) {
1918  // Instruction details available in ARM DDI 0406A, A8-784.
1919  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
1920  // first(15-12) | 1011(11-8) | (count * 2)
1921  ASSERT(CpuFeatures::IsEnabled(VFP2));
1922  ASSERT_LE(first.code(), last.code());
1923  ASSERT(am == ia || am == ia_w || am == db_w);
1924  ASSERT(!base.is(pc));
1925 
1926  int sd, d;
1927  first.split_code(&sd, &d);
1928  int count = last.code() - first.code() + 1;
1929  ASSERT(count <= 16);
1930  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
1931  0xB*B8 | count*2);
1932 }
1933 
1934 void Assembler::vldm(BlockAddrMode am,
1935  Register base,
1936  SwVfpRegister first,
1937  SwVfpRegister last,
1938  Condition cond) {
1939  // Instruction details available in ARM DDI 0406A, A8-626.
1940  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
1941  // first(15-12) | 1010(11-8) | (count)
1942  ASSERT(CpuFeatures::IsEnabled(VFP2));
1943  ASSERT_LE(first.code(), last.code());
1944  ASSERT(am == ia || am == ia_w || am == db_w);
1945  ASSERT(!base.is(pc));
1946 
1947  int sd, d;
1948  first.split_code(&sd, &d);
1949  int count = last.code() - first.code() + 1;
1950  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
1951  0xA*B8 | count);
1952 }
1953 
1954 
1955 void Assembler::vstm(BlockAddrMode am,
1956  Register base,
1957  SwVfpRegister first,
1958  SwVfpRegister last,
1959  Condition cond) {
1960  // Instruction details available in ARM DDI 0406A, A8-784.
1961  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
1962  // first(15-12) | 1010(11-8) | (count)
1963  ASSERT(CpuFeatures::IsEnabled(VFP2));
1964  ASSERT_LE(first.code(), last.code());
1965  ASSERT(am == ia || am == ia_w || am == db_w);
1966  ASSERT(!base.is(pc));
1967 
1968  int sd, d;
1969  first.split_code(&sd, &d);
1970  int count = last.code() - first.code() + 1;
1971  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
1972  0xA*B8 | count);
1973 }
1974 
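// [Editor's note] Illustrative sketch, not part of the original file. In the
// vldm/vstm encodings above, the low byte of the instruction holds the number
// of words transferred: two per D register, one per S register. A hypothetical
// helper mirroring that rule:
static int VFPBlockTransferImm8(int first_code, int last_code, bool is_double) {
  int count = last_code - first_code + 1;
  return is_double ? count * 2 : count;
}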
1975 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
1976  uint64_t i;
1977  memcpy(&i, &d, 8);
1978 
1979  *lo = i & 0xffffffff;
1980  *hi = i >> 32;
1981 }
1982 
1983 // Only works for little endian floating point formats.
1984 // We don't support VFP on the mixed endian floating point platform.
1985 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
1986  ASSERT(CpuFeatures::IsSupported(VFP3));
1987 
1988  // VMOV can accept an immediate of the form:
1989  //
1990  // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
1991  //
1992  // The immediate is encoded using an 8-bit quantity, comprised of two
1993  // 4-bit fields. For an 8-bit immediate of the form:
1994  //
1995  // [abcdefgh]
1996  //
1997  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
1998  // created of the form:
1999  //
2000  // [aBbbbbbb,bbcdefgh,00000000,00000000,
2001  // 00000000,00000000,00000000,00000000]
2002  //
2003  // where B = ~b.
2004  //
2005 
2006  uint32_t lo, hi;
2007  DoubleAsTwoUInt32(d, &lo, &hi);
2008 
2009  // The most obvious constraint is the long block of zeroes.
2010  if ((lo != 0) || ((hi & 0xffff) != 0)) {
2011  return false;
2012  }
2013 
2014  // Bits 62:55 must be all clear or all set.
2015  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2016  return false;
2017  }
2018 
2019  // Bit 63 must be NOT bit 62.
2020  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2021  return false;
2022  }
2023 
2024  // Create the encoded immediate in the form:
2025  // [00000000,0000abcd,00000000,0000efgh]
2026  *encoding = (hi >> 16) & 0xf; // Low nybble.
2027  *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2028  *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
2029 
2030  return true;
2031 }
2032 
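// [Editor's note] Illustrative self-test, not part of the original file; it
// assumes compilation in this translation unit so the static predicate above
// is visible. 1.0 is +16 * 2^-4 and therefore encodable; 0.1 has a non-zero
// low mantissa word and is rejected by the first check.
static void CheckFitsVMOVDoubleImmediateExamples() {
  uint32_t enc;
  ASSERT(FitsVMOVDoubleImmediate(1.0, &enc));   // m = 16, n = 4.
  ASSERT(!FitsVMOVDoubleImmediate(0.1, &enc));  // Not of the required form.
}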
2033 
2034 void Assembler::vmov(const DwVfpRegister dst,
2035  double imm,
2036  const Register scratch,
2037  const Condition cond) {
2038  // Dd = immediate
2039  // Instruction details available in ARM DDI 0406B, A8-640.
2040  ASSERT(CpuFeatures::IsEnabled(VFP2));
2041 
2042  uint32_t enc;
2043  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
2044  // The double can be encoded in the instruction.
2045  emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
2046  } else {
2047  // Synthesise the double from ARM immediates. This could be implemented
2048  // using vldr from a constant pool.
2049  uint32_t lo, hi;
2050  DoubleAsTwoUInt32(imm, &lo, &hi);
2051  mov(ip, Operand(lo));
2052 
2053  if (scratch.is(no_reg)) {
2054  // Move the low part of the double into the lower of the corresponding S
2055  // registers of D register dst.
2056  vmov(dst.low(), ip, cond);
2057 
2058  // Move the high part of the double into the higher of the corresponding S
2059  // registers of D register dst.
2060  mov(ip, Operand(hi));
2061  vmov(dst.high(), ip, cond);
2062  } else {
2063  // Move the low and high parts of the double to a D register in one
2064  // instruction.
2065  mov(scratch, Operand(hi));
2066  vmov(dst, ip, scratch, cond);
2067  }
2068  }
2069 }
2070 
2071 
2072 void Assembler::vmov(const SwVfpRegister dst,
2073  const SwVfpRegister src,
2074  const Condition cond) {
2075  // Sd = Sm
2076  // Instruction details available in ARM DDI 0406B, A8-642.
2077  ASSERT(CpuFeatures::IsEnabled(VFP2));
2078  int sd, d, sm, m;
2079  dst.split_code(&sd, &d);
2080  src.split_code(&sm, &m);
2081  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
2082 }
2083 
2084 
2085 void Assembler::vmov(const DwVfpRegister dst,
2086  const DwVfpRegister src,
2087  const Condition cond) {
2088  // Dd = Dm
2089  // Instruction details available in ARM DDI 0406B, A8-642.
2090  ASSERT(CpuFeatures::IsEnabled(VFP2));
2091  emit(cond | 0xE*B24 | 0xB*B20 |
2092  dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
2093 }
2094 
2095 
2096 void Assembler::vmov(const DwVfpRegister dst,
2097  const Register src1,
2098  const Register src2,
2099  const Condition cond) {
2100  // Dm = <Rt,Rt2>.
2101  // Instruction details available in ARM DDI 0406A, A8-646.
2102  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2103  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2104  ASSERT(CpuFeatures::IsEnabled(VFP2));
2105  ASSERT(!src1.is(pc) && !src2.is(pc));
2106  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2107  src1.code()*B12 | 0xB*B8 | B4 | dst.code());
2108 }
2109 
2110 
2111 void Assembler::vmov(const Register dst1,
2112  const Register dst2,
2113  const DwVfpRegister src,
2114  const Condition cond) {
2115  // <Rt,Rt2> = Dm.
2116  // Instruction details available in ARM DDI 0406A, A8-646.
2117  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2118  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2119  ASSERT(CpuFeatures::IsEnabled(VFP2));
2120  ASSERT(!dst1.is(pc) && !dst2.is(pc));
2121  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2122  dst1.code()*B12 | 0xB*B8 | B4 | src.code());
2123 }
2124 
2125 
2126 void Assembler::vmov(const SwVfpRegister dst,
2127  const Register src,
2128  const Condition cond) {
2129  // Sn = Rt.
2130  // Instruction details available in ARM DDI 0406A, A8-642.
2131  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2132  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2133  ASSERT(CpuFeatures::IsEnabled(VFP2));
2134  ASSERT(!src.is(pc));
2135  int sn, n;
2136  dst.split_code(&sn, &n);
2137  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
2138 }
2139 
2140 
2141 void Assembler::vmov(const Register dst,
2142  const SwVfpRegister src,
2143  const Condition cond) {
2144  // Rt = Sn.
2145  // Instruction details available in ARM DDI 0406A, A8-642.
2146  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2147  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2148  ASSERT(CpuFeatures::IsEnabled(VFP2));
2149  ASSERT(!dst.is(pc));
2150  int sn, n;
2151  src.split_code(&sn, &n);
2152  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
2153 }
2154 
2155 
2156 // Type of data to read from or write to a VFP register.
2157 // Used as a specifier in the generic vcvt instruction.
2158 enum VFPType { S32, U32, F32, F64 };
2159 
2160 
2161 static bool IsSignedVFPType(VFPType type) {
2162  switch (type) {
2163  case S32:
2164  return true;
2165  case U32:
2166  return false;
2167  default:
2168  UNREACHABLE();
2169  return false;
2170  }
2171 }
2172 
2173 
2174 static bool IsIntegerVFPType(VFPType type) {
2175  switch (type) {
2176  case S32:
2177  case U32:
2178  return true;
2179  case F32:
2180  case F64:
2181  return false;
2182  default:
2183  UNREACHABLE();
2184  return false;
2185  }
2186 }
2187 
2188 
2189 static bool IsDoubleVFPType(VFPType type) {
2190  switch (type) {
2191  case F32:
2192  return false;
2193  case F64:
2194  return true;
2195  default:
2196  UNREACHABLE();
2197  return false;
2198  }
2199 }
2200 
2201 
2202 // Split a five-bit reg_code based on the size of reg_type.
2203 // 32-bit register codes are Vm:M
2204 // 64-bit register codes are M:Vm
2205 // where Vm is four bits, and M is a single bit.
2206 static void SplitRegCode(VFPType reg_type,
2207  int reg_code,
2208  int* vm,
2209  int* m) {
2210  ASSERT((reg_code >= 0) && (reg_code <= 31));
2211  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2212  // 32 bit type.
2213  *m = reg_code & 0x1;
2214  *vm = reg_code >> 1;
2215  } else {
2216  // 64 bit type.
2217  *m = (reg_code & 0x10) >> 4;
2218  *vm = reg_code & 0x0F;
2219  }
2220 }
2221 
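// [Editor's note] Illustrative sketch, not part of the original file. For
// register code 5 the two splits differ: s5 is Vm:M = 0010:1 (vm = 2, m = 1),
// while d5 is M:Vm = 0:0101 (vm = 5, m = 0).
static void CheckSplitRegCodeExamples() {
  int vm, m;
  SplitRegCode(F32, 5, &vm, &m);  // 32-bit split.
  ASSERT(vm == 2 && m == 1);
  SplitRegCode(F64, 5, &vm, &m);  // 64-bit split.
  ASSERT(vm == 5 && m == 0);
}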
2222 
2223 // Encode vcvt.src_type.dst_type instruction.
2224 static Instr EncodeVCVT(const VFPType dst_type,
2225  const int dst_code,
2226  const VFPType src_type,
2227  const int src_code,
2228  VFPConversionMode mode,
2229  const Condition cond) {
2230  ASSERT(src_type != dst_type);
2231  int D, Vd, M, Vm;
2232  SplitRegCode(src_type, src_code, &Vm, &M);
2233  SplitRegCode(dst_type, dst_code, &Vd, &D);
2234 
2235  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2236  // Conversion between IEEE floating point and 32-bit integer.
2237  // Instruction details available in ARM DDI 0406B, A8.6.295.
2238  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2239  // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2240  ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2241 
2242  int sz, opc2, op;
2243 
2244  if (IsIntegerVFPType(dst_type)) {
2245  opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2246  sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2247  op = mode;
2248  } else {
2249  ASSERT(IsIntegerVFPType(src_type));
2250  opc2 = 0x0;
2251  sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2252  op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2253  }
2254 
2255  return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2256  Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2257  } else {
2258  // Conversion between IEEE double and single precision.
2259  // Instruction details available in ARM DDI 0406B, A8.6.298.
2260  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2261  // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2262  int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2263  return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2264  Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2265  }
2266 }
2267 
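// [Editor's note] Illustrative sketch, not part of the original file. For an
// int-to-double conversion the source is the integer type, so opc2 = 0, sz
// reflects the double destination, and op carries the signedness of the
// source. With cond = al this yields the standard vcvt.f64.s32 d0, s0 word:
static void CheckEncodeVCVTExample() {
  Instr expected = static_cast<Instr>(0xEEB80BC0);  // vcvt.f64.s32 d0, s0
  ASSERT(EncodeVCVT(F64, 0, S32, 0, kDefaultRoundToZero, al) == expected);
}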
2268 
2269 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2270  const SwVfpRegister src,
2271  VFPConversionMode mode,
2272  const Condition cond) {
2273  ASSERT(CpuFeatures::IsEnabled(VFP2));
2274  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2275 }
2276 
2277 
2278 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2279  const SwVfpRegister src,
2280  VFPConversionMode mode,
2281  const Condition cond) {
2282  ASSERT(CpuFeatures::IsEnabled(VFP2));
2283  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2284 }
2285 
2286 
2287 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2288  const SwVfpRegister src,
2289  VFPConversionMode mode,
2290  const Condition cond) {
2291  ASSERT(CpuFeatures::IsEnabled(VFP2));
2292  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2293 }
2294 
2295 
2296 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2297  const DwVfpRegister src,
2298  VFPConversionMode mode,
2299  const Condition cond) {
2300  ASSERT(CpuFeatures::IsEnabled(VFP2));
2301  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2302 }
2303 
2304 
2305 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2306  const DwVfpRegister src,
2307  VFPConversionMode mode,
2308  const Condition cond) {
2309  ASSERT(CpuFeatures::IsEnabled(VFP2));
2310  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2311 }
2312 
2313 
2314 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2315  const SwVfpRegister src,
2316  VFPConversionMode mode,
2317  const Condition cond) {
2318  ASSERT(CpuFeatures::IsEnabled(VFP2));
2319  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2320 }
2321 
2322 
2323 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2324  const DwVfpRegister src,
2325  VFPConversionMode mode,
2326  const Condition cond) {
2327  ASSERT(CpuFeatures::IsEnabled(VFP2));
2328  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2329 }
2330 
2331 
2332 void Assembler::vneg(const DwVfpRegister dst,
2333  const DwVfpRegister src,
2334  const Condition cond) {
2335  ASSERT(CpuFeatures::IsEnabled(VFP2));
2336  emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
2337  0x5*B9 | B8 | B6 | src.code());
2338 }
2339 
2340 
2341 void Assembler::vabs(const DwVfpRegister dst,
2342  const DwVfpRegister src,
2343  const Condition cond) {
2344  ASSERT(CpuFeatures::IsEnabled(VFP2));
2345  emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
2346  0x5*B9 | B8 | 0x3*B6 | src.code());
2347 }
2348 
2349 
2350 void Assembler::vadd(const DwVfpRegister dst,
2351  const DwVfpRegister src1,
2352  const DwVfpRegister src2,
2353  const Condition cond) {
2354  // Dd = vadd(Dn, Dm) double precision floating point addition.
2355  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
2356  // Instruction details available in ARM DDI 0406A, A8-536.
2357  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2358  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2359  ASSERT(CpuFeatures::IsEnabled(VFP2));
2360  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2361  dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2362 }
2363 
2364 
2365 void Assembler::vsub(const DwVfpRegister dst,
2366  const DwVfpRegister src1,
2367  const DwVfpRegister src2,
2368  const Condition cond) {
2369  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2370  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
2371  // Instruction details available in ARM DDI 0406A, A8-784.
2372  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2373  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
2374  ASSERT(CpuFeatures::IsEnabled(VFP2));
2375  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2376  dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2377 }
2378 
2379 
2380 void Assembler::vmul(const DwVfpRegister dst,
2381  const DwVfpRegister src1,
2382  const DwVfpRegister src2,
2383  const Condition cond) {
2384  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
2385  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
2386  // Instruction details available in ARM DDI 0406A, A8-784.
2387  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
2388  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2389  ASSERT(CpuFeatures::IsEnabled(VFP2));
2390  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
2391  dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2392 }
2393 
2394 
2395 void Assembler::vdiv(const DwVfpRegister dst,
2396  const DwVfpRegister src1,
2397  const DwVfpRegister src2,
2398  const Condition cond) {
2399  // Dd = vdiv(Dn, Dm) double precision floating point division.
2400  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
2401  // Instruction details available in ARM DDI 0406A, A8-584.
2402  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
2403  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2404  ASSERT(CpuFeatures::IsEnabled(VFP2));
2405  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
2406  dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2407 }
2408 
2409 
2410 void Assembler::vcmp(const DwVfpRegister src1,
2411  const DwVfpRegister src2,
2412  const Condition cond) {
2413  // vcmp(Dd, Dm) double precision floating point comparison.
2414  // Instruction details available in ARM DDI 0406A, A8-570.
2415  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
2416  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
2417  ASSERT(CpuFeatures::IsEnabled(VFP2));
2418  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
2419  src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2420 }
2421 
2422 
2423 void Assembler::vcmp(const DwVfpRegister src1,
2424  const double src2,
2425  const Condition cond) {
2426  // vcmp(Dd, Dm) double precision floating point comparison.
2427  // Instruction details available in ARM DDI 0406A, A8-570.
2428  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
2429  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
2430  ASSERT(CpuFeatures::IsEnabled(VFP2));
2431  ASSERT(src2 == 0.0);
2432  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 | B16 |
2433  src1.code()*B12 | 0x5*B9 | B8 | B6);
2434 }
2435 
2436 
2437 void Assembler::vmsr(Register dst, Condition cond) {
2438  // Instruction details available in ARM DDI 0406A, A8-652.
2439  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
2440  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2441  ASSERT(CpuFeatures::IsEnabled(VFP2));
2442  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
2443  dst.code()*B12 | 0xA*B8 | B4);
2444 }
2445 
2446 
2447 void Assembler::vmrs(Register dst, Condition cond) {
2448  // Instruction details available in ARM DDI 0406A, A8-652.
2449  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
2450  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2451  ASSERT(CpuFeatures::IsEnabled(VFP2));
2452  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
2453  dst.code()*B12 | 0xA*B8 | B4);
2454 }
2455 
2456 
2457 void Assembler::vsqrt(const DwVfpRegister dst,
2458  const DwVfpRegister src,
2459  const Condition cond) {
2460  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
2461  // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
2462  ASSERT(CpuFeatures::IsEnabled(VFP2));
2463  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
2464  dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
2465 }
2466 
2467 
2468 // Pseudo instructions.
2469 void Assembler::nop(int type) {
2470  // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
2471  // part of the CPU's pipeline and has to be issued. Older ARM chips simply
2472  // used MOV Rx, Rx as a NOP, and it performs better even on newer CPUs.
2473  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
2474  // a type.
2475  ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
2476  emit(al | 13*B21 | type*B12 | type);
2477 }
2478 
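// [Editor's note] Illustrative sketch, not part of the original file. With
// al = 0xE in the condition field and 13*B21 selecting the MOV opcode,
// nop(0) assembles to "mov r0, r0" (0xE1A00000) and nop(3) to "mov r3, r3":
static void CheckNopEncodingExample() {
  ASSERT((al | 13*B21 | 0*B12 | 0) == static_cast<Instr>(0xE1A00000));
}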
2479 
2480 bool Assembler::IsMovT(Instr instr) {
2481  instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
2482  ((kNumRegisters-1)*B12) | // mask out register
2483  EncodeMovwImmediate(0xFFFF)); // mask out immediate value
2484  return instr == 0x34*B20;
2485 }
2486 
2487 
2488 bool Assembler::IsMovW(Instr instr) {
2489  instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
2490  ((kNumRegisters-1)*B12) | // mask out destination
2491  EncodeMovwImmediate(0xFFFF)); // mask out immediate value
2492  return instr == 0x30*B20;
2493 }
2494 
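// [Editor's note] Illustrative sketch, not part of the original file. After
// masking off the condition, the destination register, and the immediate,
// only the opcode bits remain: "movw r0, #0" (0xE3000000) matches 0x30*B20,
// while a plain "mov r0, r0" does not.
static void CheckIsMovWExample() {
  ASSERT(Assembler::IsMovW(static_cast<Instr>(0xE3000000)));   // movw r0, #0
  ASSERT(!Assembler::IsMovW(static_cast<Instr>(0xE1A00000)));  // mov r0, r0
}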
2495 
2496 bool Assembler::IsNop(Instr instr, int type) {
2497  ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
2498  // Check for mov rx, rx where x = type.
2499  return instr == (al | 13*B21 | type*B12 | type);
2500 }
2501 
2502 
2503 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2504  uint32_t dummy1;
2505  uint32_t dummy2;
2506  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2507 }
2508 
2509 
2510 // Debugging.
2511 void Assembler::RecordJSReturn() {
2512  positions_recorder()->WriteRecordedPositions();
2513  CheckBuffer();
2514  RecordRelocInfo(RelocInfo::JS_RETURN);
2515 }
2516 
2517 
2518 void Assembler::RecordDebugBreakSlot() {
2519  positions_recorder()->WriteRecordedPositions();
2520  CheckBuffer();
2521  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2522 }
2523 
2524 
2525 void Assembler::RecordComment(const char* msg) {
2526  if (FLAG_code_comments) {
2527  CheckBuffer();
2528  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2529  }
2530 }
2531 
2532 
2533 void Assembler::RecordConstPool(int size) {
2534  // We only need this for debugger support, to correctly compute offsets in the
2535  // code.
2536 #ifdef ENABLE_DEBUGGER_SUPPORT
2537  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
2538 #endif
2539 }
2540 
2541 void Assembler::GrowBuffer() {
2542  if (!own_buffer_) FATAL("external code buffer is too small");
2543 
2544  // Compute new buffer size.
2545  CodeDesc desc; // the new buffer
2546  if (buffer_size_ < 4*KB) {
2547  desc.buffer_size = 4*KB;
2548  } else if (buffer_size_ < 1*MB) {
2549  desc.buffer_size = 2*buffer_size_;
2550  } else {
2551  desc.buffer_size = buffer_size_ + 1*MB;
2552  }
2553  CHECK_GT(desc.buffer_size, 0); // no overflow
2554 
2555  // Set up new buffer.
2556  desc.buffer = NewArray<byte>(desc.buffer_size);
2557 
2558  desc.instr_size = pc_offset();
2559  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2560 
2561  // Copy the data.
2562  int pc_delta = desc.buffer - buffer_;
2563  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2564  memmove(desc.buffer, buffer_, desc.instr_size);
2565  memmove(reloc_info_writer.pos() + rc_delta,
2566  reloc_info_writer.pos(), desc.reloc_size);
2567 
2568  // Switch buffers.
2569  DeleteArray(buffer_);
2570  buffer_ = desc.buffer;
2571  buffer_size_ = desc.buffer_size;
2572  pc_ += pc_delta;
2573  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2574  reloc_info_writer.last_pc() + pc_delta);
2575 
2576  // None of our relocation types are pc-relative pointing outside the code
2577  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
2578  // need to relocate any emitted relocation entries.
2579 
2580  // Relocate pending relocation entries.
2581  for (int i = 0; i < num_pending_reloc_info_; i++) {
2582  RelocInfo& rinfo = pending_reloc_info_[i];
2583  ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2584  rinfo.rmode() != RelocInfo::POSITION);
2585  if (rinfo.rmode() != RelocInfo::JS_RETURN) {
2586  rinfo.set_pc(rinfo.pc() + pc_delta);
2587  }
2588  }
2589 }
2590 
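// [Editor's note] Illustrative sketch, not part of the original file. The
// growth policy above, isolated: start at 4KB, double while under 1MB, then
// grow linearly by 1MB per step so large buffers do not over-allocate.
static int NextBufferSizeExample(int current_size) {
  if (current_size < 4*KB) return 4*KB;
  if (current_size < 1*MB) return 2*current_size;
  return current_size + 1*MB;
}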
2591 
2592 void Assembler::db(uint8_t data) {
2593  // No relocation info should be pending while using db. db is used
2594  // to write pure data with no pointers and the constant pool should
2595  // be emitted before using db.
2596  ASSERT(num_pending_reloc_info_ == 0);
2597  CheckBuffer();
2598  *reinterpret_cast<uint8_t*>(pc_) = data;
2599  pc_ += sizeof(uint8_t);
2600 }
2601 
2602 
2603 void Assembler::dd(uint32_t data) {
2604  // No relocation info should be pending while using dd. dd is used
2605  // to write pure data with no pointers and the constant pool should
2606  // be emitted before using dd.
2607  ASSERT(num_pending_reloc_info_ == 0);
2608  CheckBuffer();
2609  *reinterpret_cast<uint32_t*>(pc_) = data;
2610  pc_ += sizeof(uint32_t);
2611 }
2612 
2613 
2614 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
2615  UseConstantPoolMode mode) {
2616  // We do not try to reuse pool constants.
2617  RelocInfo rinfo(pc_, rmode, data, NULL);
2618  if (((rmode >= RelocInfo::JS_RETURN) &&
2619  (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
2620  (rmode == RelocInfo::CONST_POOL) ||
2621  mode == DONT_USE_CONSTANT_POOL) {
2622  // Adjust code for new modes.
2623  ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2624  || RelocInfo::IsJSReturn(rmode)
2625  || RelocInfo::IsComment(rmode)
2626  || RelocInfo::IsPosition(rmode)
2627  || RelocInfo::IsConstPool(rmode)
2628  || mode == DONT_USE_CONSTANT_POOL);
2629  // These modes do not need an entry in the constant pool.
2630  } else {
2631  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
2632  if (num_pending_reloc_info_ == 0) {
2633  first_const_pool_use_ = pc_offset();
2634  }
2635  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
2636  // Make sure the constant pool is not emitted in place of the next
2637  // instruction for which we just recorded relocation info.
2638  BlockConstPoolFor(1);
2639  }
2640  if (rinfo.rmode() != RelocInfo::NONE) {
2641  // Don't record external references unless the heap will be serialized.
2642  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2643 #ifdef DEBUG
2644  if (!Serializer::enabled()) {
2645  Serializer::TooLateToEnableNow();
2646  }
2647 #endif
2648  if (!Serializer::enabled() && !emit_debug_code()) {
2649  return;
2650  }
2651  }
2652  ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2653  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2654  RelocInfo reloc_info_with_ast_id(pc_,
2655  rmode,
2656  RecordedAstId().ToInt(),
2657  NULL);
2658  ClearRecordedAstId();
2659  reloc_info_writer.Write(&reloc_info_with_ast_id);
2660  } else {
2661  reloc_info_writer.Write(&rinfo);
2662  }
2663  }
2664 }
2665 
2666 
2667 void Assembler::BlockConstPoolFor(int instructions) {
2668  int pc_limit = pc_offset() + instructions * kInstrSize;
2669  if (no_const_pool_before_ < pc_limit) {
2670  // If there are some pending entries, the constant pool cannot be blocked
2671  // further than first_const_pool_use_ + kMaxDistToPool
2672  ASSERT((num_pending_reloc_info_ == 0) ||
2673  (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
2674  no_const_pool_before_ = pc_limit;
2675  }
2676 
2677  if (next_buffer_check_ < no_const_pool_before_) {
2678  next_buffer_check_ = no_const_pool_before_;
2679  }
2680 }
2681 
2682 
2683 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
2684  // Some short sequences of instructions must not be broken up by constant
2685  // pool emission; such sequences are protected by calls to BlockConstPoolFor
2686  // and BlockConstPoolScope.
2687  if (is_const_pool_blocked()) {
2688  // Something is wrong if emission is forced and blocked at the same time.
2689  ASSERT(!force_emit);
2690  return;
2691  }
2692 
2693  // There is nothing to do if there are no pending constant pool entries.
2694  if (num_pending_reloc_info_ == 0) {
2695  // Calculate the offset of the next check.
2696  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
2697  return;
2698  }
2699 
2700  // We emit a constant pool when:
2701  // * requested to do so by parameter force_emit (e.g. after each function).
2702  // * the distance to the first instruction accessing the constant pool is
2703  // kAvgDistToPool or more.
2704  // * no jump is required and the distance to the first instruction accessing
2705  // the constant pool is at least kMaxDistToPool / 2.
2706  ASSERT(first_const_pool_use_ >= 0);
2707  int dist = pc_offset() - first_const_pool_use_;
2708  if (!force_emit && dist < kAvgDistToPool &&
2709  (require_jump || (dist < (kMaxDistToPool / 2)))) {
2710  return;
2711  }
2712 
2713  // Check that the code buffer is large enough before emitting the constant
2714  // pool (including the jump over the pool, the constant pool marker, and
2715  // the gap to the relocation information).
2716  int jump_instr = require_jump ? kInstrSize : 0;
2717  int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
2718  int needed_space = size + kGap;
2719  while (buffer_space() <= needed_space) GrowBuffer();
2720 
2721  {
2722  // Block recursive calls to CheckConstPool.
2723  BlockConstPoolScope block_const_pool(this);
2724  RecordComment("[ Constant Pool");
2725  RecordConstPool(size);
2726 
2727  // Emit jump over constant pool if necessary.
2728  Label after_pool;
2729  if (require_jump) {
2730  b(&after_pool);
2731  }
2732 
2733  // Put down constant pool marker "Undefined instruction" as specified by
2734  // A5.6 (ARMv7) Instruction set encoding.
2735  emit(kConstantPoolMarker | num_pending_reloc_info_);
2736 
2737  // Emit constant pool entries.
2738  for (int i = 0; i < num_pending_reloc_info_; i++) {
2739  RelocInfo& rinfo = pending_reloc_info_[i];
2740  ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2741  rinfo.rmode() != RelocInfo::POSITION &&
2742  rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
2743  rinfo.rmode() != RelocInfo::CONST_POOL);
2744 
2745  Instr instr = instr_at(rinfo.pc());
2746  // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
2747  if (IsLdrPcImmediateOffset(instr) &&
2748  GetLdrRegisterImmediateOffset(instr) == 0) {
2749  int delta = pc_ - rinfo.pc() - kPcLoadDelta;
2750  // 0 is the smallest delta:
2751  // ldr rd, [pc, #0]
2752  // constant pool marker
2753  // data
2754  ASSERT(is_uint12(delta));
2755 
2756  instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
2757  } else {
2758  ASSERT(IsMovW(instr));
2759  }
2760  emit(rinfo.data());
2761  }
2762 
2763  num_pending_reloc_info_ = 0;
2764  first_const_pool_use_ = -1;
2765 
2766  RecordComment("]");
2767 
2768  if (after_pool.is_linked()) {
2769  bind(&after_pool);
2770  }
2771  }
2772 
2773  // Since a constant pool was just emitted, move the check offset forward by
2774  // the standard interval.
2775  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
2776 }
2777 
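// [Editor's note] Illustrative sketch, not part of the original file. The
// emission decision above, folded into a single predicate over the distance
// from the first pool-referencing instruction; avg_dist and max_dist stand in
// for kAvgDistToPool and kMaxDistToPool.
static bool ShouldEmitConstPool(bool force_emit, bool require_jump,
                                int dist, int avg_dist, int max_dist) {
  if (force_emit) return true;                      // e.g. end of function.
  if (dist >= avg_dist) return true;                // Pool is getting far away.
  return !require_jump && (dist >= max_dist / 2);   // Cheap to emit right now.
}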
2778 
2779 } } // namespace v8::internal
2780 
2781 #endif // V8_TARGET_ARCH_ARM