v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
assembler-arm.h
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 // A light-weight ARM Assembler
38 // Generates user mode instructions for the ARM architecture up to version 5
39 
40 #ifndef V8_ARM_ASSEMBLER_ARM_H_
41 #define V8_ARM_ASSEMBLER_ARM_H_
42 #include <stdio.h>
43 #include "assembler.h"
44 #include "constants-arm.h"
45 #include "serialize.h"
46 
47 namespace v8 {
48 namespace internal {
49 
50 // CPU Registers.
51 //
52 // 1) We would prefer to use an enum, but enum values are assignment-
53 // compatible with int, which has caused code-generation bugs.
54 //
55 // 2) We would prefer to use a class instead of a struct but we don't like
56 // the register initialization to depend on the particular initialization
57 // order (which appears to be different on OS X, Linux, and Windows for the
58 // installed versions of C++ we tried). Using a struct permits C-style
59 // "initialization". Also, the Register objects cannot be const as this
60 // forces initialization stubs in MSVC, making us dependent on initialization
61 // order.
62 //
63 // 3) By not using an enum, we are possibly preventing the compiler from
64 // doing certain constant folds, which may significantly reduce the
65 // code generated for some assembly instructions (because they boil down
66 // to a few constants). If this is a problem, we could change the code
67 // such that we use an enum in optimized mode, and the struct in debug
68 // mode. This way we get the compile-time error checking in debug mode
69 // and best performance in optimized code.
70 
71 // Core register
72 struct Register {
73  static const int kNumRegisters = 16;
74  static const int kNumAllocatableRegisters = 8;
75  static const int kSizeInBytes = 4;
76 
77  static int ToAllocationIndex(Register reg) {
78  ASSERT(reg.code() < kNumAllocatableRegisters);
79  return reg.code();
80  }
81 
82  static Register FromAllocationIndex(int index) {
83  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
84  return from_code(index);
85  }
86 
87  static const char* AllocationIndexToString(int index) {
88  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
89  const char* const names[] = {
90  "r0",
91  "r1",
92  "r2",
93  "r3",
94  "r4",
95  "r5",
96  "r6",
97  "r7",
98  };
99  return names[index];
100  }
101 
102  static Register from_code(int code) {
103  Register r = { code };
104  return r;
105  }
106 
107  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
108  bool is(Register reg) const { return code_ == reg.code_; }
109  int code() const {
110  ASSERT(is_valid());
111  return code_;
112  }
113  int bit() const {
114  ASSERT(is_valid());
115  return 1 << code_;
116  }
117 
118  void set_code(int code) {
119  code_ = code;
120  ASSERT(is_valid());
121  }
122 
123  // Unfortunately we can't make this private in a struct.
124  int code_;
125 };
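// Illustrative sketch, not part of the original header: because Register is a
// plain struct, register values can be created with C-style brace
// initialization (exactly how the r0..pc constants below are defined) and
// compared or turned into bit masks without running any static initializers.
// The helper name is hypothetical.
inline int SampleRegisterBits() {
  Register scratch = { 4 };                     // same encoding as r4
  ASSERT(scratch.is(Register::from_code(4)));   // identity is by code
  return scratch.bit() | Register::from_code(0).bit();  // mask for {r4, r0}
}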
126 
127 // These constants are used in several locations, including static initializers
128 const int kRegister_no_reg_Code = -1;
129 const int kRegister_r0_Code = 0;
130 const int kRegister_r1_Code = 1;
131 const int kRegister_r2_Code = 2;
132 const int kRegister_r3_Code = 3;
133 const int kRegister_r4_Code = 4;
134 const int kRegister_r5_Code = 5;
135 const int kRegister_r6_Code = 6;
136 const int kRegister_r7_Code = 7;
137 const int kRegister_r8_Code = 8;
138 const int kRegister_r9_Code = 9;
139 const int kRegister_r10_Code = 10;
140 const int kRegister_fp_Code = 11;
141 const int kRegister_ip_Code = 12;
142 const int kRegister_sp_Code = 13;
143 const int kRegister_lr_Code = 14;
144 const int kRegister_pc_Code = 15;
145 
146 const Register no_reg = { kRegister_no_reg_Code };
147 
148 const Register r0 = { kRegister_r0_Code };
149 const Register r1 = { kRegister_r1_Code };
150 const Register r2 = { kRegister_r2_Code };
151 const Register r3 = { kRegister_r3_Code };
152 const Register r4 = { kRegister_r4_Code };
153 const Register r5 = { kRegister_r5_Code };
154 const Register r6 = { kRegister_r6_Code };
155 const Register r7 = { kRegister_r7_Code };
156 // Used as context register.
157 const Register r8 = { kRegister_r8_Code };
158 // Used as lithium codegen scratch register.
159 const Register r9 = { kRegister_r9_Code };
160 // Used as roots register.
161 const Register r10 = { kRegister_r10_Code };
162 const Register fp = { kRegister_fp_Code };
163 const Register ip = { kRegister_ip_Code };
164 const Register sp = { kRegister_sp_Code };
165 const Register lr = { kRegister_lr_Code };
166 const Register pc = { kRegister_pc_Code };
167 
168 
169 // Single word VFP register.
170 struct SwVfpRegister {
171  bool is_valid() const { return 0 <= code_ && code_ < 32; }
172  bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
173  int code() const {
174  ASSERT(is_valid());
175  return code_;
176  }
177  int bit() const {
178  ASSERT(is_valid());
179  return 1 << code_;
180  }
181  void split_code(int* vm, int* m) const {
182  ASSERT(is_valid());
183  *m = code_ & 0x1;
184  *vm = code_ >> 1;
185  }
186 
187  int code_;
188 };
189 
190 
191 // Double word VFP register.
192 struct DwVfpRegister {
193  static const int kNumRegisters = 16;
194  // A few double registers are reserved: one as a scratch register and one to
195  // hold 0.0, which does not fit in the immediate field of vmov instructions.
196  // d14: 0.0
197  // d15: scratch register.
198  static const int kNumReservedRegisters = 2;
199  static const int kNumAllocatableRegisters = kNumRegisters -
200  kNumReservedRegisters;
201 
202  inline static int ToAllocationIndex(DwVfpRegister reg);
203 
204  static DwVfpRegister FromAllocationIndex(int index) {
205  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
206  return from_code(index);
207  }
208 
209  static const char* AllocationIndexToString(int index) {
210  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
211  const char* const names[] = {
212  "d0",
213  "d1",
214  "d2",
215  "d3",
216  "d4",
217  "d5",
218  "d6",
219  "d7",
220  "d8",
221  "d9",
222  "d10",
223  "d11",
224  "d12",
225  "d13"
226  };
227  return names[index];
228  }
229 
230  static DwVfpRegister from_code(int code) {
231  DwVfpRegister r = { code };
232  return r;
233  }
234 
235  // Supports d0 to d15; can later be extended to d31.
236  bool is_valid() const { return 0 <= code_ && code_ < 16; }
237  bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
238  SwVfpRegister low() const {
239  SwVfpRegister reg;
240  reg.code_ = code_ * 2;
241 
242  ASSERT(reg.is_valid());
243  return reg;
244  }
245  SwVfpRegister high() const {
246  SwVfpRegister reg;
247  reg.code_ = (code_ * 2) + 1;
248 
249  ASSERT(reg.is_valid());
250  return reg;
251  }
252  int code() const {
253  ASSERT(is_valid());
254  return code_;
255  }
256  int bit() const {
257  ASSERT(is_valid());
258  return 1 << code_;
259  }
260  void split_code(int* vm, int* m) const {
261  ASSERT(is_valid());
262  *m = (code_ & 0x10) >> 4;
263  *vm = code_ & 0x0F;
264  }
265 
266  int code_;
267 };
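// Illustrative sketch, not part of the original header: a double register dN
// overlaps the two single registers s(2N) and s(2N+1), which low() and high()
// return. The helper name is hypothetical.
inline int SampleDwVfpSplit() {
  DwVfpRegister d = DwVfpRegister::from_code(5);
  SwVfpRegister lo = d.low();     // s10
  SwVfpRegister hi = d.high();    // s11
  return lo.code() + hi.code();   // 10 + 11
}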
268 
269 
270 typedef DwVfpRegister DoubleRegister;
271 
272 
273 // Support for the VFP registers s0 to s31 (d0 to d15).
274 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
275 const SwVfpRegister s0 = { 0 };
276 const SwVfpRegister s1 = { 1 };
277 const SwVfpRegister s2 = { 2 };
278 const SwVfpRegister s3 = { 3 };
279 const SwVfpRegister s4 = { 4 };
280 const SwVfpRegister s5 = { 5 };
281 const SwVfpRegister s6 = { 6 };
282 const SwVfpRegister s7 = { 7 };
283 const SwVfpRegister s8 = { 8 };
284 const SwVfpRegister s9 = { 9 };
285 const SwVfpRegister s10 = { 10 };
286 const SwVfpRegister s11 = { 11 };
287 const SwVfpRegister s12 = { 12 };
288 const SwVfpRegister s13 = { 13 };
289 const SwVfpRegister s14 = { 14 };
290 const SwVfpRegister s15 = { 15 };
291 const SwVfpRegister s16 = { 16 };
292 const SwVfpRegister s17 = { 17 };
293 const SwVfpRegister s18 = { 18 };
294 const SwVfpRegister s19 = { 19 };
295 const SwVfpRegister s20 = { 20 };
296 const SwVfpRegister s21 = { 21 };
297 const SwVfpRegister s22 = { 22 };
298 const SwVfpRegister s23 = { 23 };
299 const SwVfpRegister s24 = { 24 };
300 const SwVfpRegister s25 = { 25 };
301 const SwVfpRegister s26 = { 26 };
302 const SwVfpRegister s27 = { 27 };
303 const SwVfpRegister s28 = { 28 };
304 const SwVfpRegister s29 = { 29 };
305 const SwVfpRegister s30 = { 30 };
306 const SwVfpRegister s31 = { 31 };
307 
308 const DwVfpRegister no_dreg = { -1 };
309 const DwVfpRegister d0 = { 0 };
310 const DwVfpRegister d1 = { 1 };
311 const DwVfpRegister d2 = { 2 };
312 const DwVfpRegister d3 = { 3 };
313 const DwVfpRegister d4 = { 4 };
314 const DwVfpRegister d5 = { 5 };
315 const DwVfpRegister d6 = { 6 };
316 const DwVfpRegister d7 = { 7 };
317 const DwVfpRegister d8 = { 8 };
318 const DwVfpRegister d9 = { 9 };
319 const DwVfpRegister d10 = { 10 };
320 const DwVfpRegister d11 = { 11 };
321 const DwVfpRegister d12 = { 12 };
322 const DwVfpRegister d13 = { 13 };
323 const DwVfpRegister d14 = { 14 };
324 const DwVfpRegister d15 = { 15 };
325 
326 // Aliases for double registers. Defined using #define instead of
327 // "static const DwVfpRegister&" because Clang complains otherwise when a
328 // compilation unit that includes this header doesn't use the variables.
329 #define kFirstCalleeSavedDoubleReg d8
330 #define kLastCalleeSavedDoubleReg d15
331 #define kDoubleRegZero d14
332 #define kScratchDoubleReg d15
333 
334 
335 // Coprocessor register
336 struct CRegister {
337  bool is_valid() const { return 0 <= code_ && code_ < 16; }
338  bool is(CRegister creg) const { return code_ == creg.code_; }
339  int code() const {
340  ASSERT(is_valid());
341  return code_;
342  }
343  int bit() const {
344  ASSERT(is_valid());
345  return 1 << code_;
346  }
347 
348  // Unfortunately we can't make this private in a struct.
349  int code_;
350 };
351 
352 
353 const CRegister no_creg = { -1 };
354 
355 const CRegister cr0 = { 0 };
356 const CRegister cr1 = { 1 };
357 const CRegister cr2 = { 2 };
358 const CRegister cr3 = { 3 };
359 const CRegister cr4 = { 4 };
360 const CRegister cr5 = { 5 };
361 const CRegister cr6 = { 6 };
362 const CRegister cr7 = { 7 };
363 const CRegister cr8 = { 8 };
364 const CRegister cr9 = { 9 };
365 const CRegister cr10 = { 10 };
366 const CRegister cr11 = { 11 };
367 const CRegister cr12 = { 12 };
368 const CRegister cr13 = { 13 };
369 const CRegister cr14 = { 14 };
370 const CRegister cr15 = { 15 };
371 
372 
373 // Coprocessor number
374 enum Coprocessor {
375  p0 = 0,
376  p1 = 1,
377  p2 = 2,
378  p3 = 3,
379  p4 = 4,
380  p5 = 5,
381  p6 = 6,
382  p7 = 7,
383  p8 = 8,
384  p9 = 9,
385  p10 = 10,
386  p11 = 11,
387  p12 = 12,
388  p13 = 13,
389  p14 = 14,
390  p15 = 15
391 };
392 
393 
394 // -----------------------------------------------------------------------------
395 // Machine instruction Operands
396 
397 // Class Operand represents a shifter operand in data processing instructions
398 class Operand BASE_EMBEDDED {
399  public:
400  // immediate
401  INLINE(explicit Operand(int32_t immediate,
402  RelocInfo::Mode rmode = RelocInfo::NONE));
403  INLINE(static Operand Zero()) {
404  return Operand(static_cast<int32_t>(0));
405  }
406  INLINE(explicit Operand(const ExternalReference& f));
407  explicit Operand(Handle<Object> handle);
408  INLINE(explicit Operand(Smi* value));
409 
410  // rm
411  INLINE(explicit Operand(Register rm));
412 
413  // rm <shift_op> shift_imm
414  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
415 
416  // rm <shift_op> rs
417  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
418 
419  // Return true if this is a register operand.
420  INLINE(bool is_reg() const);
421 
422  // Return true if this operand fits in one instruction so that no
423  // 2-instruction solution with a load into the ip register is necessary. If
424  // the instruction this operand is used for is a MOV or MVN instruction the
425  // actual instruction to use is required for this calculation. For other
426  // instructions instr is ignored.
427  bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
428  bool must_output_reloc_info(const Assembler* assembler) const;
429 
430  inline int32_t immediate() const {
431  ASSERT(!rm_.is_valid());
432  return imm32_;
433  }
434 
435  Register rm() const { return rm_; }
436  Register rs() const { return rs_; }
437  ShiftOp shift_op() const { return shift_op_; }
438 
439  private:
440  Register rm_;
441  Register rs_;
442  ShiftOp shift_op_;
443  int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
444  int32_t imm32_; // valid if rm_ == no_reg
445  RelocInfo::Mode rmode_;
446 
447  friend class Assembler;
448 };
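// Usage sketch (not part of the original header), mirroring the constructors
// above; LSL and LSR are ShiftOp values from constants-arm.h:
//   Operand(42)              // immediate:            #42
//   Operand(r2)              // register:             r2
//   Operand(r2, LSL, 3)      // shifted by immediate: r2, lsl #3
//   Operand(r2, LSR, r3)     // shifted by register:  r2, lsr r3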
449 
450 
451 // Class MemOperand represents a memory operand in load and store instructions
452 class MemOperand BASE_EMBEDDED {
453  public:
454  // [rn +/- offset] Offset/NegOffset
455  // [rn +/- offset]! PreIndex/NegPreIndex
456  // [rn], +/- offset PostIndex/NegPostIndex
457  // offset is any signed 32-bit value; offset is first loaded to register ip if
458  // it does not fit the addressing mode (12-bit unsigned and sign bit)
459  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
460 
461  // [rn +/- rm] Offset/NegOffset
462  // [rn +/- rm]! PreIndex/NegPreIndex
463  // [rn], +/- rm PostIndex/NegPostIndex
464  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
465 
466  // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
467  // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
468  // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
469  explicit MemOperand(Register rn, Register rm,
470  ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
471 
472  void set_offset(int32_t offset) {
473  ASSERT(rm_.is(no_reg));
474  offset_ = offset;
475  }
476 
477  uint32_t offset() const {
478  ASSERT(rm_.is(no_reg));
479  return offset_;
480  }
481 
482  Register rn() const { return rn_; }
483  Register rm() const { return rm_; }
484  AddrMode am() const { return am_; }
485 
486  bool OffsetIsUint12Encodable() const {
487  return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
488  }
489 
490  private:
491  Register rn_; // base
492  Register rm_; // register offset
493  int32_t offset_; // valid if rm_ == no_reg
494  ShiftOp shift_op_;
495  int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
496  AddrMode am_; // bits P, U, and W
497 
498  friend class Assembler;
499 };
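// Usage sketch (not part of the original header), mirroring the constructors
// and addressing modes above:
//   MemOperand(fp, 8)                 // [fp, #8]          Offset
//   MemOperand(sp, 4, NegPreIndex)    // [sp, #-4]!        pre-decrement
//   MemOperand(sp, 4, PostIndex)      // [sp], #4          post-increment
//   MemOperand(r0, r1, LSL, 2)        // [r0, r1, lsl #2]  scaled register offset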
500 
501 // CpuFeatures keeps track of which features are supported by the target CPU.
502 // Supported features must be enabled by a Scope before use.
503 class CpuFeatures : public AllStatic {
504  public:
505  // Detect features of the target CPU. Set safe defaults if the serializer
506  // is enabled (snapshots must be portable).
507  static void Probe();
508 
509  // Check whether a feature is supported by the target CPU.
510  static bool IsSupported(CpuFeature f) {
511  ASSERT(initialized_);
512  if (f == VFP3 && !FLAG_enable_vfp3) return false;
513  if (f == VFP2 && !FLAG_enable_vfp2) return false;
514  if (f == SUDIV && !FLAG_enable_sudiv) return false;
515  if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
516  return false;
517  }
518  return (supported_ & (1u << f)) != 0;
519  }
520 
521 #ifdef DEBUG
522  // Check whether a feature is currently enabled.
523  static bool IsEnabled(CpuFeature f) {
524  ASSERT(initialized_);
525  Isolate* isolate = Isolate::UncheckedCurrent();
526  if (isolate == NULL) {
527  // When no isolate is available, work as if we're running in
528  // release mode.
529  return IsSupported(f);
530  }
531  unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
532  return (enabled & (1u << f)) != 0;
533  }
534 #endif
535 
536  // Enable a specified feature within a scope.
537  class Scope BASE_EMBEDDED {
538 #ifdef DEBUG
539 
540  public:
541  explicit Scope(CpuFeature f) {
542  unsigned mask = 1u << f;
543  // VFP2 and ARMv7 are implied by VFP3.
544  if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
545  ASSERT(CpuFeatures::IsSupported(f));
546  ASSERT(!Serializer::enabled() ||
547  (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
548  isolate_ = Isolate::UncheckedCurrent();
549  old_enabled_ = 0;
550  if (isolate_ != NULL) {
551  old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
552  isolate_->set_enabled_cpu_features(old_enabled_ | mask);
553  }
554  }
555  ~Scope() {
556  ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
557  if (isolate_ != NULL) {
558  isolate_->set_enabled_cpu_features(old_enabled_);
559  }
560  }
561 
562  private:
563  Isolate* isolate_;
564  unsigned old_enabled_;
565 #else
566 
567  public:
568  explicit Scope(CpuFeature f) {}
569 #endif
570  };
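  // Typical usage (sketch, not part of the original header): check support
  // first, then enable the feature for the code that uses it:
  //   if (CpuFeatures::IsSupported(VFP3)) {
  //     CpuFeatures::Scope scope(VFP3);
  //     // ... emit VFP3 instructions here ...
  //   }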
571 
572  class TryForceFeatureScope BASE_EMBEDDED {
573  public:
574  explicit TryForceFeatureScope(CpuFeature f)
575  : old_supported_(CpuFeatures::supported_) {
576  if (CanForce()) {
577  CpuFeatures::supported_ |= (1u << f);
578  }
579  }
580 
581  ~TryForceFeatureScope() {
582  if (CanForce()) {
583  CpuFeatures::supported_ = old_supported_;
584  }
585  }
586 
587  private:
588  static bool CanForce() {
589  // It's only safe to temporarily force support of CPU features
590  // when there's only a single isolate, which is guaranteed when
591  // the serializer is enabled.
592  return Serializer::enabled();
593  }
594 
595  const unsigned old_supported_;
596  };
597 
598  private:
599 #ifdef DEBUG
600  static bool initialized_;
601 #endif
602  static unsigned supported_;
603  static unsigned found_by_runtime_probing_;
604 
605  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
606 };
607 
608 
609 extern const Instr kMovLrPc;
610 extern const Instr kLdrPCMask;
611 extern const Instr kLdrPCPattern;
612 extern const Instr kBlxRegMask;
613 extern const Instr kBlxRegPattern;
614 extern const Instr kBlxIp;
615 
616 extern const Instr kMovMvnMask;
617 extern const Instr kMovMvnPattern;
618 extern const Instr kMovMvnFlip;
619 
620 extern const Instr kMovLeaveCCMask;
621 extern const Instr kMovLeaveCCPattern;
622 extern const Instr kMovwMask;
623 extern const Instr kMovwPattern;
624 extern const Instr kMovwLeaveCCFlip;
625 
626 extern const Instr kCmpCmnMask;
627 extern const Instr kCmpCmnPattern;
628 extern const Instr kCmpCmnFlip;
629 extern const Instr kAddSubFlip;
630 extern const Instr kAndBicFlip;
631 
632 
633 
634 class Assembler : public AssemblerBase {
635  public:
636  // Create an assembler. Instructions and relocation information are emitted
637  // into a buffer, with the instructions starting from the beginning and the
638  // relocation information starting from the end of the buffer. See CodeDesc
639  // for a detailed comment on the layout (globals.h).
640  //
641  // If the provided buffer is NULL, the assembler allocates and grows its own
642  // buffer, and buffer_size determines the initial buffer size. The buffer is
643  // owned by the assembler and deallocated upon destruction of the assembler.
644  //
645  // If the provided buffer is not NULL, the assembler uses the provided buffer
646  // for code generation and assumes its size to be buffer_size. If the buffer
647  // is too small, a fatal error occurs. No deallocation of the buffer is done
648  // upon destruction of the assembler.
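 // (A minimal usage sketch of this class follows the class definition below.)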
649  Assembler(Isolate* isolate, void* buffer, int buffer_size);
650  ~Assembler();
651 
652  // Overrides the default provided by FLAG_debug_code.
653  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
654 
655  // Avoids using instructions that vary in size in unpredictable ways between
656  // the snapshot and the running VM. This is needed by the full compiler so
657  // that it can recompile code with debug support and fix the PC.
658  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
659 
660  // GetCode emits any pending (non-emitted) code and fills the descriptor
661  // desc. GetCode() is idempotent; it returns the same result if no other
662  // Assembler functions are invoked in between GetCode() calls.
663  void GetCode(CodeDesc* desc);
664 
665  // Label operations & relative jumps (PPUM Appendix D)
666  //
667  // Takes a branch opcode (cc) and a label (L) and generates
668  // either a backward branch or a forward branch and links it
669  // to the label fixup chain. Usage:
670  //
671  // Label L; // unbound label
672  // j(cc, &L); // forward branch to unbound label
673  // bind(&L); // bind label to the current pc
674  // j(cc, &L); // backward branch to bound label
675  // bind(&L); // illegal: a label may be bound only once
676  //
677  // Note: The same Label can be used for forward and backward branches
678  // but it may be bound only once.
679 
680  void bind(Label* L); // binds an unbound label L to the current code position
681 
682  // Returns the branch offset to the given label from the current code position.
683  // Links the label to the current position if it is still unbound.
684  // Manages the jump elimination optimization if the second parameter is true.
685  int branch_offset(Label* L, bool jump_elimination_allowed);
686 
687  // Puts a label's target address at the given position.
688  // The high 8 bits are set to zero.
689  void label_at_put(Label* L, int at_offset);
690 
691  // Return the address in the constant pool of the code target address used by
692  // the branch/call instruction at pc, or the object in a mov.
693  INLINE(static Address target_pointer_address_at(Address pc));
694 
695  // Read/Modify the pointer in the branch/call/move instruction at pc.
696  INLINE(static Address target_pointer_at(Address pc));
697  INLINE(static void set_target_pointer_at(Address pc, Address target));
698 
699  // Read/Modify the code target address in the branch/call instruction at pc.
700  INLINE(static Address target_address_at(Address pc));
701  INLINE(static void set_target_address_at(Address pc, Address target));
702 
703  // Return the code target address at a call site from the return address
704  // of that call in the instruction stream.
705  INLINE(static Address target_address_from_return_address(Address pc));
706 
707  // Given the address of the beginning of a call, return the address
708  // in the instruction stream that the call will return from.
709  INLINE(static Address return_address_from_call_start(Address pc));
710 
711  // This sets the branch destination (which is in the constant pool on ARM).
712  // This is for calls and branches within generated code.
713  inline static void deserialization_set_special_target_at(
714  Address constant_pool_entry, Address target);
715 
716  // This sets the branch destination (which is in the constant pool on ARM).
717  // This is for calls and branches to runtime code.
718  inline static void set_external_target_at(Address constant_pool_entry,
719  Address target);
720 
721  // Here we are patching the address in the constant pool, not the actual call
722  // instruction. The address in the constant pool is the same size as a
723  // pointer.
724  static const int kSpecialTargetSize = kPointerSize;
725 
726  // Size of an instruction.
727  static const int kInstrSize = sizeof(Instr);
728 
729  // Distance between start of patched return sequence and the emitted address
730  // to jump to.
731 #ifdef USE_BLX
732  // Patched return sequence is:
733  // ldr ip, [pc, #0] @ emitted address and start
734  // blx ip
735  static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
736 #else
737  // Patched return sequence is:
738  // mov lr, pc @ start of sequence
739  // ldr pc, [pc, #-4] @ emitted address
740  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
741 #endif
742 
743  // Distance between start of patched debug break slot and the emitted address
744  // to jump to.
745 #ifdef USE_BLX
746  // Patched debug break slot code is:
747  // ldr ip, [pc, #0] @ emitted address and start
748  // blx ip
749  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
750 #else
751  // Patched debug break slot code is:
752  // mov lr, pc @ start of sequence
753  // ldr pc, [pc, #-4] @ emitted address
754  static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
755 #endif
756 
757 #ifdef USE_BLX
758  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
759 #else
760  static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
761 #endif
762 
763  // Difference between address of current opcode and value read from pc
764  // register.
765  static const int kPcLoadDelta = 8;
766 
767  static const int kJSReturnSequenceInstructions = 4;
768  static const int kDebugBreakSlotInstructions = 3;
769  static const int kDebugBreakSlotLength =
770  kDebugBreakSlotInstructions * kInstrSize;
771 
772  // ---------------------------------------------------------------------------
773  // Code generation
774 
775  // Insert the smallest number of nop instructions
776  // possible to align the pc offset to a multiple
777  // of m. m must be a power of 2 (>= 4).
778  void Align(int m);
779  // Aligns code to something that's optimal for a jump target for the platform.
780  void CodeTargetAlign();
781 
782  // Branch instructions
783  void b(int branch_offset, Condition cond = al);
784  void bl(int branch_offset, Condition cond = al);
785  void blx(int branch_offset); // v5 and above
786  void blx(Register target, Condition cond = al); // v5 and above
787  void bx(Register target, Condition cond = al); // v5 and above, plus v4t
788 
789  // Convenience branch instructions using labels
790  void b(Label* L, Condition cond = al) {
791  b(branch_offset(L, cond == al), cond);
792  }
793  void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
794  void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
795  void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
796  void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
797 
798  // Data-processing instructions
799 
800  void and_(Register dst, Register src1, const Operand& src2,
801  SBit s = LeaveCC, Condition cond = al);
802 
803  void eor(Register dst, Register src1, const Operand& src2,
804  SBit s = LeaveCC, Condition cond = al);
805 
806  void sub(Register dst, Register src1, const Operand& src2,
807  SBit s = LeaveCC, Condition cond = al);
808  void sub(Register dst, Register src1, Register src2,
809  SBit s = LeaveCC, Condition cond = al) {
810  sub(dst, src1, Operand(src2), s, cond);
811  }
812 
813  void rsb(Register dst, Register src1, const Operand& src2,
814  SBit s = LeaveCC, Condition cond = al);
815 
816  void add(Register dst, Register src1, const Operand& src2,
817  SBit s = LeaveCC, Condition cond = al);
818  void add(Register dst, Register src1, Register src2,
819  SBit s = LeaveCC, Condition cond = al) {
820  add(dst, src1, Operand(src2), s, cond);
821  }
822 
823  void adc(Register dst, Register src1, const Operand& src2,
824  SBit s = LeaveCC, Condition cond = al);
825 
826  void sbc(Register dst, Register src1, const Operand& src2,
827  SBit s = LeaveCC, Condition cond = al);
828 
829  void rsc(Register dst, Register src1, const Operand& src2,
830  SBit s = LeaveCC, Condition cond = al);
831 
832  void tst(Register src1, const Operand& src2, Condition cond = al);
833  void tst(Register src1, Register src2, Condition cond = al) {
834  tst(src1, Operand(src2), cond);
835  }
836 
837  void teq(Register src1, const Operand& src2, Condition cond = al);
838 
839  void cmp(Register src1, const Operand& src2, Condition cond = al);
840  void cmp(Register src1, Register src2, Condition cond = al) {
841  cmp(src1, Operand(src2), cond);
842  }
843  void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
844 
845  void cmn(Register src1, const Operand& src2, Condition cond = al);
846 
847  void orr(Register dst, Register src1, const Operand& src2,
848  SBit s = LeaveCC, Condition cond = al);
849  void orr(Register dst, Register src1, Register src2,
850  SBit s = LeaveCC, Condition cond = al) {
851  orr(dst, src1, Operand(src2), s, cond);
852  }
853 
854  void mov(Register dst, const Operand& src,
855  SBit s = LeaveCC, Condition cond = al);
856  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
857  mov(dst, Operand(src), s, cond);
858  }
859 
860  // ARMv7 instructions for loading a 32 bit immediate in two instructions.
861  // This may actually emit a different mov instruction, but on an ARMv7 it
862  // is guaranteed to only emit one instruction.
863  void movw(Register reg, uint32_t immediate, Condition cond = al);
864  // The constant for movt should be in the range 0-0xffff.
865  void movt(Register reg, uint32_t immediate, Condition cond = al);
866 
867  void bic(Register dst, Register src1, const Operand& src2,
868  SBit s = LeaveCC, Condition cond = al);
869 
870  void mvn(Register dst, const Operand& src,
871  SBit s = LeaveCC, Condition cond = al);
872 
873  // Multiply instructions
874 
875  void mla(Register dst, Register src1, Register src2, Register srcA,
876  SBit s = LeaveCC, Condition cond = al);
877 
878  void mls(Register dst, Register src1, Register src2, Register srcA,
879  Condition cond = al);
880 
881  void sdiv(Register dst, Register src1, Register src2,
882  Condition cond = al);
883 
884  void mul(Register dst, Register src1, Register src2,
885  SBit s = LeaveCC, Condition cond = al);
886 
887  void smlal(Register dstL, Register dstH, Register src1, Register src2,
888  SBit s = LeaveCC, Condition cond = al);
889 
890  void smull(Register dstL, Register dstH, Register src1, Register src2,
891  SBit s = LeaveCC, Condition cond = al);
892 
893  void umlal(Register dstL, Register dstH, Register src1, Register src2,
894  SBit s = LeaveCC, Condition cond = al);
895 
896  void umull(Register dstL, Register dstH, Register src1, Register src2,
897  SBit s = LeaveCC, Condition cond = al);
898 
899  // Miscellaneous arithmetic instructions
900 
901  void clz(Register dst, Register src, Condition cond = al); // v5 and above
902 
903  // Saturating instructions. v6 and above.
904 
905  // Unsigned saturate.
906  //
907  // Saturate an optionally shifted signed value to an unsigned range.
908  //
909  // usat dst, #satpos, src
910  // usat dst, #satpos, src, lsl #sh
911  // usat dst, #satpos, src, asr #sh
912  //
913  // Register dst will contain:
914  //
915  // 0, if s < 0
916  // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
917  // s, otherwise
918  //
919  // where s is the contents of src after shifting (if used).
920  void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
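  // For example (sketch, not from the original header):
  //   usat(r0, 8, Operand(r1, ASR, 2));  // r0 = clamp(r1 >> 2, 0, 255)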
921 
922  // Bitfield manipulation instructions. v7 and above.
923 
924  void ubfx(Register dst, Register src, int lsb, int width,
925  Condition cond = al);
926 
927  void sbfx(Register dst, Register src, int lsb, int width,
928  Condition cond = al);
929 
930  void bfc(Register dst, int lsb, int width, Condition cond = al);
931 
932  void bfi(Register dst, Register src, int lsb, int width,
933  Condition cond = al);
934 
935  // Status register access instructions
936 
937  void mrs(Register dst, SRegister s, Condition cond = al);
938  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
939 
940  // Load/Store instructions
941  void ldr(Register dst, const MemOperand& src, Condition cond = al);
942  void str(Register src, const MemOperand& dst, Condition cond = al);
943  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
944  void strb(Register src, const MemOperand& dst, Condition cond = al);
945  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
946  void strh(Register src, const MemOperand& dst, Condition cond = al);
947  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
948  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
949  void ldrd(Register dst1,
950  Register dst2,
951  const MemOperand& src, Condition cond = al);
952  void strd(Register src1,
953  Register src2,
954  const MemOperand& dst, Condition cond = al);
955 
956  // Load/Store multiple instructions
957  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
958  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
959 
960  // Exception-generating instructions and debugging support
961  void stop(const char* msg,
962  Condition cond = al,
963  int32_t code = kDefaultStopCode);
964 
965  void bkpt(uint32_t imm16); // v5 and above
966  void svc(uint32_t imm24, Condition cond = al);
967 
968  // Coprocessor instructions
969 
970  void cdp(Coprocessor coproc, int opcode_1,
971  CRegister crd, CRegister crn, CRegister crm,
972  int opcode_2, Condition cond = al);
973 
974  void cdp2(Coprocessor coproc, int opcode_1,
975  CRegister crd, CRegister crn, CRegister crm,
976  int opcode_2); // v5 and above
977 
978  void mcr(Coprocessor coproc, int opcode_1,
979  Register rd, CRegister crn, CRegister crm,
980  int opcode_2 = 0, Condition cond = al);
981 
982  void mcr2(Coprocessor coproc, int opcode_1,
983  Register rd, CRegister crn, CRegister crm,
984  int opcode_2 = 0); // v5 and above
985 
986  void mrc(Coprocessor coproc, int opcode_1,
987  Register rd, CRegister crn, CRegister crm,
988  int opcode_2 = 0, Condition cond = al);
989 
990  void mrc2(Coprocessor coproc, int opcode_1,
991  Register rd, CRegister crn, CRegister crm,
992  int opcode_2 = 0); // v5 and above
993 
994  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
995  LFlag l = Short, Condition cond = al);
996  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
997  LFlag l = Short, Condition cond = al);
998 
999  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
1000  LFlag l = Short); // v5 and above
1001  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
1002  LFlag l = Short); // v5 and above
1003 
1004  // Support for VFP.
1005  // All these APIs support S0 to S31 and D0 to D15.
1006  // Currently these APIs do not support extended D registers, i.e., D16 to D31.
1007  // However, some simple modifications can allow
1008  // these APIs to support D16 to D31.
1009 
1010  void vldr(const DwVfpRegister dst,
1011  const Register base,
1012  int offset,
1013  const Condition cond = al);
1014  void vldr(const DwVfpRegister dst,
1015  const MemOperand& src,
1016  const Condition cond = al);
1017 
1018  void vldr(const SwVfpRegister dst,
1019  const Register base,
1020  int offset,
1021  const Condition cond = al);
1022  void vldr(const SwVfpRegister dst,
1023  const MemOperand& src,
1024  const Condition cond = al);
1025 
1026  void vstr(const DwVfpRegister src,
1027  const Register base,
1028  int offset,
1029  const Condition cond = al);
1030  void vstr(const DwVfpRegister src,
1031  const MemOperand& dst,
1032  const Condition cond = al);
1033 
1034  void vstr(const SwVfpRegister src,
1035  const Register base,
1036  int offset,
1037  const Condition cond = al);
1038  void vstr(const SwVfpRegister src,
1039  const MemOperand& dst,
1040  const Condition cond = al);
1041 
1042  void vldm(BlockAddrMode am,
1043  Register base,
1044  DwVfpRegister first,
1045  DwVfpRegister last,
1046  Condition cond = al);
1047 
1048  void vstm(BlockAddrMode am,
1049  Register base,
1050  DwVfpRegister first,
1051  DwVfpRegister last,
1052  Condition cond = al);
1053 
1054  void vldm(BlockAddrMode am,
1055  Register base,
1056  SwVfpRegister first,
1057  SwVfpRegister last,
1058  Condition cond = al);
1059 
1060  void vstm(BlockAddrMode am,
1061  Register base,
1062  SwVfpRegister first,
1063  SwVfpRegister last,
1064  Condition cond = al);
1065 
1066  void vmov(const DwVfpRegister dst,
1067  double imm,
1068  const Register scratch = no_reg,
1069  const Condition cond = al);
1070  void vmov(const SwVfpRegister dst,
1071  const SwVfpRegister src,
1072  const Condition cond = al);
1073  void vmov(const DwVfpRegister dst,
1074  const DwVfpRegister src,
1075  const Condition cond = al);
1076  void vmov(const DwVfpRegister dst,
1077  const Register src1,
1078  const Register src2,
1079  const Condition cond = al);
1080  void vmov(const Register dst1,
1081  const Register dst2,
1082  const DwVfpRegister src,
1083  const Condition cond = al);
1084  void vmov(const SwVfpRegister dst,
1085  const Register src,
1086  const Condition cond = al);
1087  void vmov(const Register dst,
1088  const SwVfpRegister src,
1089  const Condition cond = al);
1090  void vcvt_f64_s32(const DwVfpRegister dst,
1091  const SwVfpRegister src,
1092  VFPConversionMode mode = kDefaultRoundToZero,
1093  const Condition cond = al);
1094  void vcvt_f32_s32(const SwVfpRegister dst,
1095  const SwVfpRegister src,
1096  VFPConversionMode mode = kDefaultRoundToZero,
1097  const Condition cond = al);
1098  void vcvt_f64_u32(const DwVfpRegister dst,
1099  const SwVfpRegister src,
1100  VFPConversionMode mode = kDefaultRoundToZero,
1101  const Condition cond = al);
1102  void vcvt_s32_f64(const SwVfpRegister dst,
1103  const DwVfpRegister src,
1104  VFPConversionMode mode = kDefaultRoundToZero,
1105  const Condition cond = al);
1106  void vcvt_u32_f64(const SwVfpRegister dst,
1107  const DwVfpRegister src,
1108  VFPConversionMode mode = kDefaultRoundToZero,
1109  const Condition cond = al);
1110  void vcvt_f64_f32(const DwVfpRegister dst,
1111  const SwVfpRegister src,
1112  VFPConversionMode mode = kDefaultRoundToZero,
1113  const Condition cond = al);
1114  void vcvt_f32_f64(const SwVfpRegister dst,
1115  const DwVfpRegister src,
1116  VFPConversionMode mode = kDefaultRoundToZero,
1117  const Condition cond = al);
1118 
1119  void vneg(const DwVfpRegister dst,
1120  const DwVfpRegister src,
1121  const Condition cond = al);
1122  void vabs(const DwVfpRegister dst,
1123  const DwVfpRegister src,
1124  const Condition cond = al);
1125  void vadd(const DwVfpRegister dst,
1126  const DwVfpRegister src1,
1127  const DwVfpRegister src2,
1128  const Condition cond = al);
1129  void vsub(const DwVfpRegister dst,
1130  const DwVfpRegister src1,
1131  const DwVfpRegister src2,
1132  const Condition cond = al);
1133  void vmul(const DwVfpRegister dst,
1134  const DwVfpRegister src1,
1135  const DwVfpRegister src2,
1136  const Condition cond = al);
1137  void vdiv(const DwVfpRegister dst,
1138  const DwVfpRegister src1,
1139  const DwVfpRegister src2,
1140  const Condition cond = al);
1141  void vcmp(const DwVfpRegister src1,
1142  const DwVfpRegister src2,
1143  const Condition cond = al);
1144  void vcmp(const DwVfpRegister src1,
1145  const double src2,
1146  const Condition cond = al);
1147  void vmrs(const Register dst,
1148  const Condition cond = al);
1149  void vmsr(const Register dst,
1150  const Condition cond = al);
1151  void vsqrt(const DwVfpRegister dst,
1152  const DwVfpRegister src,
1153  const Condition cond = al);
1154 
1155  // Pseudo instructions
1156 
1157  // Different nop operations are used by the code generator to detect certain
1158  // states of the generated code.
1159  enum NopMarkerTypes {
1160  NON_MARKING_NOP = 0,
1161  DEBUG_BREAK_NOP,
1162  // IC markers.
1163  PROPERTY_ACCESS_INLINED,
1164  PROPERTY_ACCESS_INLINED_CONTEXT,
1165  PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
1166  // Helper values.
1167  LAST_CODE_MARKER,
1168  FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
1169  };
1170 
1171  void nop(int type = 0); // 0 is the default non-marking type.
1172 
1173  void push(Register src, Condition cond = al) {
1174  str(src, MemOperand(sp, 4, NegPreIndex), cond);
1175  }
1176 
1177  void pop(Register dst, Condition cond = al) {
1178  ldr(dst, MemOperand(sp, 4, PostIndex), cond);
1179  }
1180 
1181  void pop() {
1182  add(sp, sp, Operand(kPointerSize));
1183  }
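  // The pseudo instructions above each expand to a single ARM instruction
  // (sketch, not part of the original header):
  //   push(r0);  // str r0, [sp, #-4]!
  //   pop(r1);   // ldr r1, [sp], #4
  //   pop();     // add sp, sp, #4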
1184 
1185  // Jump unconditionally to given label.
1186  void jmp(Label* L) { b(L, al); }
1187 
1188  bool predictable_code_size() const { return predictable_code_size_; }
1189 
1191  const Assembler* assembler) {
1192 #ifdef USE_BLX
1194  (assembler == NULL || !assembler->predictable_code_size());
1195 #else
1196  // If not using BLX, loads from the constant pool cannot use the immediate
1197  // (movw/movt) form, because the ldr pc, [pc + #xxxx] used for calls must be
1198  // a single instruction and cannot easily be distinguished out of context
1199  // from other loads that could use movw/movt.
1200  return false;
1201 #endif
1202  }
1203 
1204  // Check the code size generated from label to here.
1205  int SizeOfCodeGeneratedSince(Label* label) {
1206  return pc_offset() - label->pos();
1207  }
1208 
1209  // Check the number of instructions generated from label to here.
1210  int InstructionsGeneratedSince(Label* label) {
1211  return SizeOfCodeGeneratedSince(label) / kInstrSize;
1212  }
1213 
1214  // Check whether an immediate fits an addressing mode 1 instruction.
1215  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
1216 
1217  // Class for postponing constant pool generation within a scope.
1218  class BlockConstPoolScope BASE_EMBEDDED {
1219  public:
1220  explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1221  assem_->StartBlockConstPool();
1222  }
1223  ~BlockConstPoolScope() {
1224  assem_->EndBlockConstPool();
1225  }
1226 
1227  private:
1228  Assembler* assem_;
1229 
1230  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
1231  };
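  // Typical usage (sketch, not part of the original header; assm is an
  // Assembler*): keep a fixed-length sequence free of an interleaved pool:
  //   { BlockConstPoolScope block_const_pool(assm);
  //     // ... emit instructions that must stay contiguous ...
  //   }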
1232 
1233  // Debugging
1234 
1235  // Mark address of the ExitJSFrame code.
1236  void RecordJSReturn();
1237 
1238  // Mark address of a debug break slot.
1239  void RecordDebugBreakSlot();
1240 
1241  // Record the AST id of the CallIC being compiled, so that it can be placed
1242  // in the relocation information.
1243  void SetRecordedAstId(TypeFeedbackId ast_id) {
1244  ASSERT(recorded_ast_id_.IsNone());
1245  recorded_ast_id_ = ast_id;
1246  }
1247 
1248  TypeFeedbackId RecordedAstId() {
1249  ASSERT(!recorded_ast_id_.IsNone());
1250  return recorded_ast_id_;
1251  }
1252 
1253  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
1254 
1255  // Record a comment relocation entry that can be used by a disassembler.
1256  // Use --code-comments to enable.
1257  void RecordComment(const char* msg);
1258 
1259  // Record the emission of a constant pool.
1260  //
1261  // The emission of constant pool depends on the size of the code generated and
1262  // the number of RelocInfo recorded.
1263  // The Debug mechanism needs to map code offsets between two versions of a
1264  // function, compiled with and without debugger support (see for example
1265  // Debug::PrepareForBreakPoints()).
1266  // Compiling functions with debugger support generates additional code
1267  // (Debug::GenerateSlot()). This may affect the emission of the constant
1268  // pools and cause the version of the code with debugger support to have
1269  // constant pools generated in different places.
1270  // Recording the position and size of emitted constant pools makes it
1271  // possible to correctly compute the offset mappings between the different
1272  // versions of a function in all situations.
1273  //
1274  // The parameter indicates the size of the constant pool (in bytes), including
1275  // the marker and branch over the data.
1276  void RecordConstPool(int size);
1277 
1278  // Writes a single byte or word of data in the code stream. Used
1279  // for inline tables, e.g., jump-tables. The constant pool should be
1280  // emitted before any use of db and dd to ensure that constant pools
1281  // are not emitted as part of the tables generated.
1282  void db(uint8_t data);
1283  void dd(uint32_t data);
1284 
1285  int pc_offset() const { return pc_ - buffer_; }
1286 
1287  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1288 
1289  // Read/patch instructions
1290  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
1291  void instr_at_put(int pos, Instr instr) {
1292  *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
1293  }
1294  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
1295  static void instr_at_put(byte* pc, Instr instr) {
1296  *reinterpret_cast<Instr*>(pc) = instr;
1297  }
1298  static Condition GetCondition(Instr instr);
1299  static bool IsBranch(Instr instr);
1300  static int GetBranchOffset(Instr instr);
1301  static bool IsLdrRegisterImmediate(Instr instr);
1302  static int GetLdrRegisterImmediateOffset(Instr instr);
1303  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
1304  static bool IsStrRegisterImmediate(Instr instr);
1305  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
1306  static bool IsAddRegisterImmediate(Instr instr);
1307  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
1308  static Register GetRd(Instr instr);
1309  static Register GetRn(Instr instr);
1310  static Register GetRm(Instr instr);
1311  static bool IsPush(Instr instr);
1312  static bool IsPop(Instr instr);
1313  static bool IsStrRegFpOffset(Instr instr);
1314  static bool IsLdrRegFpOffset(Instr instr);
1315  static bool IsStrRegFpNegOffset(Instr instr);
1316  static bool IsLdrRegFpNegOffset(Instr instr);
1317  static bool IsLdrPcImmediateOffset(Instr instr);
1318  static bool IsTstImmediate(Instr instr);
1319  static bool IsCmpRegister(Instr instr);
1320  static bool IsCmpImmediate(Instr instr);
1321  static Register GetCmpImmediateRegister(Instr instr);
1322  static int GetCmpImmediateRawImmediate(Instr instr);
1323  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
1324  static bool IsMovT(Instr instr);
1325  static bool IsMovW(Instr instr);
1326 
1327  // Constants in pools are accessed via pc relative addressing, which can
1328  // reach +/-4KB thereby defining a maximum distance between the instruction
1329  // and the accessed constant.
1330  static const int kMaxDistToPool = 4*KB;
1331  static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstrSize;
1332 
1333  // Postpone the generation of the constant pool for the specified number of
1334  // instructions.
1335  void BlockConstPoolFor(int instructions);
1336 
1337  // Check whether it is time to emit a constant pool.
1338  void CheckConstPool(bool force_emit, bool require_jump);
1339 
1340  protected:
1341  // Relocation for a type-recording IC has the AST id added to it. This
1342  // member variable is a way to pass the information from the call site to
1343  // the relocation info.
1344  TypeFeedbackId recorded_ast_id_;
1345 
1346  bool emit_debug_code() const { return emit_debug_code_; }
1347 
1348  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
1349 
1350  // Decode branch instruction at pos and return branch target pos
1351  int target_at(int pos);
1352 
1353  // Patch branch instruction at pos to branch to given branch target pos
1354  void target_at_put(int pos, int target_pos);
1355 
1356  // Prevent constant pool emission until EndBlockConstPool is called.
1357  // Calls to this function can be nested but must be followed by an equal
1358  // number of calls to EndBlockConstPool.
1359  void StartBlockConstPool() {
1360  if (const_pool_blocked_nesting_++ == 0) {
1361  // Prevent constant pool checks happening by setting the next check to
1362  // the biggest possible offset.
1363  next_buffer_check_ = kMaxInt;
1364  }
1365  }
1366 
1367  // Resume constant pool emission. Needs to be called as many times as
1368  // StartBlockConstPool to have an effect.
1369  void EndBlockConstPool() {
1370  if (--const_pool_blocked_nesting_ == 0) {
1371  // Check the constant pool hasn't been blocked for too long.
1372  ASSERT((num_pending_reloc_info_ == 0) ||
1373  (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
1374  // Two cases:
1375  // * no_const_pool_before_ >= next_buffer_check_ and the emission is
1376  // still blocked
1377  // * no_const_pool_before_ < next_buffer_check_ and the next emit will
1378  // trigger a check.
1379  next_buffer_check_ = no_const_pool_before_;
1380  }
1381  }
1382 
1383  bool is_const_pool_blocked() const {
1384  return (const_pool_blocked_nesting_ > 0) ||
1385  (pc_offset() < no_const_pool_before_);
1386  }
1387 
1388  private:
1389  // Code buffer:
1390  // The buffer into which code and relocation info are generated.
1391  byte* buffer_;
1392  int buffer_size_;
1393  // True if the assembler owns the buffer, false if buffer is external.
1394  bool own_buffer_;
1395 
1396  int next_buffer_check_; // pc offset of next buffer check
1397 
1398  // Code generation
1399  // The relocation writer's position is at least kGap bytes below the end of
1400  // the generated instructions. This is so that multi-instruction sequences do
1401  // not have to check for overflow. The same is true for writes of large
1402  // relocation info entries.
1403  static const int kGap = 32;
1404  byte* pc_; // the program counter; moves forward
1405 
1406  // Constant pool generation
1407  // Pools are emitted in the instruction stream, preferably after unconditional
1408  // jumps or after returns from functions (in dead code locations).
1409  // If a long code sequence does not contain unconditional jumps, it is
1410  // necessary to emit the constant pool before the pool gets too far from the
1411  // location it is accessed from. In this case, we emit a jump over the emitted
1412  // constant pool.
1413  // Constants in the pool may be addresses of functions that get relocated;
1414  // if so, a relocation info entry is associated with the constant pool entry.
1415 
1416  // Repeated checking whether the constant pool should be emitted is rather
1417  // expensive. By default we only check again once a number of instructions
1418  // has been generated. That also means that the sizing of the buffers is not
1419  // an exact science, and that we rely on some slop to not overrun buffers.
1420  static const int kCheckPoolIntervalInst = 32;
1421  static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
1422 
1423 
1424  // Average distance between a constant pool and the first instruction
1425  // accessing the constant pool. Longer distance should result in less I-cache
1426  // pollution.
1427  // In practice the distance will be smaller since constant pool emission is
1428  // forced after function return and sometimes after unconditional branches.
1429  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
1430 
1431  // Emission of the constant pool may be blocked in some code sequences.
1432  int const_pool_blocked_nesting_; // Block emission if this is not zero.
1433  int no_const_pool_before_; // Block emission before this pc offset.
1434 
1435  // Keep track of the first instruction requiring a constant pool entry
1436  // since the previous constant pool was emitted.
1437  int first_const_pool_use_;
1438 
1439  // Relocation info generation
1440  // Each relocation is encoded as a variable size value
1441  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1442  RelocInfoWriter reloc_info_writer;
1443 
1444  // Relocation info records are also used during code generation as temporary
1445  // containers for constants and code target addresses until they are emitted
1446  // to the constant pool. These pending relocation info records are temporarily
1447  // stored in a separate buffer until a constant pool is emitted.
1448  // If every instruction in a long sequence is accessing the pool, we need one
1449  // pending relocation entry per instruction.
1450 
1451  // the buffer of pending relocation info
1452  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
1453  // number of pending reloc info entries in the buffer
1454  int num_pending_reloc_info_;
1455 
1456  // The bound position, before this we cannot do instruction elimination.
1457  int last_bound_pos_;
1458 
1459  // Code emission
1460  inline void CheckBuffer();
1461  void GrowBuffer();
1462  inline void emit(Instr x);
1463 
1464  // 32-bit immediate values
1465  void move_32_bit_immediate(Condition cond,
1466  Register rd,
1467  SBit s,
1468  const Operand& x);
1469 
1470  // Instruction generation
1471  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
1472  void addrmod2(Instr instr, Register rd, const MemOperand& x);
1473  void addrmod3(Instr instr, Register rd, const MemOperand& x);
1474  void addrmod4(Instr instr, Register rn, RegList rl);
1475  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
1476 
1477  // Labels
1478  void print(Label* L);
1479  void bind_to(Label* L, int pos);
1480  void link_to(Label* L, Label* appendix);
1481  void next(Label* L);
1482 
1483  enum UseConstantPoolMode {
1484  USE_CONSTANT_POOL,
1485  DONT_USE_CONSTANT_POOL
1486  };
1487 
1488  // Record reloc info for current pc_
1489  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
1490  UseConstantPoolMode mode = USE_CONSTANT_POOL);
1491 
1493  friend class RelocInfo;
1494  friend class CodePatcher;
1495  friend class BlockConstPoolScope;
1496 
1497  PositionsRecorder positions_recorder_;
1498 
1499  bool emit_debug_code_;
1500  bool predictable_code_size_;
1501 
1502  friend class PositionsRecorder;
1503  friend class EnsureSpace;
1504 };
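// Illustrative sketch, not part of the original header: minimal use of the
// Assembler following the buffer-ownership, label and GetCode rules documented
// above. The function name is hypothetical; isolate and desc come from the
// caller as in normal V8 usage.
inline void SampleIncrementStub(Isolate* isolate, CodeDesc* desc) {
  Assembler assm(isolate, NULL, 256);  // assembler allocates and owns a buffer
  Label done;
  assm.cmp(r0, Operand::Zero());
  assm.b(eq, &done);                   // forward branch to an unbound label
  assm.add(r0, r0, Operand(1));
  assm.bind(&done);                    // bind the label to the current pc
  assm.bx(lr);                         // return to the caller
  assm.GetCode(desc);                  // emits pending code and fills desc
}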
1505 
1506 
1507 class EnsureSpace BASE_EMBEDDED {
1508  public:
1509  explicit EnsureSpace(Assembler* assembler) {
1510  assembler->CheckBuffer();
1511  }
1512 };
1513 
1514 
1515 class PredictableCodeSizeScope {
1516  public:
1517  explicit PredictableCodeSizeScope(Assembler* assembler)
1518  : asm_(assembler) {
1519  old_value_ = assembler->predictable_code_size();
1520  assembler->set_predictable_code_size(true);
1521  }
1522 
1523  ~PredictableCodeSizeScope() {
1524  if (!old_value_) {
1525  asm_->set_predictable_code_size(false);
1526  }
1527  }
1528 
1529  private:
1530  Assembler* asm_;
1531  bool old_value_;
1532 };
1533 
1534 
1535 } } // namespace v8::internal
1536 
1537 #endif // V8_ARM_ASSEMBLER_ARM_H_
byte * Address
Definition: globals.h:157
void cmp(Register src1, const Operand &src2, Condition cond=al)
static bool IsBranch(Instr instr)
const SwVfpRegister s2
void ldrsb(Register dst, const MemOperand &src, Condition cond=al)
const DwVfpRegister d11
const Instr kCmpCmnMask
bool ImmediateFitsAddrMode1Instruction(int32_t imm32)
const SwVfpRegister s28
void sdiv(Register dst, Register src1, Register src2, Condition cond=al)
static const int kNumRegisters
void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0, Condition cond=al)
int InstructionsGeneratedSince(Label *label)
static int GetBranchOffset(Instr instr)
const Instr kMovwMask
const SwVfpRegister s12
const SwVfpRegister s29
static const int kDebugBreakSlotInstructions
INLINE(static Address target_pointer_address_at(Address pc))
void db(uint8_t data)
void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
const SwVfpRegister s25
const SwVfpRegister s26
const CRegister cr10
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 instructions(ARM only)") DEFINE_bool(enable_vfp2
const Instr kLdrPCMask
int32_t offset() const
const SwVfpRegister s17
static bool IsCmpRegister(Instr instr)
const Register r3
const Instr kMovwLeaveCCFlip
void strh(Register src, const MemOperand &dst, Condition cond=al)
void bic(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void mrs(Register dst, SRegister s, Condition cond=al)
const Instr kLdrPCPattern
const Instr kMovMvnPattern
static void deserialization_set_special_target_at(Address constant_pool_entry, Address target)
static bool IsStrRegFpNegOffset(Instr instr)
void instr_at_put(int pos, Instr instr)
const SwVfpRegister s7
void vabs(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void set_code(int code)
const SwVfpRegister s8
const SwVfpRegister s10
const int kRegister_r7_Code
const DwVfpRegister d8
void sbc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const Instr kMovLrPc
static bool IsStrRegisterImmediate(Instr instr)
const int KB
Definition: globals.h:207
void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond=al)
static TypeFeedbackId None()
Definition: utils.h:999
void vmov(const DwVfpRegister dst, double imm, const Register scratch=no_reg, const Condition cond=al)
static bool IsMovW(Instr instr)
void pop(Register dst, Condition cond=al)
void mla(Register dst, Register src1, Register src2, Register srcA, SBit s=LeaveCC, Condition cond=al)
const DwVfpRegister d5
const int kRegister_pc_Code
const DwVfpRegister d0
static const int kPatchDebugBreakSlotReturnOffset
int SizeOfCodeGeneratedSince(Label *label)
const SwVfpRegister s16
void bfi(Register dst, Register src, int lsb, int width, Condition cond=al)
const Register r6
static int GetCmpImmediateRawImmediate(Instr instr)
int32_t immediate() const
void dd(uint32_t data)
void push(Register src, Condition cond=al)
void tst(Register src1, Register src2, Condition cond=al)
void b(int branch_offset, Condition cond=al)
int int32_t
Definition: unicode.cc:47
void cmn(Register src1, const Operand &src2, Condition cond=al)
uint32_t RegList
Definition: frames.h:38
void ldrb(Register dst, const MemOperand &src, Condition cond=al)
const Instr kAddSubFlip
void smull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
SwVfpRegister high() const
static const int kNumAllocatableRegisters
static bool IsSupported(CpuFeature f)
void clz(Register dst, Register src, Condition cond=al)
const int kMaxInt
Definition: globals.h:210
static bool enabled()
Definition: serialize.h:481
void vmul(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
bool IsNone() const
Definition: utils.h:1000
const SwVfpRegister s14
const SwVfpRegister s21
const DwVfpRegister d12
bool predictable_code_size() const
static bool IsStrRegFpOffset(Instr instr)
void vsqrt(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
void RecordConstPool(int size)
static Register GetRm(Instr instr)
void bl(Label *L, Condition cond=al)
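The b and bl overloads above accept a Label that may still be unbound; the assembler records the reference and patches the branch offset once the label is bound. A minimal sketch of a forward branch, assuming an Assembler instance named masm, the usual bind(Label*) member (not shown in this excerpt), and the eq condition code from constants-arm.h; the helper name is illustrative:

  static void SkipAddWhenEqual(Assembler& masm) {
    Label done;
    masm.cmp(r0, r1);        // set flags from r0 - r1
    masm.b(&done, eq);       // forward branch, offset patched at bind()
    masm.add(r0, r0, r2);    // executed only when r0 != r1
    masm.bind(&done);        // resolves the branch above
  }

After the label is bound, SizeOfCodeGeneratedSince(&done) and InstructionsGeneratedSince(&done) can be used to measure how much code follows it.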
const DwVfpRegister d6
TypeFeedbackId RecordedAstId()
#define ASSERT(condition)
Definition: checks.h:270
static const int kPatchReturnSequenceAddressOffset
void bl(Condition cond, Label *L)
void svc(uint32_t imm24, Condition cond=al)
static bool IsCmpImmediate(Instr instr)
void SetRecordedAstId(TypeFeedbackId ast_id)
void stm(BlockAddrMode am, Register base, RegList src, Condition cond=al)
static Instr instr_at(byte *pc)
const Instr kBlxRegMask
void ldrd(Register dst1, Register dst2, const MemOperand &src, Condition cond=al)
void set_predictable_code_size(bool value)
void ldc2(Coprocessor coproc, CRegister crd, const MemOperand &src, LFlag l=Short)
static void instr_at_put(byte *pc, Instr instr)
const Register r2
static DwVfpRegister from_code(int code)
const Instr kCmpCmnPattern
void blx(int branch_offset)
void target_at_put(int pos, int target_pos)
void vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
const DwVfpRegister d9
const SwVfpRegister s23
const int kRegister_r3_Code
void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
bool is(SwVfpRegister reg) const
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset)
void strb(Register src, const MemOperand &dst, Condition cond=al)
bool is_uint12(int x)
Definition: assembler.h:852
const SwVfpRegister s22
void ldrh(Register dst, const MemOperand &src, Condition cond=al)
void BlockConstPoolFor(int instructions)
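BlockConstPoolFor and CheckConstPool control when the pending constant pool may be dumped into the instruction stream; blocking it keeps a fixed-layout sequence contiguous. A hedged sketch (helper name and register choices are illustrative, masm is an assumed Assembler instance):

  static void EmitContiguousPair(Assembler& masm) {
    masm.BlockConstPoolFor(2);         // no pool between the next two instructions
    masm.ldr(r0, MemOperand(r1));      // these two words stay adjacent
    masm.str(r0, MemOperand(r2));
    masm.CheckConstPool(false, true);  // emit a pool only if one is overdue,
                                       // and only with a jump over it
  }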
static const char * AllocationIndexToString(int index)
const SwVfpRegister s6
bool is(CRegister creg) const
static const int kNumRegisters
Definition: assembler-arm.h:73
const CRegister cr12
EnsureSpace(Assembler *assembler)
void mvn(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
static Condition GetCondition(Instr instr)
static DwVfpRegister FromAllocationIndex(int index)
const SwVfpRegister s31
uint8_t byte
Definition: globals.h:156
const CRegister cr8
void vneg(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond=al)
const SwVfpRegister s18
void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
const int kRegister_r2_Code
static bool IsPush(Instr instr)
void vldm(BlockAddrMode am, Register base, DwVfpRegister first, DwVfpRegister last, Condition cond=al)
const Register sp
const SwVfpRegister s3
bool OffsetIsUint12Encodable() const
DwVfpRegister DoubleRegister
const DwVfpRegister d13
PredictableCodeSizeScope(Assembler *assembler)
const int32_t kDefaultStopCode
void vsub(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
static const int kSpecialTargetSize
const SwVfpRegister s13
const int kRegister_r5_Code
const Register ip
void GetCode(CodeDesc *desc)
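Assembler(isolate, buffer, buffer_size) and GetCode(&desc) bracket a typical code-generation session: passing a NULL buffer lets the assembler allocate its own, and GetCode fills a CodeDesc describing the emitted bytes. A rough sketch, not taken from the V8 sources; the immediate Operand constructor and the helper name are assumptions:

  static void EmitAddFour(Isolate* isolate) {
    Assembler masm(isolate, NULL, 256);   // self-allocated 256-byte buffer
    masm.add(r0, r0, Operand(4));         // r0 += 4
    masm.mov(pc, lr);                     // return (ARMv4-compatible)
    CodeDesc desc;
    masm.GetCode(&desc);                  // desc now describes the code buffer
  }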
const Register r9
const int kPointerSize
Definition: globals.h:220
void strd(Register src1, Register src2, const MemOperand &dst, Condition cond=al)
void orr(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
const SwVfpRegister s27
static const int kPcLoadDelta
void teq(Register src1, const Operand &src2, Condition cond=al)
const Instr kAndBicFlip
int branch_offset(Label *L, bool jump_elimination_allowed)
const DwVfpRegister d7
static void set_target_address_at(Address pc, Address target)
const int kRegister_r4_Code
static bool use_immediate_embedded_pointer_loads(const Assembler *assembler)
void umlal(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
static bool IsPop(Instr instr)
const Instr kMovLeaveCCMask
void movt(Register reg, uint32_t immediate, Condition cond=al)
const CRegister cr11
void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn, CRegister crm, int opcode_2)
static void set_external_target_at(Address constant_pool_entry, Address target)
const Instr kBlxIp
static Register FromAllocationIndex(int index)
Definition: assembler-arm.h:82
static bool IsMovT(Instr instr)
void vadd(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
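The vldr/vadd/vmul/vstr entries form the scalar double-precision VFP path, which is only legal on cores that report the feature. A guarded sketch, assuming the VFP2 CpuFeature enum value and the CpuFeatures::Scope RAII helper (neither shown in this excerpt), with masm an assumed Assembler instance:

  static void EmitMulAdd(Assembler& masm) {
    if (!CpuFeatures::IsSupported(VFP2)) return;
    CpuFeatures::Scope scope(VFP2);   // assumed guard enabling VFP2 emission
    masm.vldr(d1, r0, 0);             // d1 = [r0 + 0]
    masm.vldr(d2, r0, 8);             // d2 = [r0 + 8]
    masm.vmul(d0, d1, d2);            // d0 = d1 * d2
    masm.vadd(d0, d0, d3);            // d3 assumed to hold an addend already
    masm.vstr(d0, r1, 0);             // [r1 + 0] = d0
  }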
const Register pc
static Register from_code(int code)
void set_emit_debug_code(bool value)
const int kRegister_r8_Code
void vmrs(const Register dst, const Condition cond=al)
static int ToAllocationIndex(Register reg)
Definition: assembler-arm.h:77
void str(Register src, const MemOperand &dst, Condition cond=al)
const SwVfpRegister s0
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond=al)
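stm and ldm take a RegList, the uint32_t register bit mask typedef'd above, so multi-register saves and restores are a matter of OR-ing register bits together. A hedged sketch of a prologue/epilogue pair, assuming the Register::bit() accessor and the db_w / ia_w BlockAddrMode values from constants-arm.h:

  static void EmitFramelessSaveRestore(Assembler& masm) {
    RegList saved = r4.bit() | r5.bit() | lr.bit();
    masm.stm(db_w, sp, saved);        // stmdb sp!, {r4, r5, lr}
    // ... body ...
    masm.ldm(ia_w, sp, (saved & ~lr.bit()) | pc.bit());  // ldmia sp!, {r4, r5, pc}
  }

Loading pc instead of lr in the ldm folds the return into the register restore, which is the usual ARM idiom.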
const DwVfpRegister d3
const int kRegister_fp_Code
void split_code(int *vm, int *m) const
void CheckConstPool(bool force_emit, bool require_jump)
void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0)
const int kRegister_lr_Code
const DwVfpRegister no_dreg
const Register r0
static Register GetRn(Instr instr)
void mov(Register dst, Register src, SBit s=LeaveCC, Condition cond=al)
const SwVfpRegister s19
#define BASE_EMBEDDED
Definition: allocation.h:68
void eor(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const SwVfpRegister s5
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset)
void set_offset(int32_t offset)
static const int kDebugBreakSlotLength
static bool IsTstImmediate(Instr instr)
void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
static Register GetRd(Instr instr)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
static const int kMaxNumPendingRelocInfo
const int kRegister_r10_Code
void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
const SwVfpRegister s1
void ldr(Register dst, const MemOperand &src, Condition cond=al)
void stop(const char *msg, Condition cond=al, int32_t code=kDefaultStopCode)
const CRegister cr6
const CRegister cr15
const Register lr
void b(Condition cond, Label *L)
void movw(Register reg, uint32_t immediate, Condition cond=al)
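movw and movt together materialize a full 32-bit constant in two instructions without touching the constant pool; this needs an ARMv7-class core. A sketch guarded on the ARMv7 CpuFeature value (an assumption about the enum name), with the immediate Operand constructor assumed for the fallback path:

  static void LoadConstantSketch(Assembler& masm) {
    if (CpuFeatures::IsSupported(ARMv7)) {
      masm.movw(r0, 0x5678);              // r0 = 0x00005678
      masm.movt(r0, 0x1234);              // r0 = 0x12345678
    } else {
      masm.mov(r0, Operand(0x12345678));  // may become a constant-pool load
    }
  }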
void sub(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
bool is_valid() const
bool is(Register reg) const
static Address target_address_at(Address pc)
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset)
const Instr kMovLeaveCCPattern
const CRegister cr7
const Register r1
const DwVfpRegister d14
void smlal(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
const int kRegister_r6_Code
void vldr(const DwVfpRegister dst, const Register base, int offset, const Condition cond=al)
const CRegister cr2
void RecordComment(const char *msg)
const CRegister cr14
void bl(int branch_offset, Condition cond=al)
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
static int ToAllocationIndex(DwVfpRegister reg)
void rsb(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static Register GetCmpImmediateRegister(Instr instr)
void sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
const Instr kBlxRegPattern
uint32_t offset() const
const SwVfpRegister s30
const Instr kMovMvnMask
static Address target_address_from_return_address(Address pc)
bool is_const_pool_blocked() const
friend class PositionsRecorder
INLINE(static Operand Zero())
static bool IsAddRegisterImmediate(Instr instr)
void vcmp(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond=al)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
const Register r8
static const int kNumAllocatableRegisters
Definition: assembler-arm.h:74
const CRegister cr13
void vmsr(const Register dst, const Condition cond=al)
void vstr(const DwVfpRegister src, const Register base, int offset, const Condition cond=al)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
void usat(Register dst, int satpos, const Operand &src, Condition cond=al)
void ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
const Instr kMovwPattern
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
void bx(Register target, Condition cond=al)
void ldrsh(Register dst, const MemOperand &src, Condition cond=al)
SwVfpRegister low() const
static const int kJSReturnSequenceInstructions
const SwVfpRegister s4
uint32_t SRegisterFieldMask
const CRegister no_creg
void orr(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const DwVfpRegister d2
const SwVfpRegister s20
bool is(DwVfpRegister reg) const
PositionsRecorder * positions_recorder()
const int kRegister_r0_Code
static const int kInstrSize
void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn, CRegister crm, int opcode_2, Condition cond=al)
MemOperand(Register rn, int32_t offset=0)
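MemOperand(rn, offset) builds the base-plus-immediate addressing form that ldr, str and the byte/halfword variants consume directly. A minimal sketch (register roles and the helper name are illustrative, masm is an assumed Assembler instance):

  static void CopyWordSketch(Assembler& masm) {
    masm.ldr(r0, MemOperand(r1));       // r0 = [r1]
    masm.str(r0, MemOperand(r2, 4));    // [r2 + 4] = r0
    masm.ldrb(r3, MemOperand(r1, 8));   // zero-extended byte load from [r1 + 8]
  }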
static const char * AllocationIndexToString(int index)
Definition: assembler-arm.h:87
static const int kSizeInBytes
Definition: assembler-arm.h:75
const int kRegister_r1_Code
DEFINE_bool(...) V8 runtime flag definition
Definition: flags.cc:301
const CRegister cr3
const SwVfpRegister s9
void cmp(Register src1, Register src2, Condition cond=al)
const Register r10
const Instr kMovMvnFlip
void bfc(Register dst, int lsb, int width, Condition cond=al)
void mls(Register dst, Register src1, Register src2, Register srcA, Condition cond=al)
const Register no_reg
DEFINE_bool(code_comments, ...) V8 runtime flag definition
const DwVfpRegister d1
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void label_at_put(Label *L, int at_offset)
const CRegister cr0
const CRegister cr5
const Register fp
const CRegister cr9
const DwVfpRegister d10
const int kRegister_r9_Code
const int kRegister_ip_Code
const int kRegister_sp_Code
void umull(Register dstL, Register dstH, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
const CRegister cr1
static bool IsLdrRegisterImmediate(Instr instr)
void msr(SRegisterFieldMask fields, const Operand &src, Condition cond=al)
const SwVfpRegister s11
void ldc(Coprocessor coproc, CRegister crd, const MemOperand &src, LFlag l=Short, Condition cond=al)
const CRegister cr4
const SwVfpRegister s15
static bool IsLdrRegFpNegOffset(Instr instr)
void bkpt(uint32_t imm16)
static const int kPatchDebugBreakSlotAddressOffset
void vstm(BlockAddrMode am, Register base, DwVfpRegister first, DwVfpRegister last, Condition cond=al)
static int GetLdrRegisterImmediateOffset(Instr instr)
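The static Is*/Get*/Set* helpers operate on raw Instr words, which is how V8 inspects and patches code it has already emitted. A sketch that bumps the immediate offset of an ldr rd, [rn, #imm] found at a given address; instr_address is assumed to point at such an instruction:

  static void BumpLdrOffsetBy4(byte* instr_address) {
    Instr instr = Assembler::instr_at(instr_address);
    if (Assembler::IsLdrRegisterImmediate(instr)) {
      int offset = Assembler::GetLdrRegisterImmediateOffset(instr);
      instr = Assembler::SetLdrRegisterImmediateOffset(instr, offset + 4);
      Assembler::instr_at_put(instr_address, instr);   // write the patched word back
    }
  }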
void b(Label *L, Condition cond=al)
TypeFeedbackId recorded_ast_id_
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
bool emit_debug_code() const
void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode=kDefaultRoundToZero, const Condition cond=al)
void rsc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void tst(Register src1, const Operand &src2, Condition cond=al)
static bool IsLdrRegFpOffset(Instr instr)
void add(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
const Register r5
const int kRegister_no_reg_Code
const DwVfpRegister d15
const DwVfpRegister d4
const Instr kCmpCmnFlip
static const int kNumReservedRegisters
void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
static bool IsLdrPcImmediateOffset(Instr instr)
void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, CRegister crm, int opcode_2=0, Condition cond=al)
const SwVfpRegister s24
void split_code(int *vm, int *m) const
static const int kMaxDistToPool
const Register r4
void adc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const Register r7