v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
assembler-mips.h
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 
36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
37 #define V8_MIPS_ASSEMBLER_MIPS_H_
38 
39 #include <stdio.h>
40 #include "assembler.h"
41 #include "constants-mips.h"
42 #include "serialize.h"
43 
44 namespace v8 {
45 namespace internal {
46 
47 // CPU Registers.
48 //
49 // 1) We would prefer to use an enum, but enum values are assignment-
50 // compatible with int, which has caused code-generation bugs.
51 //
52 // 2) We would prefer to use a class instead of a struct but we don't like
53 // the register initialization to depend on the particular initialization
54 // order (which appears to be different on OS X, Linux, and Windows for the
55 // installed versions of C++ we tried). Using a struct permits C-style
56 // "initialization". Also, the Register objects cannot be const as this
57 // forces initialization stubs in MSVC, making us dependent on initialization
58 // order.
59 //
60 // 3) By not using an enum, we are possibly preventing the compiler from
61 // doing certain constant folds, which may significantly reduce the
62 // code generated for some assembly instructions (because they boil down
63 // to a few constants). If this is a problem, we could change the code
64 // such that we use an enum in optimized mode, and the struct in debug
65 // mode. This way we get the compile-time error checking in debug mode
66 // and best performance in optimized code.
67 
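// As a minimal sketch (mirroring the REGISTER macro defined further below),
// the C-style "initialization" this struct design permits looks like:
//
//   const Register my_reg = { 2 };  // aggregate init, no constructor runs
//   ASSERT(my_reg.is_valid() && my_reg.code() == 2);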
68 
69 // -----------------------------------------------------------------------------
70 // Implementation of Register and FPURegister.
71 
72 // Core register.
73 struct Register {
74  static const int kNumRegisters = v8::internal::kNumRegisters;
75  static const int kNumAllocatableRegisters = 14; // v0 through t7.
76  static const int kSizeInBytes = 4;
77 
78  static int ToAllocationIndex(Register reg) {
79  return reg.code() - 2; // zero_reg and 'at' are skipped.
80  }
81 
82  static Register FromAllocationIndex(int index) {
83  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
84  return from_code(index + 2); // zero_reg and 'at' are skipped.
85  }
86 
87  static const char* AllocationIndexToString(int index) {
88  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
89  const char* const names[] = {
90  "v0",
91  "v1",
92  "a0",
93  "a1",
94  "a2",
95  "a3",
96  "t0",
97  "t1",
98  "t2",
99  "t3",
100  "t4",
101  "t5",
102  "t6",
103  "t7",
104  };
105  return names[index];
106  }
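  // A minimal sketch of the mapping: zero_reg and 'at' are skipped, so
  // allocation index 0 corresponds to register code 2, i.e. v0:
  //
  //   ASSERT(Register::ToAllocationIndex(v0) == 0);
  //   ASSERT(Register::FromAllocationIndex(0).is(v0));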
107 
108  static Register from_code(int code) {
109  Register r = { code };
110  return r;
111  }
112 
113  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
114  bool is(Register reg) const { return code_ == reg.code_; }
115  int code() const {
116  ASSERT(is_valid());
117  return code_;
118  }
119  int bit() const {
120  ASSERT(is_valid());
121  return 1 << code_;
122  }
123 
124  // Unfortunately we can't make this private in a struct.
125  int code_;
126 };
127 
128 #define REGISTER(N, C) \
129  const int kRegister_ ## N ## _Code = C; \
130  const Register N = { C }
131 
132 REGISTER(no_reg, -1);
133 // Always zero.
134 REGISTER(zero_reg, 0);
135 // at: Reserved for synthetic instructions.
136 REGISTER(at, 1);
137 // v0, v1: Used when returning multiple values from subroutines.
138 REGISTER(v0, 2);
139 REGISTER(v1, 3);
140 // a0 - a3: Used to pass non-FP parameters.
141 REGISTER(a0, 4);
142 REGISTER(a1, 5);
143 REGISTER(a2, 6);
144 REGISTER(a3, 7);
145 // t0 - t9: Can be used without reservation, act as temporary registers and are
146 // allowed to be destroyed by subroutines.
147 REGISTER(t0, 8);
148 REGISTER(t1, 9);
149 REGISTER(t2, 10);
150 REGISTER(t3, 11);
151 REGISTER(t4, 12);
152 REGISTER(t5, 13);
153 REGISTER(t6, 14);
154 REGISTER(t7, 15);
155 // s0 - s7: Subroutine register variables. Subroutines that write to these
156 // registers must restore their values before exiting so that the caller can
157 // expect the values to be preserved.
158 REGISTER(s0, 16);
159 REGISTER(s1, 17);
160 REGISTER(s2, 18);
161 REGISTER(s3, 19);
162 REGISTER(s4, 20);
163 REGISTER(s5, 21);
164 REGISTER(s6, 22);
165 REGISTER(s7, 23);
166 REGISTER(t8, 24);
167 REGISTER(t9, 25);
168 // k0, k1: Reserved for system calls and interrupt handlers.
169 REGISTER(k0, 26);
170 REGISTER(k1, 27);
171 // gp: Reserved.
172 REGISTER(gp, 28);
173 // sp: Stack pointer.
174 REGISTER(sp, 29);
175 // fp: Frame pointer.
176 REGISTER(fp, 30);
177 // ra: Return address pointer.
178 REGISTER(ra, 31);
179 
180 #undef REGISTER
181 
182 
183 int ToNumber(Register reg);
184 
185 Register ToRegister(int num);
186 
187 // Coprocessor register.
188 struct FPURegister {
189  static const int kNumRegisters = v8::internal::kNumFPURegisters;
190 
191  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
192  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
193  // number of Double regs (64-bit regs, or FPU-reg-pairs).
194 
195  // A few double registers are reserved: one as a scratch register and one to
196  // hold 0.0.
197  // f28: 0.0
198  // f30: scratch register.
199  static const int kNumReservedRegisters = 2;
200  static const int kNumAllocatableRegisters = kNumRegisters / 2 -
201  kNumReservedRegisters;
202 
203 
204  inline static int ToAllocationIndex(FPURegister reg);
205 
206  static FPURegister FromAllocationIndex(int index) {
207  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
208  return from_code(index * 2);
209  }
210 
211  static const char* AllocationIndexToString(int index) {
212  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
213  const char* const names[] = {
214  "f0",
215  "f2",
216  "f4",
217  "f6",
218  "f8",
219  "f10",
220  "f12",
221  "f14",
222  "f16",
223  "f18",
224  "f20",
225  "f22",
226  "f24",
227  "f26"
228  };
229  return names[index];
230  }
231 
232  static FPURegister from_code(int code) {
233  FPURegister r = { code };
234  return r;
235  }
236 
237  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters; }
238  bool is(FPURegister creg) const { return code_ == creg.code_; }
239  FPURegister low() const {
240  // Find low reg of a Double-reg pair, which is the reg itself.
241  ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
242  FPURegister reg;
243  reg.code_ = code_;
244  ASSERT(reg.is_valid());
245  return reg;
246  }
247  FPURegister high() const {
248  // Find high reg of a Double-reg pair, which is reg + 1.
249  ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
250  FPURegister reg;
251  reg.code_ = code_ + 1;
252  ASSERT(reg.is_valid());
253  return reg;
254  }
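  // A minimal sketch of the even/odd pairing: a Double reg such as f0
  // occupies the pair (f0, f1), so:
  //
  //   ASSERT(f0.low().is(f0));        // low half is the register itself
  //   ASSERT(f0.high().code() == 1);  // high half is f1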
255 
256  int code() const {
257  ASSERT(is_valid());
258  return code_;
259  }
260  int bit() const {
261  ASSERT(is_valid());
262  return 1 << code_;
263  }
264  void setcode(int f) {
265  code_ = f;
266  ASSERT(is_valid());
267  }
268  // Unfortunately we can't make this private in a struct.
269  int code_;
270 };
271 
272 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32
273 // 32-bit registers, f0 through f31. When used as 'double' they are used
274 // in pairs, starting with the even numbered register. So a double operation
275 // on f0 really uses f0 and f1.
276 // (Modern mips hardware also supports 32 64-bit registers, via setting
277 // (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
278 // but it is not in common use. Someday we will want to support this in v8.)
279 
280 // For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
281 typedef FPURegister DoubleRegister;
282 typedef FPURegister FloatRegister;
283 
284 const FPURegister no_freg = { -1 };
285 
286 const FPURegister f0 = { 0 }; // Return value in hard float mode.
287 const FPURegister f1 = { 1 };
288 const FPURegister f2 = { 2 };
289 const FPURegister f3 = { 3 };
290 const FPURegister f4 = { 4 };
291 const FPURegister f5 = { 5 };
292 const FPURegister f6 = { 6 };
293 const FPURegister f7 = { 7 };
294 const FPURegister f8 = { 8 };
295 const FPURegister f9 = { 9 };
296 const FPURegister f10 = { 10 };
297 const FPURegister f11 = { 11 };
298 const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
299 const FPURegister f13 = { 13 };
300 const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
301 const FPURegister f15 = { 15 };
302 const FPURegister f16 = { 16 };
303 const FPURegister f17 = { 17 };
304 const FPURegister f18 = { 18 };
305 const FPURegister f19 = { 19 };
306 const FPURegister f20 = { 20 };
307 const FPURegister f21 = { 21 };
308 const FPURegister f22 = { 22 };
309 const FPURegister f23 = { 23 };
310 const FPURegister f24 = { 24 };
311 const FPURegister f25 = { 25 };
312 const FPURegister f26 = { 26 };
313 const FPURegister f27 = { 27 };
314 const FPURegister f28 = { 28 };
315 const FPURegister f29 = { 29 };
316 const FPURegister f30 = { 30 };
317 const FPURegister f31 = { 31 };
318 
319 // Register aliases.
320 // cp is assumed to be a callee saved register.
321 // Defined using #define instead of "static const Register&" because Clang
322 // complains otherwise when a compilation unit that includes this header
323 // doesn't use the variables.
324 #define kRootRegister s6
325 #define cp s7
326 #define kLithiumScratchReg s3
327 #define kLithiumScratchReg2 s4
328 #define kLithiumScratchDouble f30
329 #define kDoubleRegZero f28
330 
331 // FPU (coprocessor 1) control registers.
332 // Currently only FCSR (#31) is implemented.
333 struct FPUControlRegister {
334  bool is_valid() const { return code_ == kFCSRRegister; }
335  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
336  int code() const {
337  ASSERT(is_valid());
338  return code_;
339  }
340  int bit() const {
341  ASSERT(is_valid());
342  return 1 << code_;
343  }
344  void setcode(int f) {
345  code_ = f;
346  ASSERT(is_valid());
347  }
348  // Unfortunately we can't make this private in a struct.
349  int code_;
350 };
351 
352 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
353 const FPUControlRegister FCSR = { kFCSRRegister };
354 
355 
356 // -----------------------------------------------------------------------------
357 // Machine instruction Operands.
358 
359 // Class Operand represents a shifter operand in data processing instructions.
360 class Operand BASE_EMBEDDED {
361  public:
362  // Immediate.
363  INLINE(explicit Operand(int32_t immediate,
364  RelocInfo::Mode rmode = RelocInfo::NONE));
365  INLINE(explicit Operand(const ExternalReference& f));
366  INLINE(explicit Operand(const char* s));
367  INLINE(explicit Operand(Object** opp));
368  INLINE(explicit Operand(Context** cpp));
369  explicit Operand(Handle<Object> handle);
370  INLINE(explicit Operand(Smi* value));
371 
372  // Register.
373  INLINE(explicit Operand(Register rm));
374 
375  // Return true if this is a register operand.
376  INLINE(bool is_reg() const);
377 
378  Register rm() const { return rm_; }
379 
380  private:
381  Register rm_;
382  int32_t imm32_; // Valid if rm_ == no_reg.
383  RelocInfo::Mode rmode_;
384 
385  friend class Assembler;
386  friend class MacroAssembler;
387 };
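// A minimal usage sketch: the constructors select between the immediate and
// the register form, which is_reg() distinguishes:
//
//   Operand imm(42);   // immediate operand, is_reg() == false
//   Operand reg(a0);   // register operand, is_reg() == true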
388 
389 
390 // On MIPS we have only one addressing mode with base_reg + offset.
391 // Class MemOperand represents a memory operand in load and store instructions.
392 class MemOperand : public Operand {
393  public:
394  explicit MemOperand(Register rn, int32_t offset = 0);
395  int32_t offset() const { return offset_; }
396 
397  bool OffsetIsInt16Encodable() const {
398  return is_int16(offset_);
399  }
400 
401  private:
402  int32_t offset_;
403 
404  friend class Assembler;
405 };
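// A minimal usage sketch: MIPS loads and stores address memory as
// base register + signed 16-bit offset:
//
//   MemOperand slot(sp, 4);                 // address is sp + 4
//   ASSERT(slot.OffsetIsInt16Encodable());  // 4 fits in 16 bits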
406 
407 
408 // CpuFeatures keeps track of which features are supported by the target CPU.
409 // Supported features must be enabled by a Scope before use.
410 class CpuFeatures : public AllStatic {
411  public:
412  // Detect features of the target CPU. Set safe defaults if the serializer
413  // is enabled (snapshots must be portable).
414  static void Probe();
415 
416  // Check whether a feature is supported by the target CPU.
417  static bool IsSupported(CpuFeature f) {
418  ASSERT(initialized_);
419  if (f == FPU && !FLAG_enable_fpu) return false;
420  return (supported_ & (1u << f)) != 0;
421  }
422 
423 
424 #ifdef DEBUG
425  // Check whether a feature is currently enabled.
426  static bool IsEnabled(CpuFeature f) {
427  ASSERT(initialized_);
428  Isolate* isolate = Isolate::UncheckedCurrent();
429  if (isolate == NULL) {
430  // When no isolate is available, work as if we're running in
431  // release mode.
432  return IsSupported(f);
433  }
434  unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
435  return (enabled & (1u << f)) != 0;
436  }
437 #endif
438 
439  // Enable a specified feature within a scope.
440  class Scope BASE_EMBEDDED {
441 #ifdef DEBUG
442 
443  public:
444  explicit Scope(CpuFeature f) {
445  unsigned mask = 1u << f;
446  ASSERT(CpuFeatures::IsSupported(f));
447  ASSERT(!Serializer::enabled() ||
448  (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
449  isolate_ = Isolate::UncheckedCurrent();
450  old_enabled_ = 0;
451  if (isolate_ != NULL) {
452  old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
453  isolate_->set_enabled_cpu_features(old_enabled_ | mask);
454  }
455  }
456  ~Scope() {
457  ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
458  if (isolate_ != NULL) {
459  isolate_->set_enabled_cpu_features(old_enabled_);
460  }
461  }
462 
463  private:
464  Isolate* isolate_;
465  unsigned old_enabled_;
466 #else
467 
468  public:
469  explicit Scope(CpuFeature f) {}
470 #endif
471  };
472 
473  class TryForceFeatureScope BASE_EMBEDDED {
474  public:
475  explicit TryForceFeatureScope(CpuFeature f)
476  : old_supported_(CpuFeatures::supported_) {
477  if (CanForce()) {
478  CpuFeatures::supported_ |= (1u << f);
479  }
480  }
481 
482  ~TryForceFeatureScope() {
483  if (CanForce()) {
484  CpuFeatures::supported_ = old_supported_;
485  }
486  }
487 
488  private:
489  static bool CanForce() {
490  // It's only safe to temporarily force support of CPU features
491  // when there's only a single isolate, which is guaranteed when
492  // the serializer is enabled.
493  return Serializer::enabled();
494  }
495 
496  const unsigned old_supported_;
497  };
498 
499  private:
500 #ifdef DEBUG
501  static bool initialized_;
502 #endif
503  static unsigned supported_;
504  static unsigned found_by_runtime_probing_;
505 
506  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
507 };
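// A minimal usage sketch (assuming CpuFeatures::Probe() has already run):
// guard FPU code on IsSupported() and enable the feature for the emitting
// scope:
//
//   if (CpuFeatures::IsSupported(FPU)) {
//     CpuFeatures::Scope scope(FPU);
//     // ... emit FPU instructions such as add_d() here ...
//   }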
508 
509 
510 class Assembler : public AssemblerBase {
511  public:
512  // Create an assembler. Instructions and relocation information are emitted
513  // into a buffer, with the instructions starting from the beginning and the
514  // relocation information starting from the end of the buffer. See CodeDesc
515  // for a detailed comment on the layout (globals.h).
516  //
517  // If the provided buffer is NULL, the assembler allocates and grows its own
518  // buffer, and buffer_size determines the initial buffer size. The buffer is
519  // owned by the assembler and deallocated upon destruction of the assembler.
520  //
521  // If the provided buffer is not NULL, the assembler uses the provided buffer
522  // for code generation and assumes its size to be buffer_size. If the buffer
523  // is too small, a fatal error occurs. No deallocation of the buffer is done
524  // upon destruction of the assembler.
525  Assembler(Isolate* isolate, void* buffer, int buffer_size);
526  ~Assembler();
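  // A minimal sketch of the two buffer modes described above ('isolate' is a
  // hypothetical Isolate*):
  //
  //   Assembler growable(isolate, NULL, 256);      // owns and grows buffer
  //   byte buf[256];
  //   Assembler fixed(isolate, buf, sizeof(buf));  // caller-owned, fixed size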
527 
528  // Overrides the default provided by FLAG_debug_code.
529  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
530 
531  // Dummy for cross platform compatibility.
532  void set_predictable_code_size(bool value) { }
533 
534  // GetCode emits any pending (non-emitted) code and fills the descriptor
535  // desc. GetCode() is idempotent; it returns the same result if no other
536  // Assembler functions are invoked in between GetCode() calls.
537  void GetCode(CodeDesc* desc);
538 
539  // Label operations & relative jumps (PPUM Appendix D).
540  //
541  // Takes a branch opcode (cc) and a label (L) and generates
542  // either a backward branch or a forward branch and links it
543  // to the label fixup chain. Usage:
544  //
545  // Label L; // unbound label
546  // j(cc, &L); // forward branch to unbound label
547  // bind(&L); // bind label to the current pc
548  // j(cc, &L); // backward branch to bound label
549  // bind(&L); // illegal: a label may be bound only once
550  //
551  // Note: The same Label can be used for forward and backward branches
552  // but it may be bound only once.
553  void bind(Label* L); // Binds an unbound label L to current code position.
554  // Determines if Label is bound and near enough so that branch instruction
555  // can be used to reach it, instead of jump instruction.
556  bool is_near(Label* L);
557 
558  // Returns the branch offset to the given label from the current code
559  // position. Links the label to the current position if it is still unbound.
560  // Manages the jump elimination optimization if the second parameter is true.
561  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
562  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
563  int32_t o = branch_offset(L, jump_elimination_allowed);
564  ASSERT((o & 3) == 0); // Assert the offset is aligned.
565  return o >> 2;
566  }
567  uint32_t jump_address(Label* L);
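  // For example: instructions are 4 bytes, so a bound label 8 bytes ahead
  // gives branch_offset() == 8 and shifted_branch_offset() == 2, the value
  // encoded in the instruction's 16-bit offset field.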
568 
569  // Puts a label's target address at the given position.
570  // The high 8 bits are set to zero.
571  void label_at_put(Label* L, int at_offset);
572 
573  // Read/Modify the code target address in the branch/call instruction at pc.
574  static Address target_address_at(Address pc);
575  static void set_target_address_at(Address pc, Address target);
576 
577  // Return the code target address at a call site from the return address
578  // of that call in the instruction stream.
579  inline static Address target_address_from_return_address(Address pc);
580 
581  static void JumpLabelToJumpRegister(Address pc);
582 
583  static void QuietNaN(HeapObject* nan);
584 
585  // This sets the branch destination (which gets loaded at the call address).
586  // This is for calls and branches within generated code. The serializer
587  // has already deserialized the lui/ori instructions etc.
588  inline static void deserialization_set_special_target_at(
589  Address instruction_payload, Address target) {
590  set_target_address_at(
591  instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
592  target);
593  }
594 
595  // This sets the branch destination.
596  // This is for calls and branches to runtime code.
597  inline static void set_external_target_at(Address instruction_payload,
598  Address target) {
599  set_target_address_at(instruction_payload, target);
600  }
601 
602  // Size of an instruction.
603  static const int kInstrSize = sizeof(Instr);
604 
605  // Difference between address of current opcode and target address offset.
606  static const int kBranchPCOffset = 4;
607 
608  // Here we are patching the address in the LUI/ORI instruction pair.
609  // These values are used in the serialization process and must be zero for
610  // MIPS platform, as Code, Embedded Object or External-reference pointers
611  // are split across two consecutive instructions and don't exist separately
612  // in the code, so the serializer should not step forwards in memory after
613  // a target is resolved and written.
614  static const int kSpecialTargetSize = 0;
615 
616  // Number of consecutive instructions used to store a 32-bit constant.
617  // Before jump-optimizations, this constant was used in
618  // RelocInfo::target_address_address() function to tell serializer address of
619  // the instruction that follows LUI/ORI instruction pair. Now, with new jump
620  // optimization, where jump-through-register instruction that usually
621  // follows LUI/ORI pair is substituted with J/JAL, this constant equals
622  // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
623  static const int kInstructionsFor32BitConstant = 3;
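  // A minimal sketch of such a sequence, loading the 32-bit constant
  // 0x12345678 into t0 and jumping through it:
  //
  //   lui(t0, 0x1234);      // t0 = 0x12340000
  //   ori(t0, t0, 0x5678);  // t0 = 0x12345678
  //   jr(t0);               // optional third instruction (J/JAL/JR/JALR)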
624 
625  // Distance between the instruction referring to the address of the call
626  // target and the return address.
627  static const int kCallTargetAddressOffset = 4 * kInstrSize;
628 
629  // Distance between start of patched return sequence and the emitted address
630  // to jump to.
631  static const int kPatchReturnSequenceAddressOffset = 0;
632 
633  // Distance between start of patched debug break slot and the emitted address
634  // to jump to.
635  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
636 
637  // Difference between address of current opcode and value read from pc
638  // register.
639  static const int kPcLoadDelta = 4;
640 
641  static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
642 
643  // Number of instructions used for the JS return sequence. The constant is
644  // used by the debugger to patch the JS return sequence.
645  static const int kJSReturnSequenceInstructions = 7;
646  static const int kDebugBreakSlotInstructions = 4;
647  static const int kDebugBreakSlotLength =
648  kDebugBreakSlotInstructions * kInstrSize;
649 
650 
651  // ---------------------------------------------------------------------------
652  // Code generation.
653 
654  // Insert the smallest number of nop instructions
655  // possible to align the pc offset to a multiple
656  // of m. m must be a power of 2 (>= 4).
657  void Align(int m);
658  // Aligns code to something that's optimal for a jump target for the platform.
659  void CodeTargetAlign();
660 
661  // Different nop operations are used by the code generator to detect certain
662  // states of the generated code.
663  enum NopMarkerTypes {
664  NON_MARKING_NOP = 0,
665  DEBUG_BREAK_NOP,
666  // IC markers.
667  PROPERTY_ACCESS_INLINED,
668  PROPERTY_ACCESS_INLINED_CONTEXT,
669  PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
670  // Helper values.
671  LAST_CODE_MARKER,
672  FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
673  };
674 
675  // Type == 0 is the default non-marking nop. For mips this is a
676  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
677  // marking, to avoid conflict with ssnop and ehb instructions.
678  void nop(unsigned int type = 0) {
679  ASSERT(type < 32);
680  Register nop_rt_reg = (type == 0) ? zero_reg : at;
681  sll(zero_reg, nop_rt_reg, type, true);
682  }
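  // A minimal usage sketch: type 0 emits the canonical nop, while a non-zero
  // marker type emits a distinguishable sll that IsNop() can recognize later:
  //
  //   nop();   // sll(zero_reg, zero_reg, 0)
  //   nop(1);  // marking nop, encoded with rt == at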
683 
684 
685  // --------Branch-and-jump-instructions----------
686  // We don't use likely variant of instructions.
687  void b(int16_t offset);
688  void b(Label* L) { b(branch_offset(L, false)>>2); }
689  void bal(int16_t offset);
690  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
691 
692  void beq(Register rs, Register rt, int16_t offset);
693  void beq(Register rs, Register rt, Label* L) {
694  beq(rs, rt, branch_offset(L, false) >> 2);
695  }
696  void bgez(Register rs, int16_t offset);
697  void bgezal(Register rs, int16_t offset);
698  void bgtz(Register rs, int16_t offset);
699  void blez(Register rs, int16_t offset);
700  void bltz(Register rs, int16_t offset);
701  void bltzal(Register rs, int16_t offset);
702  void bne(Register rs, Register rt, int16_t offset);
703  void bne(Register rs, Register rt, Label* L) {
704  bne(rs, rt, branch_offset(L, false)>>2);
705  }
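  // A minimal sketch of a counted loop using the Label* variants above
  // (note the MIPS branch delay slot, filled here with a nop):
  //
  //   Label loop;
  //   bind(&loop);
  //   addiu(a0, a0, -1);         // decrement the counter
  //   bne(a0, zero_reg, &loop);  // loop while a0 != 0
  //   nop();                     // branch delay slot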
706 
707  // Never use the int16_t b(l)cond version with a branch offset;
708  // always use the Label* version instead.
709 
710  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
711  void j(int32_t target);
712  void jal(int32_t target);
713  void jalr(Register rs, Register rd = ra);
714  void jr(Register target);
715  void j_or_jr(int32_t target, Register rs);
716  void jal_or_jalr(int32_t target, Register rs);
717 
718 
719  //-------Data-processing-instructions---------
720 
721  // Arithmetic.
722  void addu(Register rd, Register rs, Register rt);
723  void subu(Register rd, Register rs, Register rt);
724  void mult(Register rs, Register rt);
725  void multu(Register rs, Register rt);
726  void div(Register rs, Register rt);
727  void divu(Register rs, Register rt);
728  void mul(Register rd, Register rs, Register rt);
729 
730  void addiu(Register rd, Register rs, int32_t j);
731 
732  // Logical.
733  void and_(Register rd, Register rs, Register rt);
734  void or_(Register rd, Register rs, Register rt);
735  void xor_(Register rd, Register rs, Register rt);
736  void nor(Register rd, Register rs, Register rt);
737 
738  void andi(Register rd, Register rs, int32_t j);
739  void ori(Register rd, Register rs, int32_t j);
740  void xori(Register rd, Register rs, int32_t j);
741  void lui(Register rd, int32_t j);
742 
743  // Shifts.
744  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
745  // and may cause problems in normal code. coming_from_nop makes sure this
746  // doesn't happen.
747  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
748  void sllv(Register rd, Register rt, Register rs);
749  void srl(Register rd, Register rt, uint16_t sa);
750  void srlv(Register rd, Register rt, Register rs);
751  void sra(Register rt, Register rd, uint16_t sa);
752  void srav(Register rt, Register rd, Register rs);
753  void rotr(Register rd, Register rt, uint16_t sa);
754  void rotrv(Register rd, Register rt, Register rs);
755 
756 
757  //------------Memory-instructions-------------
758 
759  void lb(Register rd, const MemOperand& rs);
760  void lbu(Register rd, const MemOperand& rs);
761  void lh(Register rd, const MemOperand& rs);
762  void lhu(Register rd, const MemOperand& rs);
763  void lw(Register rd, const MemOperand& rs);
764  void lwl(Register rd, const MemOperand& rs);
765  void lwr(Register rd, const MemOperand& rs);
766  void sb(Register rd, const MemOperand& rs);
767  void sh(Register rd, const MemOperand& rs);
768  void sw(Register rd, const MemOperand& rs);
769  void swl(Register rd, const MemOperand& rs);
770  void swr(Register rd, const MemOperand& rs);
771 
772 
773  //-------------Misc-instructions--------------
774 
775  // Break / Trap instructions.
776  void break_(uint32_t code, bool break_as_stop = false);
777  void stop(const char* msg, uint32_t code = kMaxStopCode);
778  void tge(Register rs, Register rt, uint16_t code);
779  void tgeu(Register rs, Register rt, uint16_t code);
780  void tlt(Register rs, Register rt, uint16_t code);
781  void tltu(Register rs, Register rt, uint16_t code);
782  void teq(Register rs, Register rt, uint16_t code);
783  void tne(Register rs, Register rt, uint16_t code);
784 
785  // Move from HI/LO register.
786  void mfhi(Register rd);
787  void mflo(Register rd);
788 
789  // Set on less than.
790  void slt(Register rd, Register rs, Register rt);
791  void sltu(Register rd, Register rs, Register rt);
792  void slti(Register rd, Register rs, int32_t j);
793  void sltiu(Register rd, Register rs, int32_t j);
794 
795  // Conditional move.
796  void movz(Register rd, Register rs, Register rt);
797  void movn(Register rd, Register rs, Register rt);
798  void movt(Register rd, Register rs, uint16_t cc = 0);
799  void movf(Register rd, Register rs, uint16_t cc = 0);
800 
801  // Bit twiddling.
802  void clz(Register rd, Register rs);
803  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
804  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
805 
806  //--------Coprocessor-instructions----------------
807 
808  // Load, store, and move.
809  void lwc1(FPURegister fd, const MemOperand& src);
810  void ldc1(FPURegister fd, const MemOperand& src);
811 
812  void swc1(FPURegister fs, const MemOperand& dst);
813  void sdc1(FPURegister fs, const MemOperand& dst);
814 
815  void mtc1(Register rt, FPURegister fs);
816  void mfc1(Register rt, FPURegister fs);
817 
818  void ctc1(Register rt, FPUControlRegister fs);
819  void cfc1(Register rt, FPUControlRegister fs);
820 
821  // Arithmetic.
822  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
823  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
824  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
825  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
826  void abs_d(FPURegister fd, FPURegister fs);
827  void mov_d(FPURegister fd, FPURegister fs);
828  void neg_d(FPURegister fd, FPURegister fs);
829  void sqrt_d(FPURegister fd, FPURegister fs);
830 
831  // Conversion.
832  void cvt_w_s(FPURegister fd, FPURegister fs);
833  void cvt_w_d(FPURegister fd, FPURegister fs);
834  void trunc_w_s(FPURegister fd, FPURegister fs);
835  void trunc_w_d(FPURegister fd, FPURegister fs);
836  void round_w_s(FPURegister fd, FPURegister fs);
837  void round_w_d(FPURegister fd, FPURegister fs);
838  void floor_w_s(FPURegister fd, FPURegister fs);
839  void floor_w_d(FPURegister fd, FPURegister fs);
840  void ceil_w_s(FPURegister fd, FPURegister fs);
841  void ceil_w_d(FPURegister fd, FPURegister fs);
842 
843  void cvt_l_s(FPURegister fd, FPURegister fs);
844  void cvt_l_d(FPURegister fd, FPURegister fs);
845  void trunc_l_s(FPURegister fd, FPURegister fs);
846  void trunc_l_d(FPURegister fd, FPURegister fs);
847  void round_l_s(FPURegister fd, FPURegister fs);
848  void round_l_d(FPURegister fd, FPURegister fs);
849  void floor_l_s(FPURegister fd, FPURegister fs);
850  void floor_l_d(FPURegister fd, FPURegister fs);
851  void ceil_l_s(FPURegister fd, FPURegister fs);
852  void ceil_l_d(FPURegister fd, FPURegister fs);
853 
854  void cvt_s_w(FPURegister fd, FPURegister fs);
855  void cvt_s_l(FPURegister fd, FPURegister fs);
856  void cvt_s_d(FPURegister fd, FPURegister fs);
857 
858  void cvt_d_w(FPURegister fd, FPURegister fs);
859  void cvt_d_l(FPURegister fd, FPURegister fs);
860  void cvt_d_s(FPURegister fd, FPURegister fs);
861 
862  // Conditions and branches.
863  void c(FPUCondition cond, SecondaryField fmt,
864  FPURegister ft, FPURegister fs, uint16_t cc = 0);
865 
866  void bc1f(int16_t offset, uint16_t cc = 0);
867  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
868  void bc1t(int16_t offset, uint16_t cc = 0);
869  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
870  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
871 
872  // Check the code size generated from label to here.
873  int SizeOfCodeGeneratedSince(Label* label) {
874  return pc_offset() - label->pos();
875  }
876 
877  // Check the number of instructions generated from label to here.
878  int InstructionsGeneratedSince(Label* label) {
879  return SizeOfCodeGeneratedSince(label) / kInstrSize;
880  }
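  // For example, after emitting two 4-byte instructions since binding
  // 'start':
  //
  //   SizeOfCodeGeneratedSince(&start);    // returns 8
  //   InstructionsGeneratedSince(&start);  // returns 8 / kInstrSize == 2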
881 
882  // Scope class for postponing trampoline pool generation.
883  class BlockTrampolinePoolScope BASE_EMBEDDED {
884  public:
885  explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
886  assem_->StartBlockTrampolinePool();
887  }
888  ~BlockTrampolinePoolScope() {
889  assem_->EndBlockTrampolinePool();
890  }
891 
892  private:
893  Assembler* assem_;
894 
895  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
896  };
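  // A minimal usage sketch: instantiate the scope around a sequence that must
  // not be interrupted by trampoline pool emission:
  //
  //   { BlockTrampolinePoolScope block_pools(this);
  //     // ... emit a fixed-length instruction sequence here ...
  //   }  // pool emission is unblocked when the scope ends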
897 
898  // Class for postponing the assembly buffer growth. Typically used for
899  // sequences of instructions that must be emitted as a unit, before
900  // buffer growth (and relocation) can occur.
901  // This blocking scope is not nestable.
902  class BlockGrowBufferScope BASE_EMBEDDED {
903  public:
904  explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
905  assem_->StartBlockGrowBuffer();
906  }
907  ~BlockGrowBufferScope() {
908  assem_->EndBlockGrowBuffer();
909  }
910 
911  private:
912  Assembler* assem_;
913 
914  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
915  };
916 
917  // Debugging.
918 
919  // Mark address of the ExitJSFrame code.
920  void RecordJSReturn();
921 
922  // Mark address of a debug break slot.
923  void RecordDebugBreakSlot();
924 
925  // Record the AST id of the CallIC being compiled, so that it can be placed
926  // in the relocation information.
927  void SetRecordedAstId(TypeFeedbackId ast_id) {
928  ASSERT(recorded_ast_id_.IsNone());
929  recorded_ast_id_ = ast_id;
930  }
931 
932  TypeFeedbackId RecordedAstId() {
933  ASSERT(!recorded_ast_id_.IsNone());
934  return recorded_ast_id_;
935  }
936 
937  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
938 
939  // Record a comment relocation entry that can be used by a disassembler.
940  // Use --code-comments to enable.
941  void RecordComment(const char* msg);
942 
943  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
944 
945  // Writes a single byte or word of data in the code stream. Used for
946  // inline tables, e.g., jump-tables.
947  void db(uint8_t data);
948  void dd(uint32_t data);
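  // A minimal sketch of emitting an inline jump table with dd()
  // ('case0_addr' and 'case1_addr' are hypothetical code addresses):
  //
  //   dd(reinterpret_cast<uint32_t>(case0_addr));  // table entry 0
  //   dd(reinterpret_cast<uint32_t>(case1_addr));  // table entry 1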
949 
950  int32_t pc_offset() const { return pc_ - buffer_; }
951 
952  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
953 
954  // Postpone the generation of the trampoline pool for the specified number of
955  // instructions.
956  void BlockTrampolinePoolFor(int instructions);
957 
958  // Check if there is less than kGap bytes available in the buffer.
959  // If this is the case, we need to grow the buffer before emitting
960  // an instruction or relocation information.
961  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
962 
963  // Get the number of bytes available in the buffer.
964  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
965 
966  // Read/patch instructions.
967  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
968  static void instr_at_put(byte* pc, Instr instr) {
969  *reinterpret_cast<Instr*>(pc) = instr;
970  }
971  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
972  void instr_at_put(int pos, Instr instr) {
973  *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
974  }
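  // A minimal patching sketch: read an emitted instruction back and rewrite
  // it in place, e.g. to retarget a load ('pos' and 'new_offset' are
  // hypothetical values):
  //
  //   Instr instr = instr_at(pos);
  //   if (IsLw(instr)) instr_at_put(pos, SetLwOffset(instr, new_offset));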
975 
976  // Check if an instruction is a branch of some kind.
977  static bool IsBranch(Instr instr);
978  static bool IsBeq(Instr instr);
979  static bool IsBne(Instr instr);
980 
981  static bool IsJump(Instr instr);
982  static bool IsJ(Instr instr);
983  static bool IsLui(Instr instr);
984  static bool IsOri(Instr instr);
985 
986  static bool IsJal(Instr instr);
987  static bool IsJr(Instr instr);
988  static bool IsJalr(Instr instr);
989 
990  static bool IsNop(Instr instr, unsigned int type);
991  static bool IsPop(Instr instr);
992  static bool IsPush(Instr instr);
993  static bool IsLwRegFpOffset(Instr instr);
994  static bool IsSwRegFpOffset(Instr instr);
995  static bool IsLwRegFpNegOffset(Instr instr);
996  static bool IsSwRegFpNegOffset(Instr instr);
997 
998  static Register GetRtReg(Instr instr);
999  static Register GetRsReg(Instr instr);
1000  static Register GetRdReg(Instr instr);
1001 
1002  static uint32_t GetRt(Instr instr);
1003  static uint32_t GetRtField(Instr instr);
1004  static uint32_t GetRs(Instr instr);
1005  static uint32_t GetRsField(Instr instr);
1006  static uint32_t GetRd(Instr instr);
1007  static uint32_t GetRdField(Instr instr);
1008  static uint32_t GetSa(Instr instr);
1009  static uint32_t GetSaField(Instr instr);
1010  static uint32_t GetOpcodeField(Instr instr);
1011  static uint32_t GetFunction(Instr instr);
1012  static uint32_t GetFunctionField(Instr instr);
1013  static uint32_t GetImmediate16(Instr instr);
1014  static uint32_t GetLabelConst(Instr instr);
1015 
1016  static int32_t GetBranchOffset(Instr instr);
1017  static bool IsLw(Instr instr);
1018  static int16_t GetLwOffset(Instr instr);
1019  static Instr SetLwOffset(Instr instr, int16_t offset);
1020 
1021  static bool IsSw(Instr instr);
1022  static Instr SetSwOffset(Instr instr, int16_t offset);
1023  static bool IsAddImmediate(Instr instr);
1024  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
1025 
1026  static bool IsAndImmediate(Instr instr);
1027 
1028  void CheckTrampolinePool();
1029 
1030  protected:
1031  // Relocation for a type-recording IC has the AST id added to it. This
1032  // member variable is a way to pass the information from the call site to
1033  // the relocation info.
1034  TypeFeedbackId recorded_ast_id_;
1035 
1036  bool emit_debug_code() const { return emit_debug_code_; }
1037 
1038  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
1039 
1040  // Decode branch instruction at pos and return branch target pos.
1041  int target_at(int32_t pos);
1042 
1043  // Patch branch instruction at pos to branch to given branch target pos.
1044  void target_at_put(int32_t pos, int32_t target_pos);
1045 
1046  // Say if we need to relocate with this mode.
1047  bool MustUseReg(RelocInfo::Mode rmode);
1048 
1049  // Record reloc info for current pc_.
1050  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1051 
1052  // Block the emission of the trampoline pool before pc_offset.
1053  void BlockTrampolinePoolBefore(int pc_offset) {
1054  if (no_trampoline_pool_before_ < pc_offset)
1055  no_trampoline_pool_before_ = pc_offset;
1056  }
1057 
1058  void StartBlockTrampolinePool() {
1059  trampoline_pool_blocked_nesting_++;
1060  }
1061 
1062  void EndBlockTrampolinePool() {
1063  trampoline_pool_blocked_nesting_--;
1064  }
1065 
1066  bool is_trampoline_pool_blocked() const {
1067  return trampoline_pool_blocked_nesting_ > 0;
1068  }
1069 
1070  bool has_exception() const {
1071  return internal_trampoline_exception_;
1072  }
1073 
1074  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
1075 
1076  bool is_trampoline_emitted() const {
1077  return trampoline_emitted_;
1078  }
1079 
1080  // Temporarily block automatic assembly buffer growth.
1081  void StartBlockGrowBuffer() {
1082  ASSERT(!block_buffer_growth_);
1083  block_buffer_growth_ = true;
1084  }
1085 
1086  void EndBlockGrowBuffer() {
1087  ASSERT(block_buffer_growth_);
1088  block_buffer_growth_ = false;
1089  }
1090 
1091  bool is_buffer_growth_blocked() const {
1092  return block_buffer_growth_;
1093  }
1094 
1095  private:
1096  // Code buffer:
1097  // The buffer into which code and relocation info are generated.
1098  byte* buffer_;
1099  int buffer_size_;
1100  // True if the assembler owns the buffer, false if buffer is external.
1101  bool own_buffer_;
1102 
1103  // Buffer size and constant pool distance are checked together at regular
1104  // intervals of kBufferCheckInterval emitted bytes.
1105  static const int kBufferCheckInterval = 1*KB/2;
1106 
1107  // Code generation.
1108  // The relocation writer's position is at least kGap bytes below the end of
1109  // the generated instructions. This is so that multi-instruction sequences do
1110  // not have to check for overflow. The same is true for writes of large
1111  // relocation info entries.
1112  static const int kGap = 32;
1113  byte* pc_; // The program counter - moves forward.
1114 
1115 
1116  // Repeated checking whether the trampoline pool should be emitted is rather
1117  // expensive. By default we only check again once a number of instructions
1118  // has been generated.
1119  static const int kCheckConstIntervalInst = 32;
1120  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
1121 
1122  int next_buffer_check_; // pc offset of next buffer check.
1123 
1124  // Emission of the trampoline pool may be blocked in some code sequences.
1125  int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
1126  int no_trampoline_pool_before_; // Block emission before this pc offset.
1127 
1128  // Keep track of the last emitted pool to guarantee a maximal distance.
1129  int last_trampoline_pool_end_; // pc offset of the end of the last pool.
1130 
1131  // Automatic growth of the assembly buffer may be blocked for some sequences.
1132  bool block_buffer_growth_; // Block growth when true.
1133 
1134  // Relocation information generation.
1135  // Each relocation is encoded as a variable size value.
1136  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1137  RelocInfoWriter reloc_info_writer;
1138 
1139  // The bound position, before this we cannot do instruction elimination.
1140  int last_bound_pos_;
1141 
1142  // Code emission.
1143  inline void CheckBuffer();
1144  void GrowBuffer();
1145  inline void emit(Instr x);
1146  inline void CheckTrampolinePoolQuick();
1147 
1148  // Instruction generation.
1149  // We have 3 different kinds of encoding layout on MIPS.
1150  // However, due to the many different types of objects encoded in the same
1151  // fields, we have quite a few aliases for each mode.
1152  // Using the same structure to refer to Register and FPURegister would spare
1153  // a few aliases, but mixing both does not look clean to me.
1154  // Anyway, we could surely implement this differently.
1155 
1156  void GenInstrRegister(Opcode opcode,
1157  Register rs,
1158  Register rt,
1159  Register rd,
1160  uint16_t sa = 0,
1161  SecondaryField func = NULLSF);
1162 
1163  void GenInstrRegister(Opcode opcode,
1164  Register rs,
1165  Register rt,
1166  uint16_t msb,
1167  uint16_t lsb,
1168  SecondaryField func);
1169 
1170  void GenInstrRegister(Opcode opcode,
1171  SecondaryField fmt,
1172  FPURegister ft,
1173  FPURegister fs,
1174  FPURegister fd,
1175  SecondaryField func = NULLSF);
1176 
1177  void GenInstrRegister(Opcode opcode,
1178  SecondaryField fmt,
1179  Register rt,
1180  FPURegister fs,
1181  FPURegister fd,
1182  SecondaryField func = NULLSF);
1183 
1184  void GenInstrRegister(Opcode opcode,
1185  SecondaryField fmt,
1186  Register rt,
1187  FPUControlRegister fs,
1188  SecondaryField func = NULLSF);
1189 
1190 
1191  void GenInstrImmediate(Opcode opcode,
1192  Register rs,
1193  Register rt,
1194  int32_t j);
1195  void GenInstrImmediate(Opcode opcode,
1196  Register rs,
1197  SecondaryField SF,
1198  int32_t j);
1199  void GenInstrImmediate(Opcode opcode,
1200  Register r1,
1201  FPURegister r2,
1202  int32_t j);
1203 
1204 
1205  void GenInstrJump(Opcode opcode,
1206  uint32_t address);
1207 
1208  // Helpers.
1209  void LoadRegPlusOffsetToAt(const MemOperand& src);
1210 
1211  // Labels.
1212  void print(Label* L);
1213  void bind_to(Label* L, int pos);
1214  void next(Label* L);
1215 
1216  // One trampoline consists of:
1217  // - space for trampoline slots,
1218  // - space for labels.
1219  //
1220  // Space for trampoline slots is equal to slot_count * kTrampolineSlotsSize.
1221  // Space for trampoline slots precedes space for labels. Each label is of
1222  // one instruction size, so the total amount for labels is equal to
1223  // label_count * kInstrSize.
1224  class Trampoline {
1225  public:
1226  Trampoline() {
1227  start_ = 0;
1228  next_slot_ = 0;
1229  free_slot_count_ = 0;
1230  end_ = 0;
1231  }
1232  Trampoline(int start, int slot_count) {
1233  start_ = start;
1234  next_slot_ = start;
1235  free_slot_count_ = slot_count;
1236  end_ = start + slot_count * kTrampolineSlotsSize;
1237  }
1238  int start() {
1239  return start_;
1240  }
1241  int end() {
1242  return end_;
1243  }
1244  int take_slot() {
1245  int trampoline_slot = kInvalidSlotPos;
1246  if (free_slot_count_ <= 0) {
1247  // We have run out of space on trampolines.
1248  // Make sure we fail in debug mode, so we become aware of each case
1249  // when this happens.
1250  ASSERT(0);
1251  // Internal exception will be caught.
1252  } else {
1253  trampoline_slot = next_slot_;
1254  free_slot_count_--;
1255  next_slot_ += kTrampolineSlotsSize;
1256  }
1257  return trampoline_slot;
1258  }
1259 
1260  private:
1261  int start_;
1262  int end_;
1263  int next_slot_;
1264  int free_slot_count_;
1265  };
1266 
1267  int32_t get_trampoline_entry(int32_t pos);
1268  int unbound_labels_count_;
1269  // If a trampoline is emitted, the generated code has become large. Since
1270  // this is already a slow case that can potentially break code generation
1271  // in extreme cases, we use this information to switch to a different mode
1272  // of branch instruction generation, using jump instructions rather than
1273  // regular branch instructions.
1274  bool trampoline_emitted_;
1275  static const int kTrampolineSlotsSize = 4 * kInstrSize;
1276  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
1277  static const int kInvalidSlotPos = -1;
1278 
1279  Trampoline trampoline_;
1280  bool internal_trampoline_exception_;
1281 
1282  friend class RegExpMacroAssemblerMIPS;
1283  friend class RelocInfo;
1284  friend class CodePatcher;
1285  friend class BlockTrampolinePoolScope;
1286 
1287  PositionsRecorder positions_recorder_;
1288  bool emit_debug_code_;
1289  friend class PositionsRecorder;
1290  friend class EnsureSpace;
1291 };
1292 
1293 
1294 class EnsureSpace BASE_EMBEDDED {
1295  public:
1296  explicit EnsureSpace(Assembler* assembler) {
1297  assembler->CheckBuffer();
1298  }
1299 };
1300 
1301 } } // namespace v8::internal
1302 
1303 #endif  // V8_MIPS_ASSEMBLER_MIPS_H_