v8  3.25.30(node0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
assembler-mips.h
Go to the documentation of this file.
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 
36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
37 #define V8_MIPS_ASSEMBLER_MIPS_H_
38 
39 #include <stdio.h>
40 
41 #include "assembler.h"
42 #include "constants-mips.h"
43 #include "serialize.h"
44 
45 namespace v8 {
46 namespace internal {
47 
48 // CPU Registers.
49 //
50 // 1) We would prefer to use an enum, but enum values are assignment-
51 // compatible with int, which has caused code-generation bugs.
52 //
53 // 2) We would prefer to use a class instead of a struct but we don't like
54 // the register initialization to depend on the particular initialization
55 // order (which appears to be different on OS X, Linux, and Windows for the
56 // installed versions of C++ we tried). Using a struct permits C-style
57 // "initialization". Also, the Register objects cannot be const as this
58 // forces initialization stubs in MSVC, making us dependent on initialization
59 // order.
60 //
61 // 3) By not using an enum, we are possibly preventing the compiler from
62 // doing certain constant folds, which may significantly reduce the
63 // code generated for some assembly instructions (because they boil down
64 // to a few constants). If this is a problem, we could change the code
65 // such that we use an enum in optimized mode, and the struct in debug
66 // mode. This way we get the compile-time error checking in debug mode
67 // and best performance in optimized code.
68 
69 
70 // -----------------------------------------------------------------------------
71 // Implementation of Register and FPURegister.
72 
73 // Core register.
struct Register {
  static const int kNumRegisters = v8::internal::kNumRegisters;
  static const int kMaxNumAllocatableRegisters = 14;  // v0 through t6 and cp.
  static const int kSizeInBytes = 4;
  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.

  inline static int NumAllocatableRegisters();

  // Maps a register to its dense allocation index. Allocatable registers
  // start at v0 (code 2) — zero_reg and 'at' are skipped — and cp (s7)
  // maps to the last index.
  static int ToAllocationIndex(Register reg) {
    ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
           reg.is(from_code(kCpRegister)));
    return reg.is(from_code(kCpRegister)) ?
           kMaxNumAllocatableRegisters - 1 :  // Return last index for 'cp'.
           reg.code() - 2;  // zero_reg and 'at' are skipped.
  }

  // Inverse of ToAllocationIndex.
  static Register FromAllocationIndex(int index) {
    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
    return index == kMaxNumAllocatableRegisters - 1 ?
           from_code(kCpRegister) :  // Last index is always the 'cp' register.
           from_code(index + 2);  // zero_reg and 'at' are skipped.
  }

  // Returns the MIPS mnemonic for an allocation index; ordering matches
  // ToAllocationIndex/FromAllocationIndex above.
  static const char* AllocationIndexToString(int index) {
    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
    const char* const names[] = {
      "v0",
      "v1",
      "a0",
      "a1",
      "a2",
      "a3",
      "t0",
      "t1",
      "t2",
      "t3",
      "t4",
      "t5",
      "t6",
      "s7",
    };
    return names[index];
  }

  static Register from_code(int code) {
    Register r = { code };
    return r;
  }

  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
  bool is(Register reg) const { return code_ == reg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  // Single-bit mask for this register, for use in register lists.
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};
137 
// Convenience macro: defines both the integer code constant
// (kRegister_N_Code) and the Register value for register N.
#define REGISTER(N, C) \
  const int kRegister_ ## N ## _Code = C; \
  const Register N = { C }

// Sentinel for "no register".
REGISTER(no_reg, -1);
// Always zero.
REGISTER(zero_reg, 0);
// at: Reserved for synthetic instructions.
REGISTER(at, 1);
// v0, v1: Used when returning multiple values from subroutines.
REGISTER(v0, 2);
REGISTER(v1, 3);
// a0 - a3: Used to pass non-FP parameters.
REGISTER(a0, 4);
REGISTER(a1, 5);
REGISTER(a2, 6);
REGISTER(a3, 7);
// t0 - t9: Can be used without reservation, act as temporary registers and are
// allowed to be destroyed by subroutines.
REGISTER(t0, 8);
REGISTER(t1, 9);
REGISTER(t2, 10);
REGISTER(t3, 11);
REGISTER(t4, 12);
REGISTER(t5, 13);
REGISTER(t6, 14);
REGISTER(t7, 15);
// s0 - s7: Subroutine register variables. Subroutines that write to these
// registers must restore their values before exiting so that the caller can
// expect the values to be preserved.
REGISTER(s0, 16);
REGISTER(s1, 17);
REGISTER(s2, 18);
REGISTER(s3, 19);
REGISTER(s4, 20);
REGISTER(s5, 21);
REGISTER(s6, 22);
REGISTER(s7, 23);
REGISTER(t8, 24);
REGISTER(t9, 25);
// k0, k1: Reserved for system calls and interrupt handlers.
REGISTER(k0, 26);
REGISTER(k1, 27);
// gp: Reserved.
REGISTER(gp, 28);
// sp: Stack pointer.
REGISTER(sp, 29);
// fp: Frame pointer.
REGISTER(fp, 30);
// ra: Return address pointer.
REGISTER(ra, 31);

#undef REGISTER
191 
192 
// Converts between a Register and its integer representation.
// NOTE(review): bodies are defined outside this header; exact numbering
// scheme inferred from the names — verify against assembler-mips.cc.
int ToNumber(Register reg);

Register ToRegister(int num);
196 
197 // Coprocessor register.
198 struct FPURegister {
200 
201  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
202  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
203  // number of Double regs (64-bit regs, or FPU-reg-pairs).
204 
205  // A few double registers are reserved: one as a scratch register and one to
206  // hold 0.0.
207  // f28: 0.0
208  // f30: scratch register.
209  static const int kNumReservedRegisters = 2;
212 
213  inline static int NumRegisters();
214  inline static int NumAllocatableRegisters();
215  inline static int ToAllocationIndex(FPURegister reg);
216  static const char* AllocationIndexToString(int index);
217 
218  static FPURegister FromAllocationIndex(int index) {
219  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
220  return from_code(index * 2);
221  }
222 
223  static FPURegister from_code(int code) {
224  FPURegister r = { code };
225  return r;
226  }
227 
228  bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
229  bool is(FPURegister creg) const { return code_ == creg.code_; }
230  FPURegister low() const {
231  // Find low reg of a Double-reg pair, which is the reg itself.
232  ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
233  FPURegister reg;
234  reg.code_ = code_;
235  ASSERT(reg.is_valid());
236  return reg;
237  }
238  FPURegister high() const {
239  // Find high reg of a Doubel-reg pair, which is reg + 1.
240  ASSERT(code_ % 2 == 0); // Specified Double reg must be even.
241  FPURegister reg;
242  reg.code_ = code_ + 1;
243  ASSERT(reg.is_valid());
244  return reg;
245  }
246 
247  int code() const {
248  ASSERT(is_valid());
249  return code_;
250  }
251  int bit() const {
252  ASSERT(is_valid());
253  return 1 << code_;
254  }
255  void setcode(int f) {
256  code_ = f;
257  ASSERT(is_valid());
258  }
259  // Unfortunately we can't make this private in a struct.
260  int code_;
261 };
262 
263 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32
264 // 32-bit registers, f0 through f31. When used as 'double' they are used
265 // in pairs, starting with the even numbered register. So a double operation
266 // on f0 really uses f0 and f1.
267 // (Modern mips hardware also supports 32 64-bit registers, via setting
268 // (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
269 // but it is not in common use. Someday we will want to support this in v8.)
270 
271 // For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
274 
275 const FPURegister no_freg = { -1 };
276 
277 const FPURegister f0 = { 0 }; // Return value in hard float mode.
278 const FPURegister f1 = { 1 };
279 const FPURegister f2 = { 2 };
280 const FPURegister f3 = { 3 };
281 const FPURegister f4 = { 4 };
282 const FPURegister f5 = { 5 };
283 const FPURegister f6 = { 6 };
284 const FPURegister f7 = { 7 };
285 const FPURegister f8 = { 8 };
286 const FPURegister f9 = { 9 };
287 const FPURegister f10 = { 10 };
288 const FPURegister f11 = { 11 };
289 const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
290 const FPURegister f13 = { 13 };
291 const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
292 const FPURegister f15 = { 15 };
293 const FPURegister f16 = { 16 };
294 const FPURegister f17 = { 17 };
295 const FPURegister f18 = { 18 };
296 const FPURegister f19 = { 19 };
297 const FPURegister f20 = { 20 };
298 const FPURegister f21 = { 21 };
299 const FPURegister f22 = { 22 };
300 const FPURegister f23 = { 23 };
301 const FPURegister f24 = { 24 };
302 const FPURegister f25 = { 25 };
303 const FPURegister f26 = { 26 };
304 const FPURegister f27 = { 27 };
305 const FPURegister f28 = { 28 };
306 const FPURegister f29 = { 29 };
307 const FPURegister f30 = { 30 };
308 const FPURegister f31 = { 31 };
309 
310 // Register aliases.
311 // cp is assumed to be a callee saved register.
312 // Defined using #define instead of "static const Register&" because Clang
313 // complains otherwise when a compilation unit that includes this header
314 // doesn't use the variables.
315 #define kRootRegister s6
316 #define cp s7
317 #define kLithiumScratchReg s3
318 #define kLithiumScratchReg2 s4
319 #define kLithiumScratchDouble f30
320 #define kDoubleRegZero f28
321 
322 // FPU (coprocessor 1) control registers.
323 // Currently only FCSR (#31) is implemented.
325  bool is_valid() const { return code_ == kFCSRRegister; }
326  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
327  int code() const {
328  ASSERT(is_valid());
329  return code_;
330  }
331  int bit() const {
332  ASSERT(is_valid());
333  return 1 << code_;
334  }
335  void setcode(int f) {
336  code_ = f;
337  ASSERT(is_valid());
338  }
339  // Unfortunately we can't make this private in a struct.
340  int code_;
341 };
342 
345 
346 
347 // -----------------------------------------------------------------------------
348 // Machine instruction Operands.
349 
350 // Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE32));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  // Immediate value of a non-register operand; asserts that this operand
  // is not a register.
  inline int32_t immediate() const {
    ASSERT(!is_reg());
    return imm32_;
  }

  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};
384 
385 
386 // On MIPS we have only one addressing mode with base_reg + offset.
387 // Class MemOperand represents a memory operand in load and store instructions.
388 class MemOperand : public Operand {
389  public:
390  // Immediate value attached to offset.
394  };
395 
396  explicit MemOperand(Register rn, int32_t offset = 0);
397  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
398  OffsetAddend offset_addend = offset_zero);
399  int32_t offset() const { return offset_; }
400 
401  bool OffsetIsInt16Encodable() const {
402  return is_int16(offset_);
403  }
404 
405  private:
406  int32_t offset_;
407 
408  friend class Assembler;
409 };
410 
411 
412 // CpuFeatures keeps track of which features are supported by the target CPU.
413 // Supported features must be enabled by a CpuFeatureScope before use.
414 class CpuFeatures : public AllStatic {
415  public:
416  // Detect features of the target CPU. Set safe defaults if the serializer
417  // is enabled (snapshots must be portable).
418  static void Probe();
419 
420  // Check whether a feature is supported by the target CPU.
421  static bool IsSupported(CpuFeature f) {
422  ASSERT(initialized_);
423  return Check(f, supported_);
424  }
425 
427  ASSERT(initialized_);
428  return Check(f, found_by_runtime_probing_only_);
429  }
430 
431  static bool IsSafeForSnapshot(CpuFeature f) {
432  return Check(f, cross_compile_) ||
433  (IsSupported(f) &&
435  }
436 
437  static bool VerifyCrossCompiling() {
438  return cross_compile_ == 0;
439  }
440 
442  unsigned mask = flag2set(f);
443  return cross_compile_ == 0 ||
444  (cross_compile_ & mask) == mask;
445  }
446 
447  private:
448  static bool Check(CpuFeature f, unsigned set) {
449  return (set & flag2set(f)) != 0;
450  }
451 
452  static unsigned flag2set(CpuFeature f) {
453  return 1u << f;
454  }
455 
456 #ifdef DEBUG
457  static bool initialized_;
458 #endif
459  static unsigned supported_;
460  static unsigned found_by_runtime_probing_only_;
461 
462  static unsigned cross_compile_;
463 
464  friend class ExternalReference;
465  friend class PlatformFeatureScope;
466  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
467 };
468 
469 
470 class Assembler : public AssemblerBase {
471  public:
472  // Create an assembler. Instructions and relocation information are emitted
473  // into a buffer, with the instructions starting from the beginning and the
474  // relocation information starting from the end of the buffer. See CodeDesc
475  // for a detailed comment on the layout (globals.h).
476  //
477  // If the provided buffer is NULL, the assembler allocates and grows its own
478  // buffer, and buffer_size determines the initial buffer size. The buffer is
479  // owned by the assembler and deallocated upon destruction of the assembler.
480  //
481  // If the provided buffer is not NULL, the assembler uses the provided buffer
482  // for code generation and assumes its size to be buffer_size. If the buffer
483  // is too small, a fatal error occurs. No deallocation of the buffer is done
484  // upon destruction of the assembler.
485  Assembler(Isolate* isolate, void* buffer, int buffer_size);
486  virtual ~Assembler() { }
487 
488  // GetCode emits any pending (non-emitted) code and fills the descriptor
489  // desc. GetCode() is idempotent; it returns the same result if no other
490  // Assembler functions are invoked in between GetCode() calls.
491  void GetCode(CodeDesc* desc);
492 
493  // Label operations & relative jumps (PPUM Appendix D).
494  //
495  // Takes a branch opcode (cc) and a label (L) and generates
496  // either a backward branch or a forward branch and links it
497  // to the label fixup chain. Usage:
498  //
499  // Label L; // unbound label
500  // j(cc, &L); // forward branch to unbound label
501  // bind(&L); // bind label to the current pc
502  // j(cc, &L); // backward branch to bound label
503  // bind(&L); // illegal: a label may be bound only once
504  //
505  // Note: The same Label can be used for forward and backward branches
506  // but it may be bound only once.
507  void bind(Label* L); // Binds an unbound label L to current code position.
508  // Determines if Label is bound and near enough so that branch instruction
509  // can be used to reach it, instead of jump instruction.
510  bool is_near(Label* L);
511 
512  // Returns the branch offset to the given label from the current code
513  // position. Links the label to the current position if it is still unbound.
514  // Manages the jump elimination optimization if the second parameter is true.
515  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
  // Branch offset to the given label, shifted right by 2 so it fits the
  // instruction's word-granular offset field. Links L if still unbound.
  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
    int32_t o = branch_offset(L, jump_elimination_allowed);
    ASSERT((o & 3) == 0);  // Assert the offset is aligned (word multiple).
    return o >> 2;
  }
521  uint32_t jump_address(Label* L);
522 
523 // Puts a label's target address at the given position.
524  // The high 8 bits are set to zero.
525  void label_at_put(Label* L, int at_offset);
526 
527  // Read/Modify the code target address in the branch/call instruction at pc.
529  static void set_target_address_at(Address pc, Address target);
530  // On MIPS there is no Constant Pool so we skip that parameter.
532  ConstantPoolArray* constant_pool)) {
533  return target_address_at(pc);
534  }
536  ConstantPoolArray* constant_pool,
537  Address target)) {
538  set_target_address_at(pc, target);
539  }
541  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
542  return target_address_at(pc, constant_pool);
543  }
545  Code* code,
546  Address target)) {
547  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
548  set_target_address_at(pc, constant_pool, target);
549  }
550 
551  // Return the code target address at a call site from the return address
552  // of that call in the instruction stream.
554 
555  static void JumpLabelToJumpRegister(Address pc);
556 
557  static void QuietNaN(HeapObject* nan);
558 
559  // This sets the branch destination (which gets loaded at the call address).
560  // This is for calls and branches within generated code. The serializer
561  // has already deserialized the lui/ori instructions etc.
563  Address instruction_payload, Code* code, Address target) {
565  instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
566  code,
567  target);
568  }
569 
570  // Size of an instruction.
571  static const int kInstrSize = sizeof(Instr);
572 
573  // Difference between address of current opcode and target address offset.
574  static const int kBranchPCOffset = 4;
575 
576  // Here we are patching the address in the LUI/ORI instruction pair.
577  // These values are used in the serialization process and must be zero for
578  // MIPS platform, as Code, Embedded Object or External-reference pointers
579  // are split across two consecutive instructions and don't exist separately
580  // in the code, so the serializer should not step forwards in memory after
581  // a target is resolved and written.
582  static const int kSpecialTargetSize = 0;
583 
584  // Number of consecutive instructions used to store 32bit constant.
585  // Before jump-optimizations, this constant was used in
586  // RelocInfo::target_address_address() function to tell serializer address of
587  // the instruction that follows LUI/ORI instruction pair. Now, with new jump
588  // optimization, where jump-through-register instruction that usually
589  // follows LUI/ORI pair is substituted with J/JAL, this constant equals
590  // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
591  static const int kInstructionsFor32BitConstant = 3;
592 
593  // Distance between the instruction referring to the address of the call
594  // target and the return address.
595  static const int kCallTargetAddressOffset = 4 * kInstrSize;
596 
597  // Distance between start of patched return sequence and the emitted address
598  // to jump to.
599  static const int kPatchReturnSequenceAddressOffset = 0;
600 
601  // Distance between start of patched debug break slot and the emitted address
602  // to jump to.
603  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
604 
605  // Difference between address of current opcode and value read from pc
606  // register.
607  static const int kPcLoadDelta = 4;
608 
609  static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
610 
611  // Number of instructions used for the JS return sequence. The constant is
612  // used by the debugger to patch the JS return sequence.
613  static const int kJSReturnSequenceInstructions = 7;
614  static const int kDebugBreakSlotInstructions = 4;
615  static const int kDebugBreakSlotLength =
617 
618 
619  // ---------------------------------------------------------------------------
620  // Code generation.
621 
622  // Insert the smallest number of nop instructions
623  // possible to align the pc offset to a multiple
624  // of m. m must be a power of 2 (>= 4).
625  void Align(int m);
626  // Aligns code to something that's optimal for a jump target for the platform.
627  void CodeTargetAlign();
628 
629  // Different nop operations are used by the code generator to detect certain
630  // states of the generated code.
632  NON_MARKING_NOP = 0,
634  // IC markers.
638  // Helper values.
641  // Code aging
644  };
645 
646  // Type == 0 is the default non-marking nop. For mips this is a
647  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
648  // marking, to avoid conflict with ssnop and ehb instructions.
  // Emits a (possibly marked) nop. Type == 0 is the default non-marking nop,
  // encoded as sll(zero_reg, zero_reg, 0). Non-zero marking types use 'at'
  // as the rt register to avoid conflict with the ssnop and ehb encodings.
  void nop(unsigned int type = 0) {
    ASSERT(type < 32);  // Marker must fit the 5-bit shift-amount field.
    Register nop_rt_reg = (type == 0) ? zero_reg : at;
    sll(zero_reg, nop_rt_reg, type, true);
  }
654 
655 
656  // --------Branch-and-jump-instructions----------
657  // We don't use likely variant of instructions.
658  void b(int16_t offset);
659  void b(Label* L) { b(branch_offset(L, false)>>2); }
660  void bal(int16_t offset);
661  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
662 
663  void beq(Register rs, Register rt, int16_t offset);
664  void beq(Register rs, Register rt, Label* L) {
665  beq(rs, rt, branch_offset(L, false) >> 2);
666  }
667  void bgez(Register rs, int16_t offset);
668  void bgezal(Register rs, int16_t offset);
669  void bgtz(Register rs, int16_t offset);
670  void blez(Register rs, int16_t offset);
671  void bltz(Register rs, int16_t offset);
672  void bltzal(Register rs, int16_t offset);
673  void bne(Register rs, Register rt, int16_t offset);
674  void bne(Register rs, Register rt, Label* L) {
675  bne(rs, rt, branch_offset(L, false)>>2);
676  }
677 
678  // Never use the int16_t b(l)cond version with a branch offset
679  // instead of using the Label* version.
680 
681  // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
682  void j(int32_t target);
683  void jal(int32_t target);
684  void jalr(Register rs, Register rd = ra);
685  void jr(Register target);
686  void j_or_jr(int32_t target, Register rs);
687  void jal_or_jalr(int32_t target, Register rs);
688 
689 
690  //-------Data-processing-instructions---------
691 
692  // Arithmetic.
693  void addu(Register rd, Register rs, Register rt);
694  void subu(Register rd, Register rs, Register rt);
695  void mult(Register rs, Register rt);
696  void multu(Register rs, Register rt);
697  void div(Register rs, Register rt);
698  void divu(Register rs, Register rt);
699  void mul(Register rd, Register rs, Register rt);
700 
701  void addiu(Register rd, Register rs, int32_t j);
702 
703  // Logical.
704  void and_(Register rd, Register rs, Register rt);
705  void or_(Register rd, Register rs, Register rt);
706  void xor_(Register rd, Register rs, Register rt);
707  void nor(Register rd, Register rs, Register rt);
708 
709  void andi(Register rd, Register rs, int32_t j);
710  void ori(Register rd, Register rs, int32_t j);
711  void xori(Register rd, Register rs, int32_t j);
712  void lui(Register rd, int32_t j);
713 
714  // Shifts.
715  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
716  // and may cause problems in normal code. coming_from_nop makes sure this
717  // doesn't happen.
718  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
719  void sllv(Register rd, Register rt, Register rs);
720  void srl(Register rd, Register rt, uint16_t sa);
721  void srlv(Register rd, Register rt, Register rs);
722  void sra(Register rt, Register rd, uint16_t sa);
723  void srav(Register rt, Register rd, Register rs);
724  void rotr(Register rd, Register rt, uint16_t sa);
725  void rotrv(Register rd, Register rt, Register rs);
726 
727 
728  //------------Memory-instructions-------------
729 
730  void lb(Register rd, const MemOperand& rs);
731  void lbu(Register rd, const MemOperand& rs);
732  void lh(Register rd, const MemOperand& rs);
733  void lhu(Register rd, const MemOperand& rs);
734  void lw(Register rd, const MemOperand& rs);
735  void lwl(Register rd, const MemOperand& rs);
736  void lwr(Register rd, const MemOperand& rs);
737  void sb(Register rd, const MemOperand& rs);
738  void sh(Register rd, const MemOperand& rs);
739  void sw(Register rd, const MemOperand& rs);
740  void swl(Register rd, const MemOperand& rs);
741  void swr(Register rd, const MemOperand& rs);
742 
743 
744  //----------------Prefetch--------------------
745 
746  void pref(int32_t hint, const MemOperand& rs);
747 
748 
749  //-------------Misc-instructions--------------
750 
751  // Break / Trap instructions.
752  void break_(uint32_t code, bool break_as_stop = false);
753  void stop(const char* msg, uint32_t code = kMaxStopCode);
754  void tge(Register rs, Register rt, uint16_t code);
755  void tgeu(Register rs, Register rt, uint16_t code);
756  void tlt(Register rs, Register rt, uint16_t code);
757  void tltu(Register rs, Register rt, uint16_t code);
758  void teq(Register rs, Register rt, uint16_t code);
759  void tne(Register rs, Register rt, uint16_t code);
760 
761  // Move from HI/LO register.
762  void mfhi(Register rd);
763  void mflo(Register rd);
764 
765  // Set on less than.
766  void slt(Register rd, Register rs, Register rt);
767  void sltu(Register rd, Register rs, Register rt);
768  void slti(Register rd, Register rs, int32_t j);
769  void sltiu(Register rd, Register rs, int32_t j);
770 
771  // Conditional move.
772  void movz(Register rd, Register rs, Register rt);
773  void movn(Register rd, Register rs, Register rt);
774  void movt(Register rd, Register rs, uint16_t cc = 0);
775  void movf(Register rd, Register rs, uint16_t cc = 0);
776 
777  // Bit twiddling.
778  void clz(Register rd, Register rs);
779  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
780  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
781 
782  //--------Coprocessor-instructions----------------
783 
784  // Load, store, and move.
785  void lwc1(FPURegister fd, const MemOperand& src);
786  void ldc1(FPURegister fd, const MemOperand& src);
787 
788  void swc1(FPURegister fs, const MemOperand& dst);
789  void sdc1(FPURegister fs, const MemOperand& dst);
790 
791  void mtc1(Register rt, FPURegister fs);
792  void mfc1(Register rt, FPURegister fs);
793 
794  void ctc1(Register rt, FPUControlRegister fs);
795  void cfc1(Register rt, FPUControlRegister fs);
796 
797  // Arithmetic.
798  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
799  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
800  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
802  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
803  void abs_d(FPURegister fd, FPURegister fs);
804  void mov_d(FPURegister fd, FPURegister fs);
805  void neg_d(FPURegister fd, FPURegister fs);
806  void sqrt_d(FPURegister fd, FPURegister fs);
807 
808  // Conversion.
809  void cvt_w_s(FPURegister fd, FPURegister fs);
810  void cvt_w_d(FPURegister fd, FPURegister fs);
811  void trunc_w_s(FPURegister fd, FPURegister fs);
812  void trunc_w_d(FPURegister fd, FPURegister fs);
813  void round_w_s(FPURegister fd, FPURegister fs);
814  void round_w_d(FPURegister fd, FPURegister fs);
815  void floor_w_s(FPURegister fd, FPURegister fs);
816  void floor_w_d(FPURegister fd, FPURegister fs);
817  void ceil_w_s(FPURegister fd, FPURegister fs);
818  void ceil_w_d(FPURegister fd, FPURegister fs);
819 
820  void cvt_l_s(FPURegister fd, FPURegister fs);
821  void cvt_l_d(FPURegister fd, FPURegister fs);
822  void trunc_l_s(FPURegister fd, FPURegister fs);
823  void trunc_l_d(FPURegister fd, FPURegister fs);
824  void round_l_s(FPURegister fd, FPURegister fs);
825  void round_l_d(FPURegister fd, FPURegister fs);
826  void floor_l_s(FPURegister fd, FPURegister fs);
827  void floor_l_d(FPURegister fd, FPURegister fs);
828  void ceil_l_s(FPURegister fd, FPURegister fs);
829  void ceil_l_d(FPURegister fd, FPURegister fs);
830 
831  void cvt_s_w(FPURegister fd, FPURegister fs);
832  void cvt_s_l(FPURegister fd, FPURegister fs);
833  void cvt_s_d(FPURegister fd, FPURegister fs);
834 
835  void cvt_d_w(FPURegister fd, FPURegister fs);
836  void cvt_d_l(FPURegister fd, FPURegister fs);
837  void cvt_d_s(FPURegister fd, FPURegister fs);
838 
839  // Conditions and branches.
840  void c(FPUCondition cond, SecondaryField fmt,
841  FPURegister ft, FPURegister fs, uint16_t cc = 0);
842 
843  void bc1f(int16_t offset, uint16_t cc = 0);
844  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
845  void bc1t(int16_t offset, uint16_t cc = 0);
846  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
847  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
848 
  // Number of bytes of code generated since the given (bound) label.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Number of instructions generated since the given (bound) label; MIPS
  // instructions are fixed-width (kInstrSize bytes).
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }
858 
859  // Class for scoping postponing the trampoline pool generation.
861  public:
862  explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
863  assem_->StartBlockTrampolinePool();
864  }
866  assem_->EndBlockTrampolinePool();
867  }
868 
869  private:
870  Assembler* assem_;
871 
872  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
873  };
874 
875  // Class for postponing the assembly buffer growth. Typically used for
876  // sequences of instructions that must be emitted as a unit, before
877  // buffer growth (and relocation) can occur.
878  // This blocking scope is not nestable.
880  public:
881  explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
882  assem_->StartBlockGrowBuffer();
883  }
885  assem_->EndBlockGrowBuffer();
886  }
887 
888  private:
889  Assembler* assem_;
890 
891  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
892  };
893 
894  // Debugging.
895 
896  // Mark address of the ExitJSFrame code.
897  void RecordJSReturn();
898 
899  // Mark address of a debug break slot.
900  void RecordDebugBreakSlot();
901 
902  // Record the AST id of the CallIC being compiled, so that it can be placed
903  // in the relocation information.
906  recorded_ast_id_ = ast_id;
907  }
908 
911  return recorded_ast_id_;
912  }
913 
915 
  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Fix up an internal reference at pc after the code buffer has moved by
  // pc_delta bytes.  NOTE(review): return-value semantics are not visible
  // here -- confirm against the definition.
  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream. Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);

  // Emits the address of the code stub's first instruction.
  void emit_code_stub_address(Code* stub);

  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);
935 
  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer: instructions are
  // emitted upward from pc_ while relocation info is written downward from
  // reloc_info_writer.pos(), so the gap between them is the free space.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
943 
  // Read/patch instructions.
  // The byte* overloads operate on an absolute address; the int overloads
  // take an offset relative to the start of the code buffer (buffer_).
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }
953 
  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);

  // Jump / constant-load instruction classification.
  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

  // Classification of instruction patterns used by the code patcher
  // (nops, push/pop sequences, fp-relative loads/stores).
  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  // Decode the register operands of an instruction word.
  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  // Raw field extraction.  NOTE(review): the Get<X> vs. Get<X>Field split
  // appears to distinguish shifted-down values from in-place bit fields --
  // confirm against the definitions.
  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  // Branch-offset and memory-operand patching helpers.
  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);

  static bool IsAndImmediate(Instr instr);
  static bool IsEmittedConstant(Instr instr);
1006 
  // Check whether the trampoline pool needs to be emitted, and emit it if so.
  void CheckTrampolinePool();

  // Allocate a constant pool of the correct size for the generated code.
  MaybeObject* AllocateConstantPool(Heap* heap);

  // Generate the constant pool for the generated code.
  void PopulateConstantPool(ConstantPoolArray* constant_pool);
1014 
1015  protected:
1016  // Relocation for a type-recording IC has the AST id added to it. This
1017  // member variable is a way to pass the information from the call site to
1018  // the relocation info.
1020 
  // Free bytes between the emission pointer and the relocation writer.
  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int32_t pos);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int32_t pos, int32_t target_pos);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1034 
1035  // Block the emission of the trampoline pool before pc_offset.
1037  if (no_trampoline_pool_before_ < pc_offset)
1038  no_trampoline_pool_before_ = pc_offset;
1039  }
1040 
1042  trampoline_pool_blocked_nesting_++;
1043  }
1044 
1046  trampoline_pool_blocked_nesting_--;
1047  }
1048 
1050  return trampoline_pool_blocked_nesting_ > 0;
1051  }
1052 
  // True when an internal trampoline exception has been signalled
  // (e.g. Trampoline::take_slot() ran out of free slots).
  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  // Once a trampoline has been emitted the generated code is large, and
  // branch generation switches to the jump-based long-range pattern.
  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }
1062 
1063  // Temporarily block automatic assembly buffer growth.
1065  ASSERT(!block_buffer_growth_);
1066  block_buffer_growth_ = true;
1067  }
1068 
1070  ASSERT(block_buffer_growth_);
1071  block_buffer_growth_ = false;
1072  }
1073 
1075  return block_buffer_growth_;
1076  }
1077 
1078  private:
1079  // Buffer size and constant pool distance are checked together at regular
1080  // intervals of kBufferCheckInterval emitted bytes.
1081  static const int kBufferCheckInterval = 1*KB/2;
1082 
1083  // Code generation.
1084  // The relocation writer's position is at least kGap bytes below the end of
1085  // the generated instructions. This is so that multi-instruction sequences do
1086  // not have to check for overflow. The same is true for writes of large
1087  // relocation info entries.
1088  static const int kGap = 32;
1089 
1090 
1091  // Repeated checking whether the trampoline pool should be emitted is rather
1092  // expensive. By default we only check again once a number of instructions
1093  // has been generated.
1094  static const int kCheckConstIntervalInst = 32;
1095  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
1096 
1097  int next_buffer_check_; // pc offset of next buffer check.
1098 
1099  // Emission of the trampoline pool may be blocked in some code sequences.
1100  int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
1101  int no_trampoline_pool_before_; // Block emission before this pc offset.
1102 
1103  // Keep track of the last emitted pool to guarantee a maximal distance.
1104  int last_trampoline_pool_end_; // pc offset of the end of the last pool.
1105 
1106  // Automatic growth of the assembly buffer may be blocked for some sequences.
1107  bool block_buffer_growth_; // Block growth when true.
1108 
1109  // Relocation information generation.
1110  // Each relocation is encoded as a variable size value.
1111  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1112  RelocInfoWriter reloc_info_writer;
1113 
1114  // The bound position, before this we cannot do instruction elimination.
1115  int last_bound_pos_;
1116 
1117  // Code emission.
1118  inline void CheckBuffer();
1119  void GrowBuffer();
1120  inline void emit(Instr x);
1121  inline void CheckTrampolinePoolQuick();
1122 
  // Instruction generation.
  // We have 3 different kind of encoding layout on MIPS.
  // However due to many different types of objects encoded in the same fields
  // we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare a
  // few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  // R-type with three general-purpose registers, shift amount and function.
  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

  // R-type variant used for bit-field instructions (msb/lsb operands).
  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

  // FPU R-type: format field plus three FPU registers.
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  // FPU R-type with four FPU registers (e.g. fused multiply-add forms).
  void GenInstrRegister(Opcode opcode,
                        FPURegister fr,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  // Mixed GPR/FPU R-type (GPR<->FPU move-style encodings).
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  // GPR with FPU control register (cfc1/ctc1-style encodings).
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);
1171 
1172 
1173  void GenInstrImmediate(Opcode opcode,
1174  Register rs,
1175  Register rt,
1176  int32_t j);
1177  void GenInstrImmediate(Opcode opcode,
1178  Register rs,
1180  int32_t j);
1181  void GenInstrImmediate(Opcode opcode,
1182  Register r1,
1183  FPURegister r2,
1184  int32_t j);
1185 
1186 
1187  void GenInstrJump(Opcode opcode,
1188  uint32_t address);
1189 
  // Helpers.
  // Per the name, computes src's base register + offset into the at scratch
  // register -- confirm exact semantics against the definition.
  void LoadRegPlusOffsetToAt(const MemOperand& src);

  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);
1197 
1198  // One trampoline consists of:
1199  // - space for trampoline slots,
1200  // - space for labels.
1201  //
1202  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
1203  // Space for trampoline slots preceeds space for labels. Each label is of one
1204  // instruction size, so total amount for labels is equal to
1205  // label_count * kInstrSize.
1206  class Trampoline {
1207  public:
1208  Trampoline() {
1209  start_ = 0;
1210  next_slot_ = 0;
1211  free_slot_count_ = 0;
1212  end_ = 0;
1213  }
1214  Trampoline(int start, int slot_count) {
1215  start_ = start;
1216  next_slot_ = start;
1217  free_slot_count_ = slot_count;
1218  end_ = start + slot_count * kTrampolineSlotsSize;
1219  }
1220  int start() {
1221  return start_;
1222  }
1223  int end() {
1224  return end_;
1225  }
1226  int take_slot() {
1227  int trampoline_slot = kInvalidSlotPos;
1228  if (free_slot_count_ <= 0) {
1229  // We have run out of space on trampolines.
1230  // Make sure we fail in debug mode, so we become aware of each case
1231  // when this happens.
1232  ASSERT(0);
1233  // Internal exception will be caught.
1234  } else {
1235  trampoline_slot = next_slot_;
1236  free_slot_count_--;
1237  next_slot_ += kTrampolineSlotsSize;
1238  }
1239  return trampoline_slot;
1240  }
1241 
1242  private:
1243  int start_;
1244  int end_;
1245  int next_slot_;
1246  int free_slot_count_;
1247  };
1248 
  // Pick the trampoline slot to use for a branch at code position pos.
  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // If trampoline is emitted, generated code is becoming large. As this is
  // already a slow case which can possibly break our code generation for the
  // extreme case, we use this information to trigger different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
  // 18-bit signed byte range of a MIPS branch (16-bit offset << 2).
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
  static const int kInvalidSlotPos = -1;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;
1263 
1265  friend class RelocInfo;
1266  friend class CodePatcher;
1268 
1269  PositionsRecorder positions_recorder_;
1270  friend class PositionsRecorder;
1271  friend class EnsureSpace;
1272 };
1273 
1274 
// Scope helper: constructing an EnsureSpace runs the assembler's buffer
// check so subsequent emission has room (see Assembler::CheckBuffer).
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};
1281 
1282 } } // namespace v8::internal
1283 
1284 #endif // V8_ARM_ASSEMBLER_MIPS_H_
byte * Address
Definition: globals.h:186
const FPURegister f4
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
void addu(Register rd, Register rs, Register rt)
static bool IsBranch(Instr instr)
const SwVfpRegister s2
const FPURegister f20
const FPURegister f21
const FPURegister f28
Isolate * isolate() const
Definition: assembler.h:62
int InstructionsGeneratedSince(Label *label)
static const int kBranchPCOffset
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft)
void andi(Register rd, Register rs, int32_t j)
void beq(Register rs, Register rt, int16_t offset)
void cvt_l_d(FPURegister fd, FPURegister fs)
static int GetBranchOffset(Instr instr)
static uint32_t GetRt(Instr instr)
void trunc_l_d(FPURegister fd, FPURegister fs)
static const int kDebugBreakSlotInstructions
static const int kMaxNumRegisters
void db(uint8_t data)
static uint32_t GetOpcodeField(Instr instr)
const FPURegister f5
const FPURegister f15
FPURegister low() const
const FPURegister f11
int32_t offset() const
void mtc1(Register rt, FPURegister fs)
void PopulateConstantPool(ConstantPoolArray *constant_pool)
static bool IsAddImmediate(Instr instr)
void bc1t(Label *L, uint16_t cc=0)
static Register GetRsReg(Instr instr)
void round_l_s(FPURegister fd, FPURegister fs)
static int ToAllocationIndex(FPURegister reg)
void swc1(FPURegister fs, const MemOperand &dst)
void round_w_d(FPURegister fd, FPURegister fs)
void bgezal(Register rs, int16_t offset)
void instr_at_put(int pos, Instr instr)
const SwVfpRegister s7
REGISTER(no_reg,-1)
const int kNumRegisters
Definition: constants-arm.h:57
void neg_d(FPURegister fd, FPURegister fs)
const FPURegister f0
void blez(Register rs, int16_t offset)
ptrdiff_t offset() const
void sw(Register rd, const MemOperand &rs)
void cvt_s_l(FPURegister fd, FPURegister fs)
void mov_d(FPURegister fd, FPURegister fs)
const int KB
Definition: globals.h:245
static TypeFeedbackId None()
Definition: utils.h:1149
void rotr(Register rd, Register rt, uint16_t sa)
static uint32_t GetImmediate16(Instr instr)
void sqrt_d(FPURegister fd, FPURegister fs)
const FPURegister f24
static bool IsSw(Instr instr)
static Instr SetAddImmediateOffset(Instr instr, int16_t offset)
static uint32_t GetFunctionField(Instr instr)
const FPURegister f22
static const int kPatchDebugBreakSlotReturnOffset
int SizeOfCodeGeneratedSince(Label *label)
void mflo(Register rd)
const FPURegister f10
void tne(Register rs, Register rt, uint16_t code)
void or_(Register dst, int32_t imm32)
void dd(uint32_t data)
const FPURegister f18
void round_w_s(FPURegister fd, FPURegister fs)
static int NumAllocatableRegisters()
const int kInvalidFPUControlRegister
void b(int branch_offset, Condition cond=al)
int32_t buffer_space() const
int int32_t
Definition: unicode.cc:47
void floor_l_s(FPURegister fd, FPURegister fs)
static bool IsSupported(CpuFeature f)
static uint32_t GetRsField(Instr instr)
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft)
void clz(Register dst, Register src, Condition cond=al)
void bc1t(int16_t offset, uint16_t cc=0)
void div(Register rs, Register rt)
static bool enabled()
Definition: serialize.h:485
bool IsNone() const
Definition: utils.h:1150
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
static uint32_t GetRs(Instr instr)
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
static bool IsLwRegFpOffset(Instr instr)
static Register FromAllocationIndex(int index)
bool is(FPURegister creg) const
const uint32_t kMaxStopCode
TypeFeedbackId RecordedAstId()
#define ASSERT(condition)
Definition: checks.h:329
void swr(Register rd, const MemOperand &rs)
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size)
static const int kPatchReturnSequenceAddressOffset
void pref(int32_t hint, const MemOperand &rs)
void SetRecordedAstId(TypeFeedbackId ast_id)
static bool IsSafeForSnapshot(CpuFeature f)
unsigned short uint16_t
Definition: unicode.cc:46
const FPUControlRegister no_fpucreg
static uint32_t GetRdField(Instr instr)
static Instr instr_at(byte *pc)
void DoubleAsTwoUInt32(double d, uint32_t *lo, uint32_t *hi)
const FPURegister f3
static Instr SetSwOffset(Instr instr, int16_t offset)
void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop=false)
void cvt_d_s(FPURegister fd, FPURegister fs)
static bool IsJalr(Instr instr)
void floor_w_s(FPURegister fd, FPURegister fs)
static bool IsJ(Instr instr)
void addiu(Register rd, Register rs, int32_t j)
void cvt_d_l(FPURegister fd, FPURegister fs)
static void instr_at_put(byte *pc, Instr instr)
const Register r2
static const int kMaxNumAllocatableRegisters
static bool IsLwRegFpNegOffset(Instr instr)
void target_at_put(int pos, int target_pos)
void multu(Register rs, Register rt)
void add_d(FPURegister fd, FPURegister fs, FPURegister ft)
MemOperand(Register base, ptrdiff_t offset=0, AddrMode addrmode=Offset)
ConstantPoolArray * constant_pool()
Definition: objects-inl.h:4589
const SwVfpRegister s6
bool is_buffer_growth_blocked() const
static const int kNumRegisters
void ldc1(FPURegister fd, const MemOperand &src)
void cvt_d_w(FPURegister fd, FPURegister fs)
const int kNumFPURegisters
void cvt_w_d(FPURegister fd, FPURegister fs)
EnsureSpace(Assembler *assembler)
static void JumpLabelToJumpRegister(Address pc)
static const char * AllocationIndexToString(int index)
uint8_t byte
Definition: globals.h:185
void break_(uint32_t code, bool break_as_stop=false)
static bool IsPush(Instr instr)
void ceil_w_s(FPURegister fd, FPURegister fs)
const Register sp
const SwVfpRegister s3
void sh(Register rd, const MemOperand &rs)
static const int kMaxNumAllocatableRegisters
static bool IsJr(Instr instr)
DwVfpRegister DoubleRegister
static bool IsOri(Instr instr)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
void sra(Register rt, Register rd, uint16_t sa)
void slt(Register rd, Register rs, Register rt)
void swl(Register rd, const MemOperand &rs)
static Instr SF(Register rd)
void lwr(Register rd, const MemOperand &rs)
void BlockTrampolinePoolBefore(int pc_offset)
void lbu(Register rd, const MemOperand &rs)
static bool IsJal(Instr instr)
static const int kNumReservedRegisters
FPURegister FloatRegister
void ceil_w_d(FPURegister fd, FPURegister fs)
INLINE(static Address target_address_at(Address pc, ConstantPoolArray *constant_pool))
void trunc_l_s(FPURegister fd, FPURegister fs)
FPURegister high() const
void trunc_w_s(FPURegister fd, FPURegister fs)
const FPURegister f16
void srlv(Register rd, Register rt, Register rs)
INLINE(static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target))
static const int kSpecialTargetSize
const FPURegister f9
friend class ExternalReference
void div_d(FPURegister fd, FPURegister fs, FPURegister ft)
void abs_d(FPURegister fd, FPURegister fs)
void sltu(Register rd, Register rs, Register rt)
const int kFCSRRegister
void GetCode(CodeDesc *desc)
void xori(Register rd, Register rs, int32_t j)
void bal(int16_t offset)
void jal_or_jalr(int32_t target, Register rs)
static const int kPcLoadDelta
void teq(Register src1, const Operand &src2, Condition cond=al)
int branch_offset(Label *L, bool jump_elimination_allowed)
int available_space() const
static bool IsPop(Instr instr)
static const int kInstructionsFor32BitConstant
void movt(Register reg, uint32_t immediate, Condition cond=al)
const FPURegister f29
void lui(Register rd, int32_t j)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
const FPURegister f17
const FPURegister f23
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:359
const Register pc
bool is_near(Label *L)
INLINE(static Address target_address_at(Address pc, Code *code))
static Register from_code(int code)
static bool IsLw(Instr instr)
static uint32_t GetFunction(Instr instr)
bool is(FPUControlRegister creg) const
void srl(Register rd, Register rt, uint16_t sa)
static Register GetRdReg(Instr instr)
void bc1f(Label *L, uint16_t cc=0)
MaybeObject * AllocateConstantPool(Heap *heap)
static int NumAllocatableRegisters()
void tlt(Register rs, Register rt, uint16_t code)
static bool IsFoundByRuntimeProbingOnly(CpuFeature f)
void slti(Register rd, Register rs, int32_t j)
const SwVfpRegister s0
void srav(Register rt, Register rd, Register rs)
static uint32_t GetRtField(Instr instr)
bool is_trampoline_emitted() const
void jal(int32_t target)
void sltiu(Register rd, Register rs, int32_t j)
void jalr(Register rs, Register rd=ra)
void movz(const Register &rd, uint64_t imm, int shift=-1)
const FPURegister f2
void floor_l_d(FPURegister fd, FPURegister fs)
void cfc1(Register rt, FPUControlRegister fs)
static const int kCallTargetAddressOffset
#define BASE_EMBEDDED
Definition: allocation.h:68
const SwVfpRegister s5
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size)
static Register GetRtReg(Instr instr)
void beq(Register rs, Register rt, Label *L)
static const int kDebugBreakSlotLength
void lw(Register rd, const MemOperand &rs)
void ceil_l_d(FPURegister fd, FPURegister fs)
static Register GetRd(Instr instr)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
static uint32_t GetLabelConst(Instr instr)
const SwVfpRegister s1
void ori(Register rd, Register rs, int32_t j)
void stop(const char *msg, Condition cond=al, int32_t code=kDefaultStopCode)
void cvt_s_w(FPURegister fd, FPURegister fs)
static const int kCpRegister
const FPURegister f19
int ToNumber(Register reg)
void round_l_d(FPURegister fd, FPURegister fs)
bool is(Register reg) const
friend class PlatformFeatureScope
const Register r1
bool is_trampoline_pool_blocked() const
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
void movf(Register rd, Register rs, uint16_t cc=0)
void RecordComment(const char *msg)
const FPURegister f30
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
static void QuietNaN(HeapObject *nan)
const FPUControlRegister FCSR
static const char * AllocationIndexToString(int index)
void hint(SystemHint code)
void emit_code_stub_address(Code *stub)
void bltzal(Register rs, int16_t offset)
void cvt_w_s(FPURegister fd, FPURegister fs)
void lwl(Register rd, const MemOperand &rs)
void bne(Register rs, Register rt, int16_t offset)
const FPURegister no_freg
static Address target_address_from_return_address(Address pc)
void xor_(Register dst, int32_t imm32)
void BlockTrampolinePoolFor(int instructions)
friend class PositionsRecorder
const FPURegister f27
static bool IsSwRegFpOffset(Instr instr)
static bool IsJump(Instr instr)
bool OffsetIsInt16Encodable() const
void mfhi(Register rd)
void mfc1(Register rt, FPURegister fs)
static bool VerifyCrossCompiling()
void mult(Register rs, Register rt)
void subu(Register rd, Register rs, Register rt)
static int RelocateInternalReference(byte *pc, intptr_t pc_delta)
void tgeu(Register rs, Register rt, uint16_t code)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
static uint32_t GetSa(Instr instr)
bool MustUseReg(RelocInfo::Mode rmode)
const FPURegister f1
void jr(Register target)
const FPURegister f7
void trunc_w_d(FPURegister fd, FPURegister fs)
static const int kJSReturnSequenceInstructions
static bool IsEmittedConstant(Instr instr)
const SwVfpRegister s4
INLINE(static void set_target_address_at(Address pc, Code *code, Address target))
void sllv(Register rd, Register rt, Register rs)
void ctc1(Register rt, FPUControlRegister fs)
bool end_
void floor_w_d(FPURegister fd, FPURegister fs)
void bne(Register rs, Register rt, Label *L)
void sdc1(FPURegister fs, const MemOperand &dst)
const FPURegister f12
void lh(Register rd, const MemOperand &rs)
static bool IsBne(Instr instr)
const FPURegister f6
void bc1f(int16_t offset, uint16_t cc=0)
PositionsRecorder * positions_recorder()
void movn(const Register &rd, uint64_t imm, int shift=-1)
static bool IsBeq(Instr instr)
static const int kInstrSize
static const int kSizeInBytes
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target)
void fcmp(const FPRegister &fn, const FPRegister &fm)
void ceil_l_s(FPURegister fd, FPURegister fs)
void tge(Register rs, Register rt, uint16_t code)
void cvt_s_d(FPURegister fd, FPURegister fs)
const Register no_reg
static int ToAllocationIndex(Register reg)
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void label_at_put(Label *L, int at_offset)
void bgtz(Register rs, int16_t offset)
const Register fp
void nor(Register rd, Register rs, Register rt)
static int16_t GetLwOffset(Instr instr)
static FPURegister from_code(int code)
static FPURegister FromAllocationIndex(int index)
signed short int16_t
Definition: unicode.cc:45
void nop(unsigned int type=0)
int32_t shifted_branch_offset(Label *L, bool jump_elimination_allowed)
static bool IsSwRegFpNegOffset(Instr instr)
void lb(Register rd, const MemOperand &rs)
Register ToRegister(int num)
int64_t immediate() const
uint32_t jump_address(Label *L)
void j_or_jr(int32_t target, Register rs)
const FPURegister f14
void rotrv(Register rd, Register rt, Register rs)
void bgez(Register rs, int16_t offset)
void cvt_l_s(FPURegister fd, FPURegister fs)
void lhu(Register rd, const MemOperand &rs)
static bool VerifyCrossCompiling(CpuFeature f)
static const int kPatchDebugBreakSlotAddressOffset
void tltu(Register rs, Register rt, uint16_t code)
static Instr SetLwOffset(Instr instr, int16_t offset)
TypeFeedbackId recorded_ast_id_
static uint32_t GetSaField(Instr instr)
void divu(Register rs, Register rt)
const FPURegister f31
const FPURegister f25
RelocInfo::Mode rmode() const
void bltz(Register rs, int16_t offset)
void lwc1(FPURegister fd, const MemOperand &src)
const FPURegister f26
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
static bool IsLui(Instr instr)
const FPURegister f13
static bool IsAndImmediate(Instr instr)
const FPURegister f8
void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs, uint16_t cc=0)
static void deserialization_set_special_target_at(Address instruction_payload, Code *code, Address target)
void sb(Register rd, const MemOperand &rs)