V8 3.25.30 (Node.js 0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
assembler-ia32.h
Go to the documentation of this file.
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2011 the V8 project authors. All rights reserved.
34 
35 // A light-weight IA32 Assembler.
36 
37 #ifndef V8_IA32_ASSEMBLER_IA32_H_
38 #define V8_IA32_ASSEMBLER_IA32_H_
39 
40 #include "isolate.h"
41 #include "serialize.h"
42 
43 namespace v8 {
44 namespace internal {
45 
46 // CPU Registers.
47 //
48 // 1) We would prefer to use an enum, but enum values are assignment-
49 // compatible with int, which has caused code-generation bugs.
50 //
51 // 2) We would prefer to use a class instead of a struct but we don't like
52 // the register initialization to depend on the particular initialization
53 // order (which appears to be different on OS X, Linux, and Windows for the
54 // installed versions of C++ we tried). Using a struct permits C-style
55 // "initialization". Also, the Register objects cannot be const as this
56 // forces initialization stubs in MSVC, making us dependent on initialization
57 // order.
58 //
59 // 3) By not using an enum, we are possibly preventing the compiler from
60 // doing certain constant folds, which may significantly reduce the
61 // code generated for some assembly instructions (because they boil down
62 // to a few constants). If this is a problem, we could change the code
63 // such that we use an enum in optimized mode, and the struct in debug
64 // mode. This way we get the compile-time error checking in debug mode
65 // and best performance in optimized code.
66 //
67 struct Register {
68  static const int kMaxNumAllocatableRegisters = 6;
69  static int NumAllocatableRegisters() {
71  }
72  static const int kNumRegisters = 8;
73 
74  static inline const char* AllocationIndexToString(int index);
75 
76  static inline int ToAllocationIndex(Register reg);
77 
78  static inline Register FromAllocationIndex(int index);
79 
80  static Register from_code(int code) {
81  ASSERT(code >= 0);
82  ASSERT(code < kNumRegisters);
83  Register r = { code };
84  return r;
85  }
86  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
87  bool is(Register reg) const { return code_ == reg.code_; }
88  // eax, ebx, ecx and edx are byte registers, the rest are not.
89  bool is_byte_register() const { return code_ <= 3; }
90  int code() const {
91  ASSERT(is_valid());
92  return code_;
93  }
94  int bit() const {
95  ASSERT(is_valid());
96  return 1 << code_;
97  }
98 
99  // Unfortunately we can't make this private in a struct.
100  int code_;
101 };
102 
// Hardware encodings (ModR/M reg-field values) of the ia32 general
// registers; -1 is the "no register" sentinel.
103 const int kRegister_eax_Code = 0;
104 const int kRegister_ecx_Code = 1;
105 const int kRegister_edx_Code = 2;
106 const int kRegister_ebx_Code = 3;
107 const int kRegister_esp_Code = 4;
108 const int kRegister_ebp_Code = 5;
109 const int kRegister_esi_Code = 6;
110 const int kRegister_edi_Code = 7;
111 const int kRegister_no_reg_Code = -1;
112 
// NOTE(review): the definitions of the named registers eax..edi (lines
// 113-120 of the original file) were elided by this listing; only no_reg
// survives below.
121 const Register no_reg = { kRegister_no_reg_Code };
122 
123 
124 inline const char* Register::AllocationIndexToString(int index) {
125  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
126  // This is the mapping of allocation indices to registers.
127  const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
128  return kNames[index];
129 }
130 
131 
132 inline int Register::ToAllocationIndex(Register reg) {
133  ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
134  return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
135 }
136 
137 
138 inline Register Register::FromAllocationIndex(int index) {
139  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
140  return (index >= 4) ? from_code(index + 2) : from_code(index);
141 }
142 
143 
// IntelDoubleRegister: common representation for a double-precision
// register (backed by either an XMM or an x87 register).
// NOTE(review): the struct's opening line and the signatures of the three
// methods whose bodies appear below (ToAllocationIndex, FromAllocationIndex
// and from_code, by the shape of the bodies) were elided by this listing —
// confirm against the original header.
145  static const int kMaxNumRegisters = 8;
146  static const int kMaxNumAllocatableRegisters = 7;
147  static int NumAllocatableRegisters();
148  static int NumRegisters();
149  static const char* AllocationIndexToString(int index);
150 
// Body of an elided method signature: maps a register code to an
// allocation index; code 0 is excluded, so indices start at code 1.
152  ASSERT(reg.code() != 0);
153  return reg.code() - 1;
154  }
155 
// Body of an elided method signature: inverse mapping, index -> code + 1.
157  ASSERT(index >= 0 && index < NumAllocatableRegisters());
158  return from_code(index + 1);
159  }
160 
// Body of an elided method signature: builds a register from its code.
162  IntelDoubleRegister result = { code };
163  return result;
164  }
165 
166  bool is_valid() const {
167  return 0 <= code_ && code_ < NumRegisters();
168  }
169  int code() const {
170  ASSERT(is_valid());
171  return code_;
172  }
173 
// Register code; public because this is a struct (see Register above).
174  int code_;
175 };
176 
177 
187 
188 
// XMMRegister: SSE register view of IntelDoubleRegister.
// NOTE(review): the struct's opening line (line 189 of the original file)
// was elided by this listing; by the STATIC_ASSERT below it has the same
// layout as IntelDoubleRegister.
190  static const int kNumAllocatableRegisters = 7;
191  static const int kNumRegisters = 8;
192 
193  static XMMRegister from_code(int code) {
194  STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
195  XMMRegister result;
196  result.code_ = code;
197  return result;
198  }
199 
200  bool is(XMMRegister reg) const { return code_ == reg.code_; }
201 
// xmm0 is reserved (not allocatable), so allocation index i maps to
// register code i + 1 — consistent with the name table below.
202  static XMMRegister FromAllocationIndex(int index) {
203  ASSERT(index >= 0 && index < NumAllocatableRegisters());
204  return from_code(index + 1);
205  }
206 
207  static const char* AllocationIndexToString(int index) {
208  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
209  const char* const names[] = {
210  "xmm1",
211  "xmm2",
212  "xmm3",
213  "xmm4",
214  "xmm5",
215  "xmm6",
216  "xmm7"
217  };
218  return names[index];
219  }
220 };
221 
222 
// The xmm0..xmm7 names view the shared double_register_N storage as
// XMMRegister. Implemented as macros rather than constants to avoid
// introducing static initializers.
223 #define xmm0 (static_cast<const XMMRegister&>(double_register_0))
224 #define xmm1 (static_cast<const XMMRegister&>(double_register_1))
225 #define xmm2 (static_cast<const XMMRegister&>(double_register_2))
226 #define xmm3 (static_cast<const XMMRegister&>(double_register_3))
227 #define xmm4 (static_cast<const XMMRegister&>(double_register_4))
228 #define xmm5 (static_cast<const XMMRegister&>(double_register_5))
229 #define xmm6 (static_cast<const XMMRegister&>(double_register_6))
230 #define xmm7 (static_cast<const XMMRegister&>(double_register_7))
231 #define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg))
232 
233 
// X87Register: x87 FPU stack-register view of IntelDoubleRegister, used
// when SSE2 is unavailable.
// NOTE(review): the struct's opening line (line 234 of the original file)
// was elided by this listing.
235  static const int kNumAllocatableRegisters = 5;
236  static const int kNumRegisters = 5;
237 
238  bool is(X87Register reg) const {
239  return code_ == reg.code_;
240  }
241 
242  static const char* AllocationIndexToString(int index) {
243  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
244  const char* const names[] = {
245  "stX_0", "stX_1", "stX_2", "stX_3", "stX_4"
246  };
247  return names[index];
248  }
249 
// Unlike XMMRegister, the x87 allocation index equals the register code
// directly (no reserved register 0 here).
250  static X87Register FromAllocationIndex(int index) {
251  STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister));
252  ASSERT(index >= 0 && index < NumAllocatableRegisters());
253  X87Register result;
254  result.code_ = index;
255  return result;
256  }
257 
258  static int ToAllocationIndex(X87Register reg) {
259  return reg.code_;
260  }
261 };
262 
// x87 stack-register names, viewing the same shared double_register_N
// storage as X87Register (macros to avoid static initializers).
263 #define stX_0 static_cast<const X87Register&>(double_register_0)
264 #define stX_1 static_cast<const X87Register&>(double_register_1)
265 #define stX_2 static_cast<const X87Register&>(double_register_2)
266 #define stX_3 static_cast<const X87Register&>(double_register_3)
267 #define stX_4 static_cast<const X87Register&>(double_register_4)
268 
269 
// Generic name used by platform-independent code for "a double register".
270 typedef IntelDoubleRegister DoubleRegister;
271 
272 
// ia32 condition codes, numbered to match the hardware encoding used in
// conditional-jump/set/cmov opcodes.
// NOTE(review): this listing elided many enumerators (the gaps in the
// numbering below, e.g. values 1, 3, 5-6, 10-11, 13-14) and the alias
// block near the end — consult the original header for the full set.
273 enum Condition {
274  // any value < 0 is considered no_condition
276 
277  overflow = 0,
279  below = 2,
281  equal = 4,
284  above = 7,
285  negative = 8,
286  positive = 9,
289  less = 12,
292  greater = 15,
293 
294  // aliases
301 };
302 
303 
304 // Returns the equivalent of !cc.
305 // Negation of the default no_condition (-1) results in a non-default
306 // no_condition value (-2). As long as tests for no_condition check
307 // for condition < 0, this will work as expected.
// NOTE(review): the function signature line (line 308 of the original
// file) was elided by this listing. x86 encodes each predicate and its
// negation as an adjacent even/odd pair, so flipping bit 0 negates cc.
309  return static_cast<Condition>(cc ^ 1);
310 }
311 
312 
313 // Corresponds to transposing the operands of a comparison.
// NOTE(review): the function signature line (line 314 of the original
// file) was elided by this listing. Conditions that are symmetric under
// operand swap (equal, not_equal, overflow, sign...) fall through to the
// default case and are returned unchanged.
315  switch (cc) {
316  case below:
317  return above;
318  case above:
319  return below;
320  case above_equal:
321  return below_equal;
322  case below_equal:
323  return above_equal;
324  case less:
325  return greater;
326  case greater:
327  return less;
328  case greater_equal:
329  return less_equal;
330  case less_equal:
331  return greater_equal;
332  default:
333  return cc;
334  };
335 }
336 
337 
338 // -----------------------------------------------------------------------------
339 // Machine instruction Immediates
340 
341 class Immediate BASE_EMBEDDED {
342  public:
343  inline explicit Immediate(int x);
344  inline explicit Immediate(const ExternalReference& ext);
345  inline explicit Immediate(Handle<Object> handle);
346  inline explicit Immediate(Smi* value);
347  inline explicit Immediate(Address addr);
348 
349  static Immediate CodeRelativeOffset(Label* label) {
350  return Immediate(label);
351  }
352 
353  bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
354  bool is_int8() const {
355  return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
356  }
357  bool is_int16() const {
358  return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
359  }
360 
361  private:
362  inline explicit Immediate(Label* value);
363 
364  int x_;
365  RelocInfo::Mode rmode_;
366 
367  friend class Assembler;
368  friend class MacroAssembler;
369 };
370 
371 
372 // -----------------------------------------------------------------------------
373 // Machine instruction Operands
374 
// SIB-byte scale factors (the two-bit 'scale' field of an ia32 SIB byte).
// NOTE(review): the enum's opening line (line 375 of the original file) and
// the pointer/smi-size aliases (lines 380-383) were elided by this listing.
376  times_1 = 0,
377  times_2 = 1,
378  times_4 = 2,
379  times_8 = 3,
384 };
385 
386 
// An Operand encodes an ia32 instruction operand — a register, or a memory
// reference built from base/index/scale/displacement — as the raw ModRM/SIB/
// displacement bytes (buf_) plus optional relocation info for the
// displacement.
387 class Operand BASE_EMBEDDED {
388  public:
389  // XMM reg
390  INLINE(explicit Operand(XMMRegister xmm_reg));
391 
392  // [disp/r]
393  INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
394  // disp only must always be relocated
395 
396  // [base + disp/r]
397  explicit Operand(Register base, int32_t disp,
398  RelocInfo::Mode rmode = RelocInfo::NONE32);
399 
400  // [base + index*scale + disp/r]
401  explicit Operand(Register base,
402  Register index,
403  ScaleFactor scale,
404  int32_t disp,
405  RelocInfo::Mode rmode = RelocInfo::NONE32);
406 
407  // [index*scale + disp/r]
408  explicit Operand(Register index,
409  ScaleFactor scale,
410  int32_t disp,
411  RelocInfo::Mode rmode = RelocInfo::NONE32);
412 
// Memory operand addressing an external (VM-internal) variable; the
// address is embedded with EXTERNAL_REFERENCE relocation so the GC /
// serializer can find and update it.
413  static Operand StaticVariable(const ExternalReference& ext) {
414  return Operand(reinterpret_cast<int32_t>(ext.address()),
415  RelocInfo::EXTERNAL_REFERENCE);
416  }
417 
// NOTE(review): the first line of this factory's signature (line 418 of
// the original file, taking the index register) was elided by this
// listing; it indexes into an external array with the given scale.
419  ScaleFactor scale,
420  const ExternalReference& arr) {
421  return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
422  RelocInfo::EXTERNAL_REFERENCE);
423  }
424 
425  static Operand ForCell(Handle<Cell> cell) {
426  AllowDeferredHandleDereference embedding_raw_address;
427  return Operand(reinterpret_cast<int32_t>(cell.location()),
428  RelocInfo::CELL);
429  }
430 
431  // Returns true if this Operand is a wrapper for the specified register.
432  bool is_reg(Register reg) const;
433 
434  // Returns true if this Operand is a wrapper for one register.
435  bool is_reg_only() const;
436 
437  // Asserts that this Operand is a wrapper for one register and returns the
438  // register.
439  Register reg() const;
440 
441  private:
442  // reg
443  INLINE(explicit Operand(Register reg));
444 
445  // Set the ModRM byte without an encoded 'reg' register. The
446  // register is encoded later as part of the emit_operand operation.
447  inline void set_modrm(int mod, Register rm);
448 
449  inline void set_sib(ScaleFactor scale, Register index, Register base);
450  inline void set_disp8(int8_t disp);
451  inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
452 
// Raw encoding: ModRM byte, optional SIB byte, optional displacement
// (up to 6 bytes total on ia32).
453  byte buf_[6];
454  // The number of bytes in buf_.
455  unsigned int len_;
456  // Only valid if len_ > 4.
457  RelocInfo::Mode rmode_;
458 
459  friend class Assembler;
460  friend class MacroAssembler;
461  friend class LCodeGen;
462 };
463 
464 
465 // -----------------------------------------------------------------------------
466 // A Displacement describes the 32bit immediate field of an instruction which
467 // may be used together with a Label in order to refer to a yet unknown code
468 // position. Displacements stored in the instruction stream are used to describe
469 // the instruction and to chain a list of instructions using the same Label.
470 // A Displacement contains 2 different fields:
471 //
472 // next field: position of next displacement in the chain (0 = end of list)
473 // type field: instruction type
474 //
475 // A next value of null (0) indicates the end of a chain (note that there can
476 // be no displacement at position zero, because there is always at least one
477 // instruction byte before the displacement).
478 //
479 // Displacement _data field layout
480 //
481 // |31.....2|1......0|
482 // [ next | type |
483 
484 class Displacement BASE_EMBEDDED {
485  public:
// NOTE(review): the enumerators of Type (lines 487-489 of the original
// file) were elided by this listing; UNCONDITIONAL_JUMP is referenced in
// print() below, so it is one of them.
486  enum Type {
490  };
491 
492  int data() const { return data_; }
493  Type type() const { return TypeField::decode(data_); }
// Re-links L to the next displacement in the chain, or unuses it when
// this was the last link (next field == 0; see the layout comment above).
494  void next(Label* L) const {
495  int n = NextField::decode(data_);
496  n > 0 ? L->link_to(n) : L->Unuse();
497  }
498  void link_to(Label* L) { init(L, type()); }
499 
500  explicit Displacement(int data) { data_ = data; }
501 
502  Displacement(Label* L, Type type) { init(L, type); }
503 
504  void print() {
505  PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
506  NextField::decode(data_));
507  }
508 
509  private:
// Packed representation: bits 1..0 = type, bits 31..2 = next position
// (see the "Displacement _data field layout" comment above the class).
510  int data_;
511 
512  class TypeField: public BitField<Type, 0, 2> {};
513  class NextField: public BitField<int, 2, 32-2> {};
514 
515  void init(Label* L, Type type);
516 };
517 
518 
519 
520 // CpuFeatures keeps track of which features are supported by the target CPU.
521 // Supported features must be enabled by a CpuFeatureScope before use.
522 // Example:
523 // if (assembler->IsSupported(SSE2)) {
524 // CpuFeatureScope fscope(assembler, SSE2);
525 // // Generate SSE2 floating point code.
526 // } else {
527 // // Generate standard x87 floating point code.
528 // }
529 class CpuFeatures : public AllStatic {
530  public:
531  // Detect features of the target CPU. Set safe defaults if the serializer
532  // is enabled (snapshots must be portable).
533  static void Probe();
534 
535  // Check whether a feature is supported by the target CPU.
// A feature forced via cross_compile_ is always reported supported;
// otherwise the corresponding --enable-* flag can veto it before the
// probed 'supported_' bitset is consulted.
536  static bool IsSupported(CpuFeature f) {
537  ASSERT(initialized_);
538  if (Check(f, cross_compile_)) return true;
539  if (f == SSE2 && !FLAG_enable_sse2) return false;
540  if (f == SSE3 && !FLAG_enable_sse3) return false;
541  if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
542  if (f == CMOV && !FLAG_enable_cmov) return false;
543  return Check(f, supported_);
544  }
545 
// NOTE(review): the signature line of this accessor (line 546 of the
// original file) was elided by this listing; the body reports whether the
// feature was found only by runtime probing.
547  ASSERT(initialized_);
548  return Check(f, found_by_runtime_probing_only_);
549  }
550 
// NOTE(review): the continuation of this condition (line 554 of the
// original file) was elided by this listing.
551  static bool IsSafeForSnapshot(CpuFeature f) {
552  return Check(f, cross_compile_) ||
553  (IsSupported(f) &&
555  }
556 
557  static bool VerifyCrossCompiling() {
558  return cross_compile_ == 0;
559  }
560 
// NOTE(review): the signature line of this predicate (line 561 of the
// original file) was elided; it checks whether the given feature is
// compatible with the cross-compile feature mask.
562  uint64_t mask = flag2set(f);
563  return cross_compile_ == 0 ||
564  (cross_compile_ & mask) == mask;
565  }
566 
567  private:
568  static bool Check(CpuFeature f, uint64_t set) {
569  return (set & flag2set(f)) != 0;
570  }
571 
// Converts a feature enum value to its single-bit mask in the 64-bit
// feature bitsets below.
572  static uint64_t flag2set(CpuFeature f) {
573  return static_cast<uint64_t>(1) << f;
574  }
575 
576 #ifdef DEBUG
577  static bool initialized_;
578 #endif
579  static uint64_t supported_;
580  static uint64_t found_by_runtime_probing_only_;
581 
582  static uint64_t cross_compile_;
583 
584  friend class ExternalReference;
585  friend class PlatformFeatureScope;
586  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
587 };
588 
589 
590 class Assembler : public AssemblerBase {
591  private:
592  // We check before assembling an instruction that there is sufficient
593  // space to write an instruction and its relocation information.
594  // The relocation writer's position must be kGap bytes above the end of
595  // the generated instructions. This leaves enough space for the
596  // longest possible ia32 instruction, 15 bytes, and the longest possible
597  // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
598  // (There is a 15 byte limit on ia32 instruction length that rules out some
599  // otherwise valid instructions.)
600  // This allows for a single, fast space check per instruction.
601  static const int kGap = 32;
602 
603  public:
604  // Create an assembler. Instructions and relocation information are emitted
605  // into a buffer, with the instructions starting from the beginning and the
606  // relocation information starting from the end of the buffer. See CodeDesc
607  // for a detailed comment on the layout (globals.h).
608  //
609  // If the provided buffer is NULL, the assembler allocates and grows its own
610  // buffer, and buffer_size determines the initial buffer size. The buffer is
611  // owned by the assembler and deallocated upon destruction of the assembler.
612  //
613  // If the provided buffer is not NULL, the assembler uses the provided buffer
614  // for code generation and assumes its size to be buffer_size. If the buffer
615  // is too small, a fatal error occurs. No deallocation of the buffer is done
616  // upon destruction of the assembler.
617  // TODO(vitalyr): the assembler does not need an isolate.
618  Assembler(Isolate* isolate, void* buffer, int buffer_size);
619  virtual ~Assembler() { }
620 
621  // GetCode emits any pending (non-emitted) code and fills the descriptor
622  // desc. GetCode() is idempotent; it returns the same result if no other
623  // Assembler functions are invoked in between GetCode() calls.
624  void GetCode(CodeDesc* desc);
625 
626  // Read/Modify the code target in the branch/call instruction at pc.
627  inline static Address target_address_at(Address pc,
628  ConstantPoolArray* constant_pool);
629  inline static void set_target_address_at(Address pc,
630  ConstantPoolArray* constant_pool,
631  Address target);
633  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
634  return target_address_at(pc, constant_pool);
635  }
636  static inline void set_target_address_at(Address pc,
637  Code* code,
638  Address target) {
639  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
640  set_target_address_at(pc, constant_pool, target);
641  }
642 
643  // Return the code target address at a call site from the return address
644  // of that call in the instruction stream.
646 
647  // This sets the branch destination (which is in the instruction on x86).
648  // This is for calls and branches within generated code.
650  Address instruction_payload, Code* code, Address target) {
651  set_target_address_at(instruction_payload, code, target);
652  }
653 
654  static const int kSpecialTargetSize = kPointerSize;
655 
656  // Distance between the address of the code target in the call instruction
657  // and the return address
659  // Distance between start of patched return sequence and the emitted address
660  // to jump to.
661  static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
662 
663  // Distance between start of patched debug break slot and the emitted address
664  // to jump to.
665  static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
666 
667  static const int kCallInstructionLength = 5;
669  static const int kJSReturnSequenceLength = 6;
670 
671  // The debug break slot must be able to contain a call instruction.
673 
674  // One byte opcode for test al, 0xXX.
675  static const byte kTestAlByte = 0xA8;
676  // One byte opcode for nop.
677  static const byte kNopByte = 0x90;
678 
679  // One byte opcode for a short unconditional jump.
680  static const byte kJmpShortOpcode = 0xEB;
681  // One byte prefix for a short conditional jump.
682  static const byte kJccShortPrefix = 0x70;
687 
688 
689  // ---------------------------------------------------------------------------
690  // Code generation
691  //
692  // - function names correspond one-to-one to ia32 instruction mnemonics
693  // - unless specified otherwise, instructions operate on 32bit operands
694  // - instructions on 8bit (byte) operands/registers have a trailing '_b'
695  // - instructions on 16bit (word) operands/registers have a trailing '_w'
696  // - naming conflicts with C++ keywords are resolved via a trailing '_'
697 
698  // NOTE ON INTERFACE: Currently, the interface is not very consistent
699  // in the sense that some operations (e.g. mov()) can be called in more
700  // the one way to generate the same instruction: The Register argument
701  // can in some cases be replaced with an Operand(Register) argument.
702  // This should be cleaned up and made more orthogonal. The questions
703  // is: should we always use Operands instead of Registers where an
704  // Operand is possible, or should we have a Register (overloaded) form
705  // instead? We must be careful to make sure that the selected instruction
706  // is obvious from the parameters to avoid hard-to-find code generation
707  // bugs.
708 
709  // Insert the smallest number of nop instructions
710  // possible to align the pc offset to a multiple
711  // of m. m must be a power of 2.
712  void Align(int m);
713  void Nop(int bytes = 1);
714  // Aligns code to something that's optimal for a jump target for the platform.
715  void CodeTargetAlign();
716 
717  // Stack
718  void pushad();
719  void popad();
720 
721  void pushfd();
722  void popfd();
723 
724  void push(const Immediate& x);
725  void push_imm32(int32_t imm32);
726  void push(Register src);
727  void push(const Operand& src);
728 
729  void pop(Register dst);
730  void pop(const Operand& dst);
731 
732  void enter(const Immediate& size);
733  void leave();
734 
735  // Moves
736  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
737  void mov_b(Register dst, const Operand& src);
738  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
739  void mov_b(const Operand& dst, int8_t imm8);
740  void mov_b(const Operand& dst, Register src);
741 
742  void mov_w(Register dst, const Operand& src);
743  void mov_w(const Operand& dst, Register src);
744  void mov_w(const Operand& dst, int16_t imm16);
745 
746  void mov(Register dst, int32_t imm32);
747  void mov(Register dst, const Immediate& x);
748  void mov(Register dst, Handle<Object> handle);
749  void mov(Register dst, const Operand& src);
750  void mov(Register dst, Register src);
751  void mov(const Operand& dst, const Immediate& x);
752  void mov(const Operand& dst, Handle<Object> handle);
753  void mov(const Operand& dst, Register src);
754 
755  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
756  void movsx_b(Register dst, const Operand& src);
757 
758  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
759  void movsx_w(Register dst, const Operand& src);
760 
761  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
762  void movzx_b(Register dst, const Operand& src);
763 
764  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
765  void movzx_w(Register dst, const Operand& src);
766 
767  // Conditional moves
768  void cmov(Condition cc, Register dst, Register src) {
769  cmov(cc, dst, Operand(src));
770  }
771  void cmov(Condition cc, Register dst, const Operand& src);
772 
773  // Flag management.
774  void cld();
775 
776  // Repetitive string instructions.
777  void rep_movs();
778  void rep_stos();
779  void stos();
780 
781  // Exchange two registers
782  void xchg(Register dst, Register src);
783 
784  // Arithmetics
785  void adc(Register dst, int32_t imm32);
786  void adc(Register dst, const Operand& src);
787 
788  void add(Register dst, Register src) { add(dst, Operand(src)); }
789  void add(Register dst, const Operand& src);
790  void add(const Operand& dst, Register src);
791  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
792  void add(const Operand& dst, const Immediate& x);
793 
794  void and_(Register dst, int32_t imm32);
795  void and_(Register dst, const Immediate& x);
796  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
797  void and_(Register dst, const Operand& src);
798  void and_(const Operand& dst, Register src);
799  void and_(const Operand& dst, const Immediate& x);
800 
801  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
802  void cmpb(const Operand& op, int8_t imm8);
803  void cmpb(Register reg, const Operand& op);
804  void cmpb(const Operand& op, Register reg);
805  void cmpb_al(const Operand& op);
806  void cmpw_ax(const Operand& op);
807  void cmpw(const Operand& op, Immediate imm16);
808  void cmp(Register reg, int32_t imm32);
809  void cmp(Register reg, Handle<Object> handle);
810  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
811  void cmp(Register reg, const Operand& op);
812  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
813  void cmp(const Operand& op, const Immediate& imm);
814  void cmp(const Operand& op, Handle<Object> handle);
815 
816  void dec_b(Register dst);
817  void dec_b(const Operand& dst);
818 
819  void dec(Register dst);
820  void dec(const Operand& dst);
821 
822  void cdq();
823 
824  void idiv(Register src);
825 
826  // Signed multiply instructions.
827  void imul(Register src); // edx:eax = eax * src.
828  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
829  void imul(Register dst, const Operand& src); // dst = dst * src.
830  void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
831 
832  void inc(Register dst);
833  void inc(const Operand& dst);
834 
835  void lea(Register dst, const Operand& src);
836 
837  // Unsigned multiply instruction.
838  void mul(Register src); // edx:eax = eax * reg.
839 
840  void neg(Register dst);
841 
842  void not_(Register dst);
843 
844  void or_(Register dst, int32_t imm32);
845  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
846  void or_(Register dst, const Operand& src);
847  void or_(const Operand& dst, Register src);
848  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
849  void or_(const Operand& dst, const Immediate& x);
850 
851  void rcl(Register dst, uint8_t imm8);
852  void rcr(Register dst, uint8_t imm8);
853  void ror(Register dst, uint8_t imm8);
854  void ror_cl(Register dst);
855 
856  void sar(Register dst, uint8_t imm8);
857  void sar_cl(Register dst);
858 
859  void sbb(Register dst, const Operand& src);
860 
861  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
862  void shld(Register dst, const Operand& src);
863 
864  void shl(Register dst, uint8_t imm8);
865  void shl_cl(Register dst);
866 
867  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
868  void shrd(Register dst, const Operand& src);
869 
870  void shr(Register dst, uint8_t imm8);
871  void shr_cl(Register dst);
872 
873  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
874  void sub(const Operand& dst, const Immediate& x);
875  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
876  void sub(Register dst, const Operand& src);
877  void sub(const Operand& dst, Register src);
878 
879  void test(Register reg, const Immediate& imm);
880  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
881  void test(Register reg, const Operand& op);
882  void test_b(Register reg, const Operand& op);
883  void test(const Operand& op, const Immediate& imm);
884  void test_b(Register reg, uint8_t imm8);
885  void test_b(const Operand& op, uint8_t imm8);
886 
887  void xor_(Register dst, int32_t imm32);
888  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
889  void xor_(Register dst, const Operand& src);
890  void xor_(const Operand& dst, Register src);
891  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
892  void xor_(const Operand& dst, const Immediate& x);
893 
894  // Bit operations.
895  void bt(const Operand& dst, Register src);
896  void bts(Register dst, Register src) { bts(Operand(dst), src); }
897  void bts(const Operand& dst, Register src);
898  void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
899  void bsr(Register dst, const Operand& src);
900 
901  // Miscellaneous
902  void hlt();
903  void int3();
904  void nop();
905  void ret(int imm16);
906 
907  // Label operations & relative jumps (PPUM Appendix D)
908  //
909  // Takes a branch opcode (cc) and a label (L) and generates
910  // either a backward branch or a forward branch and links it
911  // to the label fixup chain. Usage:
912  //
913  // Label L; // unbound label
914  // j(cc, &L); // forward branch to unbound label
915  // bind(&L); // bind label to the current pc
916  // j(cc, &L); // backward branch to bound label
917  // bind(&L); // illegal: a label may be bound only once
918  //
919  // Note: The same Label can be used for forward and backward branches
920  // but it may be bound only once.
921 
922  void bind(Label* L); // binds an unbound label L to the current code position
923 
924  // Calls
925  void call(Label* L);
926  void call(byte* entry, RelocInfo::Mode rmode);
927  int CallSize(const Operand& adr);
928  void call(Register reg) { call(Operand(reg)); }
929  void call(const Operand& adr);
930  int CallSize(Handle<Code> code, RelocInfo::Mode mode);
931  void call(Handle<Code> code,
932  RelocInfo::Mode rmode,
934 
935  // Jumps
936  // unconditional jump to L
937  void jmp(Label* L, Label::Distance distance = Label::kFar);
938  void jmp(byte* entry, RelocInfo::Mode rmode);
939  void jmp(Register reg) { jmp(Operand(reg)); }
940  void jmp(const Operand& adr);
941  void jmp(Handle<Code> code, RelocInfo::Mode rmode);
942 
943  // Conditional jumps
944  void j(Condition cc,
945  Label* L,
946  Label::Distance distance = Label::kFar);
947  void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
948  void j(Condition cc, Handle<Code> code);
949 
950  // Floating-point operations
951  void fld(int i);
952  void fstp(int i);
953 
954  void fld1();
955  void fldz();
956  void fldpi();
957  void fldln2();
958 
959  void fld_s(const Operand& adr);
960  void fld_d(const Operand& adr);
961 
962  void fstp_s(const Operand& adr);
963  void fst_s(const Operand& adr);
964  void fstp_d(const Operand& adr);
965  void fst_d(const Operand& adr);
966 
967  void fild_s(const Operand& adr);
968  void fild_d(const Operand& adr);
969 
970  void fist_s(const Operand& adr);
971 
972  void fistp_s(const Operand& adr);
973  void fistp_d(const Operand& adr);
974 
975  // The fisttp instructions require SSE3.
976  void fisttp_s(const Operand& adr);
977  void fisttp_d(const Operand& adr);
978 
979  void fabs();
980  void fchs();
981  void fcos();
982  void fsin();
983  void fptan();
984  void fyl2x();
985  void f2xm1();
986  void fscale();
987  void fninit();
988 
989  void fadd(int i);
990  void fadd_i(int i);
991  void fsub(int i);
992  void fsub_i(int i);
993  void fmul(int i);
994  void fmul_i(int i);
995  void fdiv(int i);
996  void fdiv_i(int i);
997 
998  void fisub_s(const Operand& adr);
999 
1000  void faddp(int i = 1);
1001  void fsubp(int i = 1);
1002  void fsubrp(int i = 1);
1003  void fmulp(int i = 1);
1004  void fdivp(int i = 1);
1005  void fprem();
1006  void fprem1();
1007 
1008  void fxch(int i = 1);
1009  void fincstp();
1010  void ffree(int i = 0);
1011 
1012  void ftst();
1013  void fucomp(int i);
1014  void fucompp();
1015  void fucomi(int i);
1016  void fucomip();
1017  void fcompp();
1018  void fnstsw_ax();
1019  void fwait();
1020  void fnclex();
1021 
1022  void frndint();
1023 
1024  void sahf();
1025  void setcc(Condition cc, Register reg);
1026 
1027  void cpuid();
1028 
1029  // SSE instructions
1030  void movaps(XMMRegister dst, XMMRegister src);
1031  void shufps(XMMRegister dst, XMMRegister src, byte imm8);
1032 
1033  void andps(XMMRegister dst, const Operand& src);
1034  void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
1035  void xorps(XMMRegister dst, const Operand& src);
1036  void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
1037  void orps(XMMRegister dst, const Operand& src);
1038  void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
1039 
1040  void addps(XMMRegister dst, const Operand& src);
1041  void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
1042  void subps(XMMRegister dst, const Operand& src);
1043  void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
1044  void mulps(XMMRegister dst, const Operand& src);
1045  void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
1046  void divps(XMMRegister dst, const Operand& src);
1047  void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
1048 
1049  // SSE2 instructions
1050  void cvttss2si(Register dst, const Operand& src);
1051  void cvttss2si(Register dst, XMMRegister src) {
1052  cvttss2si(dst, Operand(src));
1053  }
1054  void cvttsd2si(Register dst, const Operand& src);
1055  void cvtsd2si(Register dst, XMMRegister src);
1056 
1057  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
1058  void cvtsi2sd(XMMRegister dst, const Operand& src);
1059  void cvtss2sd(XMMRegister dst, XMMRegister src);
1060  void cvtsd2ss(XMMRegister dst, XMMRegister src);
1061 
1062  void addsd(XMMRegister dst, XMMRegister src);
1063  void addsd(XMMRegister dst, const Operand& src);
1064  void subsd(XMMRegister dst, XMMRegister src);
1065  void mulsd(XMMRegister dst, XMMRegister src);
1066  void mulsd(XMMRegister dst, const Operand& src);
1067  void divsd(XMMRegister dst, XMMRegister src);
1068  void xorpd(XMMRegister dst, XMMRegister src);
1069  void sqrtsd(XMMRegister dst, XMMRegister src);
1070 
1071  void andpd(XMMRegister dst, XMMRegister src);
1072  void orpd(XMMRegister dst, XMMRegister src);
1073 
1074  void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
1075  void ucomisd(XMMRegister dst, const Operand& src);
1076 
 1077  enum RoundingMode {
 1078  kRoundToNearest = 0x0,
 1079  kRoundDown = 0x1,
 1080  kRoundUp = 0x2,
 1081  kRoundToZero = 0x3
 1082  };
 1083 
 1084  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
 1085 
1086  void movmskpd(Register dst, XMMRegister src);
1087  void movmskps(Register dst, XMMRegister src);
1088 
1089  void cmpltsd(XMMRegister dst, XMMRegister src);
1090  void pcmpeqd(XMMRegister dst, XMMRegister src);
1091 
1092  void movdqa(XMMRegister dst, const Operand& src);
1093  void movdqa(const Operand& dst, XMMRegister src);
1094  void movdqu(XMMRegister dst, const Operand& src);
1095  void movdqu(const Operand& dst, XMMRegister src);
1096  void movdq(bool aligned, XMMRegister dst, const Operand& src) {
1097  if (aligned) {
1098  movdqa(dst, src);
1099  } else {
1100  movdqu(dst, src);
1101  }
1102  }
1103 
1104  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
1105  void movd(XMMRegister dst, const Operand& src);
1106  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
1107  void movd(const Operand& dst, XMMRegister src);
1108  void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
1109  void movsd(XMMRegister dst, const Operand& src);
1110  void movsd(const Operand& dst, XMMRegister src);
1111 
1112 
1113  void movss(XMMRegister dst, const Operand& src);
1114  void movss(const Operand& dst, XMMRegister src);
1115  void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
1116  void extractps(Register dst, XMMRegister src, byte imm8);
1117 
1118  void pand(XMMRegister dst, XMMRegister src);
1119  void pxor(XMMRegister dst, XMMRegister src);
1120  void por(XMMRegister dst, XMMRegister src);
1121  void ptest(XMMRegister dst, XMMRegister src);
1122 
1123  void psllq(XMMRegister reg, int8_t shift);
1124  void psllq(XMMRegister dst, XMMRegister src);
1125  void psrlq(XMMRegister reg, int8_t shift);
1126  void psrlq(XMMRegister dst, XMMRegister src);
1127  void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1128  void pextrd(Register dst, XMMRegister src, int8_t offset) {
1129  pextrd(Operand(dst), src, offset);
1130  }
1131  void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
1132  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
1133  pinsrd(dst, Operand(src), offset);
1134  }
1135  void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
1136 
1137  // Parallel XMM operations.
1138  void movntdqa(XMMRegister dst, const Operand& src);
1139  void movntdq(const Operand& dst, XMMRegister src);
1140  // Prefetch src position into cache level.
1141  // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
1142  // non-temporal
1143  void prefetch(const Operand& src, int level);
1144  // TODO(lrn): Need SFENCE for movnt?
1145 
1146  // Debugging
1147  void Print();
1148 
1149  // Check the code size generated from label to here.
1150  int SizeOfCodeGeneratedSince(Label* label) {
1151  return pc_offset() - label->pos();
1152  }
1153 
1154  // Mark address of the ExitJSFrame code.
1155  void RecordJSReturn();
1156 
1157  // Mark address of a debug break slot.
1158  void RecordDebugBreakSlot();
1159 
1160  // Record a comment relocation entry that can be used by a disassembler.
1161  // Use --code-comments to enable, or provide "force = true" flag to always
1162  // write a comment.
1163  void RecordComment(const char* msg, bool force = false);
1164 
1165  // Writes a single byte or word of data in the code stream. Used for
1166  // inline tables, e.g., jump-tables.
1167  void db(uint8_t data);
1168  void dd(uint32_t data);
1169 
1170  // Check if there is less than kGap bytes available in the buffer.
1171  // If this is the case, we need to grow the buffer before emitting
1172  // an instruction or relocation information.
1173  inline bool buffer_overflow() const {
1174  return pc_ >= reloc_info_writer.pos() - kGap;
1175  }
1176 
1177  // Get the number of bytes available in the buffer.
1178  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
1179 
1180  static bool IsNop(Address addr);
1181 
1182  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1183 
1185  return (buffer_ + buffer_size_) - reloc_info_writer.pos();
1186  }
1187 
1188  // Avoid overflows for displacements etc.
1189  static const int kMaximalBufferSize = 512*MB;
1190 
1191  byte byte_at(int pos) { return buffer_[pos]; }
1192  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
1193 
1194  // Allocate a constant pool of the correct size for the generated code.
1195  MaybeObject* AllocateConstantPool(Heap* heap);
1196 
1197  // Generate the constant pool for the generated code.
1198  void PopulateConstantPool(ConstantPoolArray* constant_pool);
1199 
1200  protected:
1201  void emit_sse_operand(XMMRegister reg, const Operand& adr);
1202  void emit_sse_operand(XMMRegister dst, XMMRegister src);
1203  void emit_sse_operand(Register dst, XMMRegister src);
1204  void emit_sse_operand(XMMRegister dst, Register src);
1205 
1206  byte* addr_at(int pos) { return buffer_ + pos; }
1207 
1208 
1209  private:
1210  uint32_t long_at(int pos) {
1211  return *reinterpret_cast<uint32_t*>(addr_at(pos));
1212  }
1213  void long_at_put(int pos, uint32_t x) {
1214  *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
1215  }
1216 
1217  // code emission
1218  void GrowBuffer();
1219  inline void emit(uint32_t x);
1220  inline void emit(Handle<Object> handle);
1221  inline void emit(uint32_t x,
1222  RelocInfo::Mode rmode,
1223  TypeFeedbackId id = TypeFeedbackId::None());
1224  inline void emit(Handle<Code> code,
1225  RelocInfo::Mode rmode,
1226  TypeFeedbackId id = TypeFeedbackId::None());
1227  inline void emit(const Immediate& x);
1228  inline void emit_w(const Immediate& x);
1229 
1230  // Emit the code-object-relative offset of the label's position
1231  inline void emit_code_relative_offset(Label* label);
1232 
1233  // instruction generation
1234  void emit_arith_b(int op1, int op2, Register dst, int imm8);
1235 
1236  // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
1237  // with a given destination expression and an immediate operand. It attempts
1238  // to use the shortest encoding possible.
1239  // sel specifies the /n in the modrm byte (see the Intel PRM).
1240  void emit_arith(int sel, Operand dst, const Immediate& x);
1241 
1242  void emit_operand(Register reg, const Operand& adr);
1243 
1244  void emit_farith(int b1, int b2, int i);
1245 
1246  // labels
1247  void print(Label* L);
1248  void bind_to(Label* L, int pos);
1249 
1250  // displacements
1251  inline Displacement disp_at(Label* L);
1252  inline void disp_at_put(Label* L, Displacement disp);
1253  inline void emit_disp(Label* L, Displacement::Type type);
1254  inline void emit_near_disp(Label* L);
1255 
1256  // record reloc info for current pc_
1257  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1258 
1259  friend class CodePatcher;
1260  friend class EnsureSpace;
1261 
1262  // code generation
1263  RelocInfoWriter reloc_info_writer;
1264 
1265  PositionsRecorder positions_recorder_;
1266  friend class PositionsRecorder;
1267 };
1268 
1269 
1270 // Helper class that ensures that there is enough space for generating
1271 // instructions and relocation information. The constructor makes
1272 // sure that there is enough space and (in debug mode) the destructor
1273 // checks that we did not generate too much.
1274 class EnsureSpace BASE_EMBEDDED {
1275  public:
1276  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
1277  if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
1278 #ifdef DEBUG
1279  space_before_ = assembler_->available_space();
1280 #endif
1281  }
1282 
1283 #ifdef DEBUG
1284  ~EnsureSpace() {
1285  int bytes_generated = space_before_ - assembler_->available_space();
1286  ASSERT(bytes_generated < assembler_->kGap);
1287  }
1288 #endif
1289 
1290  private:
1291  Assembler* assembler_;
1292 #ifdef DEBUG
1293  int space_before_;
1294 #endif
1295 };
1296 
1297 } } // namespace v8::internal
1298 
1299 #endif // V8_IA32_ASSEMBLER_IA32_H_
byte * Address
Definition: globals.h:186
void cmp(Register src1, const Operand &src2, Condition cond=al)
void psllq(XMMRegister reg, int8_t shift)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
static const int kMaximalBufferSize
static const int kNumAllocatableRegisters
Isolate * isolate() const
Definition: assembler.h:62
const int kRegister_ebp_Code
void fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void test(Register reg0, Register reg1)
void db(uint8_t data)
void fst_d(const Operand &adr)
static const byte kJccShortPrefix
void ucomisd(XMMRegister dst, XMMRegister src)
static Immediate CodeRelativeOffset(Label *label)
void pcmpeqd(XMMRegister dst, XMMRegister src)
static void set_target_address_at(Address pc, Code *code, Address target)
void cmpb(Register reg, int8_t imm8)
void cvttss2si(Register dst, const Operand &src)
void PopulateConstantPool(ConstantPoolArray *constant_pool)
void PrintF(const char *format,...)
Definition: v8utils.cc:40
void or_(Register dst, Register src)
static Address target_address_at(Address pc, Code *code)
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode)
static const char * AllocationIndexToString(int index)
void por(XMMRegister dst, XMMRegister src)
static const int kMaxNumAllocatableRegisters
void idiv(Register src)
bool buffer_overflow() const
void add(Register dst, const Immediate &imm)
void mulsd(XMMRegister dst, XMMRegister src)
void addps(XMMRegister dst, const Operand &src)
bool is_byte_register() const
void cvtsd2si(Register dst, XMMRegister src)
static TypeFeedbackId None()
Definition: utils.h:1149
void or_(Register dst, const Immediate &imm)
void movdq(bool aligned, XMMRegister dst, const Operand &src)
const IntelDoubleRegister no_double_reg
void imul(Register dst, Register src)
const Register esp
void subps(XMMRegister dst, XMMRegister src)
void orpd(XMMRegister dst, XMMRegister src)
static const int kPatchDebugBreakSlotReturnOffset
static Operand StaticVariable(const ExternalReference &ext)
int SizeOfCodeGeneratedSince(Label *label)
void or_(Register dst, int32_t imm32)
void dd(uint32_t data)
void push(Register src, Condition cond=al)
void movntdq(const Operand &dst, XMMRegister src)
void cvtsi2sd(XMMRegister dst, Register src)
void cvtss2sd(XMMRegister dst, XMMRegister src)
TypeImpl< ZoneTypeConfig > Type
void sqrtsd(XMMRegister dst, XMMRegister src)
int int32_t
Definition: unicode.cc:47
static XMMRegister FromAllocationIndex(int index)
void fst_s(const Operand &adr)
static bool IsSupported(CpuFeature f)
const int kRegister_ebx_Code
const IntelDoubleRegister double_register_1
static Operand StaticArray(Register index, ScaleFactor scale, const ExternalReference &arr)
static bool enabled()
Definition: serialize.h:485
void sbb(Register dst, const Operand &src)
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
void mulps(XMMRegister dst, const Operand &src)
static X87Register FromAllocationIndex(int index)
void andpd(XMMRegister dst, XMMRegister src)
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
void sub(Register dst, Register src)
#define ASSERT(condition)
Definition: checks.h:329
void dec_b(Register dst)
void ptest(XMMRegister dst, XMMRegister src)
static const int kPatchReturnSequenceAddressOffset
static const byte kJmpShortOpcode
static const int kNumRegisters
static bool IsSafeForSnapshot(CpuFeature f)
static const char * AllocationIndexToString(int index)
void xorpd(XMMRegister dst, XMMRegister src)
void fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void bt(const Operand &dst, Register src)
void sar(Register dst, uint8_t imm8)
static const byte kTestAlByte
const int kRegister_esp_Code
void fistp_s(const Operand &adr)
void movsx_b(Register dst, Register src)
void imul(Register src)
ConstantPoolArray * constant_pool()
Definition: objects-inl.h:4589
void pxor(XMMRegister dst, XMMRegister src)
void addsd(XMMRegister dst, XMMRegister src)
void movzx_b(Register dst, Register src)
void shr_cl(Register dst)
void fld_d(const Operand &adr)
static const int kNumRegisters
void cmpb_al(const Operand &op)
const Register edi
void xchg(Register dst, Register src)
void fild_s(const Operand &adr)
EnsureSpace(Assembler *assembler)
void movntdqa(XMMRegister dst, const Operand &src)
uint8_t byte
Definition: globals.h:185
void rcl(Register dst, uint8_t imm8)
void enter(const Immediate &size)
void ret(const Register &xn=lr)
Condition ReverseCondition(Condition cond)
static const byte kJcShortOpcode
void movd(Register dst, XMMRegister src)
void jmp(Register reg)
static const int kMaxNumAllocatableRegisters
const Register ebp
void shld(Register dst, Register src)
void neg(const Register &rd, const Operand &operand)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
DwVfpRegister DoubleRegister
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
static const int kNumAllocatableRegisters
void andps(XMMRegister dst, const Operand &src)
void fisttp_d(const Operand &adr)
bool is(X87Register reg) const
void bsr(Register dst, Register src)
static int ToAllocationIndex(X87Register reg)
void movss(XMMRegister dst, const Operand &src)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const Register eax
void set_byte_at(int pos, byte value)
void cvtsd2ss(XMMRegister dst, XMMRegister src)
void pinsrd(XMMRegister dst, Register src, int8_t offset)
void mov_b(Register dst, int8_t imm8)
static const int kSpecialTargetSize
const IntelDoubleRegister double_register_5
friend class ExternalReference
void movsd(XMMRegister dst, XMMRegister src)
void GetCode(CodeDesc *desc)
void movdqa(XMMRegister dst, const Operand &src)
const int kPointerSize
Definition: globals.h:268
static const int kJSReturnSequenceLength
static Operand ForCell(Handle< Cell > cell)
void movdqu(XMMRegister dst, const Operand &src)
static const byte kNopByte
static IntelDoubleRegister FromAllocationIndex(int index)
const Register ecx
static const byte kJzShortOpcode
void movmskpd(Register dst, XMMRegister src)
void fisttp_s(const Operand &adr)
const int kRegister_edx_Code
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static IntelDoubleRegister from_code(int code)
static Register FromAllocationIndex(int index)
void xor_(Register dst, const Immediate &imm)
void orps(XMMRegister dst, const Operand &src)
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:359
void shl(Register dst, uint8_t imm8)
const Register pc
void cmpw_ax(const Operand &op)
void not_(Register dst)
void ror(const Register &rd, const Register &rs, unsigned shift)
static Register from_code(int code)
static int ToAllocationIndex(Register reg)
MaybeObject * AllocateConstantPool(Heap *heap)
static bool IsFoundByRuntimeProbingOnly(CpuFeature f)
void emit_sse_operand(XMMRegister reg, const Operand &adr)
void rcr(Register dst, uint8_t imm8)
void cvttss2si(Register dst, XMMRegister src)
void xorps(XMMRegister dst, XMMRegister src)
static const int kCallTargetAddressOffset
#define BASE_EMBEDDED
Definition: allocation.h:68
void mulps(XMMRegister dst, XMMRegister src)
void fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static const int kDebugBreakSlotLength
void Nop(int bytes=1)
void setcc(Condition cc, Register reg)
void fld_s(const Operand &adr)
void fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
const IntelDoubleRegister double_register_2
const IntelDoubleRegister double_register_3
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void pand(XMMRegister dst, XMMRegister src)
void mov_w(Register dst, const Operand &src)
void fstp_d(const Operand &adr)
void orps(XMMRegister dst, XMMRegister src)
const int kRegister_eax_Code
static int NumAllocatableRegisters()
bool is_valid() const
void push_imm32(int32_t imm32)
void fistp_d(const Operand &adr)
bool is(Register reg) const
void pextrd(Register dst, XMMRegister src, int8_t offset)
static const int kCallInstructionLength
static const int kNumRegisters
friend class PlatformFeatureScope
void ror_cl(Register dst)
static const byte kJncShortOpcode
const Register ebx
void shrd(Register dst, Register src)
void movaps(XMMRegister dst, XMMRegister src)
void movmskps(Register dst, XMMRegister src)
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:103
void psrlq(XMMRegister reg, int8_t shift)
void RecordComment(const char *msg)
int CallSize(const Operand &adr)
void sub(Register dst, const Immediate &imm)
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
void mov_b(Register dst, Register src)
void fstp_s(const Operand &adr)
void divsd(XMMRegister dst, XMMRegister src)
const IntelDoubleRegister double_register_0
void lea(Register dst, const Operand &src)
static Address target_address_from_return_address(Address pc)
void fild_d(const Operand &adr)
void xor_(Register dst, int32_t imm32)
friend class PositionsRecorder
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void inc(Register dst)
static bool VerifyCrossCompiling()
void andps(XMMRegister dst, XMMRegister src)
Displacement(Label *L, Type type)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
static int NumAllocatableRegisters()
void call(Register reg)
void cmpltsd(XMMRegister dst, XMMRegister src)
Condition NegateCondition(Condition cond)
void test(Register reg, const Immediate &imm)
const Register esi
void shr(Register dst, uint8_t imm8)
const int kRegister_ecx_Code
static XMMRegister from_code(int code)
void divps(XMMRegister dst, const Operand &src)
void add(Register dst, Register src)
const IntelDoubleRegister double_register_7
void sar_cl(Register dst)
void movd(XMMRegister dst, Register src)
void xorps(XMMRegister dst, const Operand &src)
void cmov(Condition cc, Register dst, Register src)
static int ToAllocationIndex(IntelDoubleRegister reg)
PositionsRecorder * positions_recorder()
void movss(XMMRegister dst, XMMRegister src)
void movsx_w(Register dst, Register src)
void subps(XMMRegister dst, const Operand &src)
static const char * AllocationIndexToString(int index)
void cmp(Register reg, const Immediate &imm)
void fisub_s(const Operand &adr)
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle)
void cmp(Register reg0, Register reg1)
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target)
void extractps(Register dst, XMMRegister src, byte imm8)
void shufps(XMMRegister dst, XMMRegister src, byte imm8)
static const byte kJnzShortOpcode
void divps(XMMRegister dst, XMMRegister src)
const Register no_reg
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void fist_s(const Operand &adr)
bool is(XMMRegister reg) const
const int kRegister_esi_Code
const IntelDoubleRegister double_register_6
void adr(const Register &rd, Label *label)
const Register edx
signed short int16_t
Definition: unicode.cc:45
void bts(Register dst, Register src)
const int kRegister_edi_Code
void prefetch(const Operand &src, int level)
static bool VerifyCrossCompiling(CpuFeature f)
static const int kPatchDebugBreakSlotAddressOffset
void movzx_w(Register dst, Register src)
void dec(Register dst)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static const char * AllocationIndexToString(int index)
void subsd(XMMRegister dst, XMMRegister src)
void addps(XMMRegister dst, XMMRegister src)
void cmpw(const Operand &op, Immediate imm16)
RelocInfo::Mode rmode() const
const int kRegister_no_reg_Code
void xor_(Register dst, Register src)
void next(Label *L) const
const IntelDoubleRegister double_register_4
void test_b(Register reg, const Operand &op)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void cvttsd2si(Register dst, const Operand &src)
void and_(Register dst, Register src)
static void deserialization_set_special_target_at(Address instruction_payload, Code *code, Address target)
void shl_cl(Register dst)
void adc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const int MB
Definition: globals.h:246