v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
assembler-ia32.h
Go to the documentation of this file.
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2011 the V8 project authors. All rights reserved.
34 
35 // A light-weight IA32 Assembler.
36 
37 #ifndef V8_IA32_ASSEMBLER_IA32_H_
38 #define V8_IA32_ASSEMBLER_IA32_H_
39 
40 #include "isolate.h"
41 #include "serialize.h"
42 
43 namespace v8 {
44 namespace internal {
45 
46 // CPU Registers.
47 //
48 // 1) We would prefer to use an enum, but enum values are assignment-
49 // compatible with int, which has caused code-generation bugs.
50 //
51 // 2) We would prefer to use a class instead of a struct but we don't like
52 // the register initialization to depend on the particular initialization
53 // order (which appears to be different on OS X, Linux, and Windows for the
54 // installed versions of C++ we tried). Using a struct permits C-style
55 // "initialization". Also, the Register objects cannot be const as this
56 // forces initialization stubs in MSVC, making us dependent on initialization
57 // order.
58 //
59 // 3) By not using an enum, we are possibly preventing the compiler from
60 // doing certain constant folds, which may significantly reduce the
61 // code generated for some assembly instructions (because they boil down
62 // to a few constants). If this is a problem, we could change the code
63 // such that we use an enum in optimized mode, and the struct in debug
64 // mode. This way we get the compile-time error checking in debug mode
65 // and best performance in optimized code.
66 //
67 struct Register {
68  static const int kNumAllocatableRegisters = 6;
69  static const int kNumRegisters = 8;
70 
71  static inline const char* AllocationIndexToString(int index);
72 
73  static inline int ToAllocationIndex(Register reg);
74 
75  static inline Register FromAllocationIndex(int index);
76 
77  static Register from_code(int code) {
78  ASSERT(code >= 0);
79  ASSERT(code < kNumRegisters);
80  Register r = { code };
81  return r;
82  }
83  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
84  bool is(Register reg) const { return code_ == reg.code_; }
85  // eax, ebx, ecx and edx are byte registers, the rest are not.
86  bool is_byte_register() const { return code_ <= 3; }
87  int code() const {
88  ASSERT(is_valid());
89  return code_;
90  }
91  int bit() const {
92  ASSERT(is_valid());
93  return 1 << code_;
94  }
95 
96  // Unfortunately we can't make this private in a struct.
97  int code_;
98 };
99 
// Machine encodings of the ia32 general-purpose registers.
const int kRegister_eax_Code = 0;
const int kRegister_ecx_Code = 1;
const int kRegister_edx_Code = 2;
const int kRegister_ebx_Code = 3;
const int kRegister_esp_Code = 4;
const int kRegister_ebp_Code = 5;
const int kRegister_esi_Code = 6;
const int kRegister_edi_Code = 7;
// Sentinel encoding meaning "no register"; fails Register::is_valid().
const int kRegister_no_reg_Code = -1;

const Register no_reg = { kRegister_no_reg_Code };
119 
120 
121 inline const char* Register::AllocationIndexToString(int index) {
122  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
123  // This is the mapping of allocation indices to registers.
124  const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
125  return kNames[index];
126 }
127 
128 
129 inline int Register::ToAllocationIndex(Register reg) {
130  ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
131  return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
132 }
133 
134 
135 inline Register Register::FromAllocationIndex(int index) {
136  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
137  return (index >= 4) ? from_code(index + 2) : from_code(index);
138 }
139 
140 
141 struct XMMRegister {
142  static const int kNumAllocatableRegisters = 7;
143  static const int kNumRegisters = 8;
144 
145  static int ToAllocationIndex(XMMRegister reg) {
146  ASSERT(reg.code() != 0);
147  return reg.code() - 1;
148  }
149 
150  static XMMRegister FromAllocationIndex(int index) {
151  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
152  return from_code(index + 1);
153  }
154 
155  static const char* AllocationIndexToString(int index) {
156  ASSERT(index >= 0 && index < kNumAllocatableRegisters);
157  const char* const names[] = {
158  "xmm1",
159  "xmm2",
160  "xmm3",
161  "xmm4",
162  "xmm5",
163  "xmm6",
164  "xmm7"
165  };
166  return names[index];
167  }
168 
169  static XMMRegister from_code(int code) {
170  XMMRegister r = { code };
171  return r;
172  }
173 
174  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
175  bool is(XMMRegister reg) const { return code_ == reg.code_; }
176  int code() const {
177  ASSERT(is_valid());
178  return code_;
179  }
180 
181  int code_;
182 };
183 
184 
// The eight ia32 SSE registers.
const XMMRegister xmm0 = { 0 };
const XMMRegister xmm1 = { 1 };
const XMMRegister xmm2 = { 2 };
const XMMRegister xmm3 = { 3 };
const XMMRegister xmm4 = { 4 };
const XMMRegister xmm5 = { 5 };
const XMMRegister xmm6 = { 6 };
const XMMRegister xmm7 = { 7 };


// On ia32, DoubleRegister is simply an alias for XMMRegister.
typedef XMMRegister DoubleRegister;
196 
197 
198 enum Condition {
199  // any value < 0 is considered no_condition
201 
202  overflow = 0,
204  below = 2,
206  equal = 4,
209  above = 7,
210  negative = 8,
211  positive = 9,
214  less = 12,
217  greater = 15,
218 
219  // aliases
226 };
227 
228 
229 // Returns the equivalent of !cc.
230 // Negation of the default no_condition (-1) results in a non-default
231 // no_condition value (-2). As long as tests for no_condition check
232 // for condition < 0, this will work as expected.
234  return static_cast<Condition>(cc ^ 1);
235 }
236 
237 
238 // Corresponds to transposing the operands of a comparison.
240  switch (cc) {
241  case below:
242  return above;
243  case above:
244  return below;
245  case above_equal:
246  return below_equal;
247  case below_equal:
248  return above_equal;
249  case less:
250  return greater;
251  case greater:
252  return less;
253  case greater_equal:
254  return less_equal;
255  case less_equal:
256  return greater_equal;
257  default:
258  return cc;
259  };
260 }
261 
262 
263 // -----------------------------------------------------------------------------
264 // Machine instruction Immediates
265 
266 class Immediate BASE_EMBEDDED {
267  public:
268  inline explicit Immediate(int x);
269  inline explicit Immediate(const ExternalReference& ext);
270  inline explicit Immediate(Handle<Object> handle);
271  inline explicit Immediate(Smi* value);
272  inline explicit Immediate(Address addr);
273 
274  static Immediate CodeRelativeOffset(Label* label) {
275  return Immediate(label);
276  }
277 
278  bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
279  bool is_int8() const {
280  return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
281  }
282  bool is_int16() const {
283  return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
284  }
285 
286  private:
287  inline explicit Immediate(Label* value);
288 
289  int x_;
290  RelocInfo::Mode rmode_;
291 
292  friend class Assembler;
293  friend class MacroAssembler;
294 };
295 
296 
297 // -----------------------------------------------------------------------------
298 // Machine instruction Operands
299 
301  times_1 = 0,
302  times_2 = 1,
303  times_4 = 2,
304  times_8 = 3,
309 };
310 
311 
312 class Operand BASE_EMBEDDED {
313  public:
314  // XMM reg
315  INLINE(explicit Operand(XMMRegister xmm_reg));
316 
317  // [disp/r]
318  INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
319  // disp only must always be relocated
320 
321  // [base + disp/r]
322  explicit Operand(Register base, int32_t disp,
323  RelocInfo::Mode rmode = RelocInfo::NONE);
324 
325  // [base + index*scale + disp/r]
326  explicit Operand(Register base,
327  Register index,
328  ScaleFactor scale,
329  int32_t disp,
330  RelocInfo::Mode rmode = RelocInfo::NONE);
331 
332  // [index*scale + disp/r]
333  explicit Operand(Register index,
334  ScaleFactor scale,
335  int32_t disp,
336  RelocInfo::Mode rmode = RelocInfo::NONE);
337 
338  static Operand StaticVariable(const ExternalReference& ext) {
339  return Operand(reinterpret_cast<int32_t>(ext.address()),
340  RelocInfo::EXTERNAL_REFERENCE);
341  }
342 
343  static Operand StaticArray(Register index,
344  ScaleFactor scale,
345  const ExternalReference& arr) {
346  return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
347  RelocInfo::EXTERNAL_REFERENCE);
348  }
349 
350  static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
351  return Operand(reinterpret_cast<int32_t>(cell.location()),
352  RelocInfo::GLOBAL_PROPERTY_CELL);
353  }
354 
355  // Returns true if this Operand is a wrapper for the specified register.
356  bool is_reg(Register reg) const;
357 
358  // Returns true if this Operand is a wrapper for one register.
359  bool is_reg_only() const;
360 
361  // Asserts that this Operand is a wrapper for one register and returns the
362  // register.
363  Register reg() const;
364 
365  private:
366  // reg
367  INLINE(explicit Operand(Register reg));
368 
369  // Set the ModRM byte without an encoded 'reg' register. The
370  // register is encoded later as part of the emit_operand operation.
371  inline void set_modrm(int mod, Register rm);
372 
373  inline void set_sib(ScaleFactor scale, Register index, Register base);
374  inline void set_disp8(int8_t disp);
375  inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
376 
377  byte buf_[6];
378  // The number of bytes in buf_.
379  unsigned int len_;
380  // Only valid if len_ > 4.
381  RelocInfo::Mode rmode_;
382 
383  friend class Assembler;
384  friend class MacroAssembler;
385  friend class LCodeGen;
386 };
387 
388 
389 // -----------------------------------------------------------------------------
390 // A Displacement describes the 32bit immediate field of an instruction which
391 // may be used together with a Label in order to refer to a yet unknown code
392 // position. Displacements stored in the instruction stream are used to describe
393 // the instruction and to chain a list of instructions using the same Label.
394 // A Displacement contains 2 different fields:
395 //
396 // next field: position of next displacement in the chain (0 = end of list)
397 // type field: instruction type
398 //
399 // A next value of null (0) indicates the end of a chain (note that there can
400 // be no displacement at position zero, because there is always at least one
401 // instruction byte before the displacement).
402 //
403 // Displacement _data field layout
404 //
405 // |31.....2|1......0|
406 // [ next | type |
407 
408 class Displacement BASE_EMBEDDED {
409  public:
410  enum Type {
413  OTHER
414  };
415 
416  int data() const { return data_; }
417  Type type() const { return TypeField::decode(data_); }
418  void next(Label* L) const {
419  int n = NextField::decode(data_);
420  n > 0 ? L->link_to(n) : L->Unuse();
421  }
422  void link_to(Label* L) { init(L, type()); }
423 
424  explicit Displacement(int data) { data_ = data; }
425 
426  Displacement(Label* L, Type type) { init(L, type); }
427 
428  void print() {
429  PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
430  NextField::decode(data_));
431  }
432 
433  private:
434  int data_;
435 
436  class TypeField: public BitField<Type, 0, 2> {};
437  class NextField: public BitField<int, 2, 32-2> {};
438 
439  void init(Label* L, Type type);
440 };
441 
442 
443 
444 // CpuFeatures keeps track of which features are supported by the target CPU.
445 // Supported features must be enabled by a Scope before use.
446 // Example:
447 // if (CpuFeatures::IsSupported(SSE2)) {
448 // CpuFeatures::Scope fscope(SSE2);
449 // // Generate SSE2 floating point code.
450 // } else {
451 // // Generate standard x87 floating point code.
452 // }
453 class CpuFeatures : public AllStatic {
454  public:
455  // Detect features of the target CPU. Set safe defaults if the serializer
456  // is enabled (snapshots must be portable).
457  static void Probe();
458 
459  // Check whether a feature is supported by the target CPU.
460  static bool IsSupported(CpuFeature f) {
461  ASSERT(initialized_);
462  if (f == SSE2 && !FLAG_enable_sse2) return false;
463  if (f == SSE3 && !FLAG_enable_sse3) return false;
464  if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
465  if (f == CMOV && !FLAG_enable_cmov) return false;
466  if (f == RDTSC && !FLAG_enable_rdtsc) return false;
467  return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
468  }
469 
470 #ifdef DEBUG
471  // Check whether a feature is currently enabled.
472  static bool IsEnabled(CpuFeature f) {
473  ASSERT(initialized_);
474  Isolate* isolate = Isolate::UncheckedCurrent();
475  if (isolate == NULL) {
476  // When no isolate is available, work as if we're running in
477  // release mode.
478  return IsSupported(f);
479  }
480  uint64_t enabled = isolate->enabled_cpu_features();
481  return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
482  }
483 #endif
484 
485  // Enable a specified feature within a scope.
486  class Scope BASE_EMBEDDED {
487 #ifdef DEBUG
488 
489  public:
490  explicit Scope(CpuFeature f) {
491  uint64_t mask = static_cast<uint64_t>(1) << f;
494  (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
495  isolate_ = Isolate::UncheckedCurrent();
496  old_enabled_ = 0;
497  if (isolate_ != NULL) {
498  old_enabled_ = isolate_->enabled_cpu_features();
499  isolate_->set_enabled_cpu_features(old_enabled_ | mask);
500  }
501  }
502  ~Scope() {
503  ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
504  if (isolate_ != NULL) {
505  isolate_->set_enabled_cpu_features(old_enabled_);
506  }
507  }
508 
509  private:
510  Isolate* isolate_;
511  uint64_t old_enabled_;
512 #else
513 
514  public:
515  explicit Scope(CpuFeature f) {}
516 #endif
517  };
518 
519  class TryForceFeatureScope BASE_EMBEDDED {
520  public:
522  : old_supported_(CpuFeatures::supported_) {
523  if (CanForce()) {
524  CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
525  }
526  }
527 
529  if (CanForce()) {
530  CpuFeatures::supported_ = old_supported_;
531  }
532  }
533 
534  private:
535  static bool CanForce() {
536  // It's only safe to temporarily force support of CPU features
537  // when there's only a single isolate, which is guaranteed when
538  // the serializer is enabled.
539  return Serializer::enabled();
540  }
541 
542  const uint64_t old_supported_;
543  };
544 
545  private:
546 #ifdef DEBUG
547  static bool initialized_;
548 #endif
549  static uint64_t supported_;
550  static uint64_t found_by_runtime_probing_;
551 
552  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
553 };
554 
555 
556 class Assembler : public AssemblerBase {
557  private:
558  // We check before assembling an instruction that there is sufficient
559  // space to write an instruction and its relocation information.
560  // The relocation writer's position must be kGap bytes above the end of
561  // the generated instructions. This leaves enough space for the
562  // longest possible ia32 instruction, 15 bytes, and the longest possible
563  // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
564  // (There is a 15 byte limit on ia32 instruction length that rules out some
565  // otherwise valid instructions.)
566  // This allows for a single, fast space check per instruction.
567  static const int kGap = 32;
568 
569  public:
570  // Create an assembler. Instructions and relocation information are emitted
571  // into a buffer, with the instructions starting from the beginning and the
572  // relocation information starting from the end of the buffer. See CodeDesc
573  // for a detailed comment on the layout (globals.h).
574  //
575  // If the provided buffer is NULL, the assembler allocates and grows its own
576  // buffer, and buffer_size determines the initial buffer size. The buffer is
577  // owned by the assembler and deallocated upon destruction of the assembler.
578  //
579  // If the provided buffer is not NULL, the assembler uses the provided buffer
580  // for code generation and assumes its size to be buffer_size. If the buffer
581  // is too small, a fatal error occurs. No deallocation of the buffer is done
582  // upon destruction of the assembler.
583  // TODO(vitalyr): the assembler does not need an isolate.
584  Assembler(Isolate* isolate, void* buffer, int buffer_size);
585  ~Assembler();
586 
587  // Overrides the default provided by FLAG_debug_code.
588  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
589 
590  // Avoids using instructions that vary in size in unpredictable ways between
591  // the snapshot and the running VM. This is needed by the full compiler so
592  // that it can recompile code with debug support and fix the PC.
593  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
594 
595  // GetCode emits any pending (non-emitted) code and fills the descriptor
596  // desc. GetCode() is idempotent; it returns the same result if no other
597  // Assembler functions are invoked in between GetCode() calls.
598  void GetCode(CodeDesc* desc);
599 
600  // Read/Modify the code target in the branch/call instruction at pc.
601  inline static Address target_address_at(Address pc);
602  inline static void set_target_address_at(Address pc, Address target);
603 
604  // Return the code target address at a call site from the return address
605  // of that call in the instruction stream.
607 
608  // This sets the branch destination (which is in the instruction on x86).
609  // This is for calls and branches within generated code.
611  Address instruction_payload, Address target) {
612  set_target_address_at(instruction_payload, target);
613  }
614 
615  // This sets the branch destination (which is in the instruction on x86).
616  // This is for calls and branches to runtime code.
617  inline static void set_external_target_at(Address instruction_payload,
618  Address target) {
619  set_target_address_at(instruction_payload, target);
620  }
621 
622  static const int kSpecialTargetSize = kPointerSize;
623 
624  // Distance between the address of the code target in the call instruction
625  // and the return address
627  // Distance between start of patched return sequence and the emitted address
628  // to jump to.
629  static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
630 
631  // Distance between start of patched debug break slot and the emitted address
632  // to jump to.
633  static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
634 
635  static const int kCallInstructionLength = 5;
637  static const int kJSReturnSequenceLength = 6;
638 
639  // The debug break slot must be able to contain a call instruction.
641 
642  // One byte opcode for test al, 0xXX.
643  static const byte kTestAlByte = 0xA8;
644  // One byte opcode for nop.
645  static const byte kNopByte = 0x90;
646 
647  // One byte opcode for a short unconditional jump.
648  static const byte kJmpShortOpcode = 0xEB;
649  // One byte prefix for a short conditional jump.
650  static const byte kJccShortPrefix = 0x70;
655 
656 
657  // ---------------------------------------------------------------------------
658  // Code generation
659  //
660  // - function names correspond one-to-one to ia32 instruction mnemonics
661  // - unless specified otherwise, instructions operate on 32bit operands
662  // - instructions on 8bit (byte) operands/registers have a trailing '_b'
663  // - instructions on 16bit (word) operands/registers have a trailing '_w'
664  // - naming conflicts with C++ keywords are resolved via a trailing '_'
665 
666  // NOTE ON INTERFACE: Currently, the interface is not very consistent
667  // in the sense that some operations (e.g. mov()) can be called in more
668  // the one way to generate the same instruction: The Register argument
669  // can in some cases be replaced with an Operand(Register) argument.
670  // This should be cleaned up and made more orthogonal. The questions
671  // is: should we always use Operands instead of Registers where an
672  // Operand is possible, or should we have a Register (overloaded) form
673  // instead? We must be careful to make sure that the selected instruction
674  // is obvious from the parameters to avoid hard-to-find code generation
675  // bugs.
676 
677  // Insert the smallest number of nop instructions
678  // possible to align the pc offset to a multiple
679  // of m. m must be a power of 2.
680  void Align(int m);
681  void Nop(int bytes = 1);
682  // Aligns code to something that's optimal for a jump target for the platform.
683  void CodeTargetAlign();
684 
685  // Stack
686  void pushad();
687  void popad();
688 
689  void pushfd();
690  void popfd();
691 
692  void push(const Immediate& x);
693  void push_imm32(int32_t imm32);
694  void push(Register src);
695  void push(const Operand& src);
696 
697  void pop(Register dst);
698  void pop(const Operand& dst);
699 
700  void enter(const Immediate& size);
701  void leave();
702 
703  // Moves
704  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
705  void mov_b(Register dst, const Operand& src);
706  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
707  void mov_b(const Operand& dst, int8_t imm8);
708  void mov_b(const Operand& dst, Register src);
709 
710  void mov_w(Register dst, const Operand& src);
711  void mov_w(const Operand& dst, Register src);
712 
713  void mov(Register dst, int32_t imm32);
714  void mov(Register dst, const Immediate& x);
715  void mov(Register dst, Handle<Object> handle);
716  void mov(Register dst, const Operand& src);
717  void mov(Register dst, Register src);
718  void mov(const Operand& dst, const Immediate& x);
719  void mov(const Operand& dst, Handle<Object> handle);
720  void mov(const Operand& dst, Register src);
721 
722  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
723  void movsx_b(Register dst, const Operand& src);
724 
725  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
726  void movsx_w(Register dst, const Operand& src);
727 
728  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
729  void movzx_b(Register dst, const Operand& src);
730 
731  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
732  void movzx_w(Register dst, const Operand& src);
733 
734  // Conditional moves
735  void cmov(Condition cc, Register dst, Register src) {
736  cmov(cc, dst, Operand(src));
737  }
738  void cmov(Condition cc, Register dst, const Operand& src);
739 
740  // Flag management.
741  void cld();
742 
743  // Repetitive string instructions.
744  void rep_movs();
745  void rep_stos();
746  void stos();
747 
748  // Exchange two registers
749  void xchg(Register dst, Register src);
750 
751  // Arithmetics
752  void adc(Register dst, int32_t imm32);
753  void adc(Register dst, const Operand& src);
754 
755  void add(Register dst, Register src) { add(dst, Operand(src)); }
756  void add(Register dst, const Operand& src);
757  void add(const Operand& dst, Register src);
758  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
759  void add(const Operand& dst, const Immediate& x);
760 
761  void and_(Register dst, int32_t imm32);
762  void and_(Register dst, const Immediate& x);
763  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
764  void and_(Register dst, const Operand& src);
765  void and_(const Operand& dst, Register src);
766  void and_(const Operand& dst, const Immediate& x);
767 
768  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
769  void cmpb(const Operand& op, int8_t imm8);
770  void cmpb(Register reg, const Operand& op);
771  void cmpb(const Operand& op, Register reg);
772  void cmpb_al(const Operand& op);
773  void cmpw_ax(const Operand& op);
774  void cmpw(const Operand& op, Immediate imm16);
775  void cmp(Register reg, int32_t imm32);
776  void cmp(Register reg, Handle<Object> handle);
777  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
778  void cmp(Register reg, const Operand& op);
779  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
780  void cmp(const Operand& op, const Immediate& imm);
781  void cmp(const Operand& op, Handle<Object> handle);
782 
783  void dec_b(Register dst);
784  void dec_b(const Operand& dst);
785 
786  void dec(Register dst);
787  void dec(const Operand& dst);
788 
789  void cdq();
790 
791  void idiv(Register src);
792 
793  // Signed multiply instructions.
794  void imul(Register src); // edx:eax = eax * src.
795  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
796  void imul(Register dst, const Operand& src); // dst = dst * src.
797  void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
798 
799  void inc(Register dst);
800  void inc(const Operand& dst);
801 
802  void lea(Register dst, const Operand& src);
803 
804  // Unsigned multiply instruction.
805  void mul(Register src); // edx:eax = eax * reg.
806 
807  void neg(Register dst);
808 
809  void not_(Register dst);
810 
811  void or_(Register dst, int32_t imm32);
812  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
813  void or_(Register dst, const Operand& src);
814  void or_(const Operand& dst, Register src);
815  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
816  void or_(const Operand& dst, const Immediate& x);
817 
818  void rcl(Register dst, uint8_t imm8);
819  void rcr(Register dst, uint8_t imm8);
820 
821  void sar(Register dst, uint8_t imm8);
822  void sar_cl(Register dst);
823 
824  void sbb(Register dst, const Operand& src);
825 
826  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
827  void shld(Register dst, const Operand& src);
828 
829  void shl(Register dst, uint8_t imm8);
830  void shl_cl(Register dst);
831 
832  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
833  void shrd(Register dst, const Operand& src);
834 
835  void shr(Register dst, uint8_t imm8);
836  void shr_cl(Register dst);
837 
838  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
839  void sub(const Operand& dst, const Immediate& x);
840  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
841  void sub(Register dst, const Operand& src);
842  void sub(const Operand& dst, Register src);
843 
844  void test(Register reg, const Immediate& imm);
845  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
846  void test(Register reg, const Operand& op);
847  void test_b(Register reg, const Operand& op);
848  void test(const Operand& op, const Immediate& imm);
849  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
850  void test_b(const Operand& op, uint8_t imm8);
851 
852  void xor_(Register dst, int32_t imm32);
853  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
854  void xor_(Register dst, const Operand& src);
855  void xor_(const Operand& dst, Register src);
856  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
857  void xor_(const Operand& dst, const Immediate& x);
858 
859  // Bit operations.
860  void bt(const Operand& dst, Register src);
861  void bts(Register dst, Register src) { bts(Operand(dst), src); }
862  void bts(const Operand& dst, Register src);
863 
864  // Miscellaneous
865  void hlt();
866  void int3();
867  void nop();
868  void rdtsc();
869  void ret(int imm16);
870 
871  // Label operations & relative jumps (PPUM Appendix D)
872  //
873  // Takes a branch opcode (cc) and a label (L) and generates
874  // either a backward branch or a forward branch and links it
875  // to the label fixup chain. Usage:
876  //
877  // Label L; // unbound label
878  // j(cc, &L); // forward branch to unbound label
879  // bind(&L); // bind label to the current pc
880  // j(cc, &L); // backward branch to bound label
881  // bind(&L); // illegal: a label may be bound only once
882  //
883  // Note: The same Label can be used for forward and backward branches
884  // but it may be bound only once.
885 
886  void bind(Label* L); // binds an unbound label L to the current code position
887 
888  // Calls
889  void call(Label* L);
890  void call(byte* entry, RelocInfo::Mode rmode);
891  int CallSize(const Operand& adr);
892  void call(Register reg) { call(Operand(reg)); }
893  void call(const Operand& adr);
894  int CallSize(Handle<Code> code, RelocInfo::Mode mode);
895  void call(Handle<Code> code,
896  RelocInfo::Mode rmode,
898 
899  // Jumps
900  // unconditional jump to L
901  void jmp(Label* L, Label::Distance distance = Label::kFar);
902  void jmp(byte* entry, RelocInfo::Mode rmode);
903  void jmp(Register reg) { jmp(Operand(reg)); }
904  void jmp(const Operand& adr);
905  void jmp(Handle<Code> code, RelocInfo::Mode rmode);
906 
907  // Conditional jumps
908  void j(Condition cc,
909  Label* L,
910  Label::Distance distance = Label::kFar);
911  void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
912  void j(Condition cc, Handle<Code> code);
913 
914  // Floating-point operations
915  void fld(int i);
916  void fstp(int i);
917 
918  void fld1();
919  void fldz();
920  void fldpi();
921  void fldln2();
922 
923  void fld_s(const Operand& adr);
924  void fld_d(const Operand& adr);
925 
926  void fstp_s(const Operand& adr);
927  void fstp_d(const Operand& adr);
928  void fst_d(const Operand& adr);
929 
930  void fild_s(const Operand& adr);
931  void fild_d(const Operand& adr);
932 
933  void fist_s(const Operand& adr);
934 
935  void fistp_s(const Operand& adr);
936  void fistp_d(const Operand& adr);
937 
938  // The fisttp instructions require SSE3.
939  void fisttp_s(const Operand& adr);
940  void fisttp_d(const Operand& adr);
941 
942  void fabs();
943  void fchs();
944  void fcos();
945  void fsin();
946  void fptan();
947  void fyl2x();
948  void f2xm1();
949  void fscale();
950  void fninit();
951 
952  void fadd(int i);
953  void fsub(int i);
954  void fmul(int i);
955  void fdiv(int i);
956 
957  void fisub_s(const Operand& adr);
958 
959  void faddp(int i = 1);
960  void fsubp(int i = 1);
961  void fsubrp(int i = 1);
962  void fmulp(int i = 1);
963  void fdivp(int i = 1);
964  void fprem();
965  void fprem1();
966 
967  void fxch(int i = 1);
968  void fincstp();
969  void ffree(int i = 0);
970 
971  void ftst();
972  void fucomp(int i);
973  void fucompp();
974  void fucomi(int i);
975  void fucomip();
976  void fcompp();
977  void fnstsw_ax();
978  void fwait();
979  void fnclex();
980 
981  void frndint();
982 
983  void sahf();
984  void setcc(Condition cc, Register reg);
985 
986  void cpuid();
987 
988  // SSE2 instructions
989  void cvttss2si(Register dst, const Operand& src);
990  void cvttsd2si(Register dst, const Operand& src);
991  void cvtsd2si(Register dst, XMMRegister src);
992 
// Register-source form of cvtsi2sd; forwards to the Operand overload below.
 993  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
994  void cvtsi2sd(XMMRegister dst, const Operand& src);
995  void cvtss2sd(XMMRegister dst, XMMRegister src);
996  void cvtsd2ss(XMMRegister dst, XMMRegister src);
997 
998  void addsd(XMMRegister dst, XMMRegister src);
999  void subsd(XMMRegister dst, XMMRegister src);
1000  void mulsd(XMMRegister dst, XMMRegister src);
1001  void divsd(XMMRegister dst, XMMRegister src);
1002  void xorpd(XMMRegister dst, XMMRegister src);
1003  void xorps(XMMRegister dst, XMMRegister src);
1004  void sqrtsd(XMMRegister dst, XMMRegister src);
1005 
1006  void andpd(XMMRegister dst, XMMRegister src);
1007  void orpd(XMMRegister dst, XMMRegister src);
1008 
1009  void ucomisd(XMMRegister dst, XMMRegister src);
1010  void ucomisd(XMMRegister dst, const Operand& src);
1011 
1014  kRoundDown = 0x1,
1015  kRoundUp = 0x2,
1017  };
1018 
1019  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
1020 
1021  void movmskpd(Register dst, XMMRegister src);
1022 
1023  void cmpltsd(XMMRegister dst, XMMRegister src);
1024  void pcmpeqd(XMMRegister dst, XMMRegister src);
1025 
1026  void movaps(XMMRegister dst, XMMRegister src);
1027 
1028  void movdqa(XMMRegister dst, const Operand& src);
1029  void movdqa(const Operand& dst, XMMRegister src);
1030  void movdqu(XMMRegister dst, const Operand& src);
1031  void movdqu(const Operand& dst, XMMRegister src);
1032 
1033  // Use either movsd or movlpd.
1034  void movdbl(XMMRegister dst, const Operand& src);
1035  void movdbl(const Operand& dst, XMMRegister src);
1036 
// Register-source form of movd (GP register -> XMM); forwards to the
// Operand overload.
 1037  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
1038  void movd(XMMRegister dst, const Operand& src);
// Register-destination form of movd (XMM -> GP register); forwards to the
// Operand overload.
 1039  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
1040  void movd(const Operand& dst, XMMRegister src);
1041  void movsd(XMMRegister dst, XMMRegister src);
1042 
1043  void movss(XMMRegister dst, const Operand& src);
1044  void movss(const Operand& dst, XMMRegister src);
1045  void movss(XMMRegister dst, XMMRegister src);
1046  void extractps(Register dst, XMMRegister src, byte imm8);
1047 
1048  void pand(XMMRegister dst, XMMRegister src);
1049  void pxor(XMMRegister dst, XMMRegister src);
1050  void por(XMMRegister dst, XMMRegister src);
1051  void ptest(XMMRegister dst, XMMRegister src);
1052 
1053  void psllq(XMMRegister reg, int8_t shift);
1054  void psllq(XMMRegister dst, XMMRegister src);
1055  void psrlq(XMMRegister reg, int8_t shift);
1056  void psrlq(XMMRegister dst, XMMRegister src);
1057  void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
// Register-destination form of pextrd; forwards to the Operand overload.
 1058  void pextrd(Register dst, XMMRegister src, int8_t offset) {
 1059  pextrd(Operand(dst), src, offset);
 1060  }
1061  void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
// Register-source form of pinsrd; forwards to the Operand overload.
 1062  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
 1063  pinsrd(dst, Operand(src), offset);
 1064  }
1065  void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
1066 
1067  // Parallel XMM operations.
1068  void movntdqa(XMMRegister dst, const Operand& src);
1069  void movntdq(const Operand& dst, XMMRegister src);
1070  // Prefetch src position into cache level.
1071  // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
1072  // non-temporal
1073  void prefetch(const Operand& src, int level);
1074  // TODO(lrn): Need SFENCE for movnt?
1075 
1076  // Debugging
1077  void Print();
1078 
1079  // Check the code size generated from label to here.
 1080  int SizeOfCodeGeneratedSince(Label* label) {
// Bytes emitted between the label's bound position and the current pc.
 1081  return pc_offset() - label->pos();
 1082  }
1083 
1084  // Mark address of the ExitJSFrame code.
1085  void RecordJSReturn();
1086 
1087  // Mark address of a debug break slot.
1088  void RecordDebugBreakSlot();
1089 
1090  // Record a comment relocation entry that can be used by a disassembler.
1091  // Use --code-comments to enable, or provide "force = true" flag to always
1092  // write a comment.
1093  void RecordComment(const char* msg, bool force = false);
1094 
1095  // Writes a single byte or word of data in the code stream. Used for
1096  // inline tables, e.g., jump-tables.
1097  void db(uint8_t data);
1098  void dd(uint32_t data);
1099 
// Number of code bytes emitted so far (current pc relative to buffer start).
 1100  int pc_offset() const { return pc_ - buffer_; }
1101 
1102  // Check if there is less than kGap bytes available in the buffer.
1103  // If this is the case, we need to grow the buffer before emitting
1104  // an instruction or relocation information.
// True once pc_ has advanced into the kGap safety margin that precedes the
// (downward-growing) relocation info.
 1105  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
1106 
1107  // Get the number of bytes available in the buffer.
// Bytes of buffer left between the code cursor and the relocation info.
 1108  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
1109 
1110  static bool IsNop(Address addr);
1111 
// Access to the embedded source-positions recorder (PositionsRecorder is
// also declared a friend of this class).
 1112  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1113 
1115  return (buffer_ + buffer_size_) - reloc_info_writer.pos();
1116  }
1117 
1118  // Avoid overflows for displacements etc.
1119  static const int kMaximalBufferSize = 512*MB;
1120  static const int kMinimalBufferSize = 4*KB;
1121 
// Read one byte of already-emitted code at buffer offset pos.
 1122  byte byte_at(int pos) { return buffer_[pos]; }
// Overwrite one byte of already-emitted code at buffer offset pos.
 1123  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
1124 
1125  protected:
// Accessor for the emit_debug_code_ flag (set via set_emit_debug_code).
 1126  bool emit_debug_code() const { return emit_debug_code_; }
// Accessor for the predictable_code_size_ flag (set via
// set_predictable_code_size).
 1127  bool predictable_code_size() const { return predictable_code_size_ ; }
1128 
1129  void movsd(XMMRegister dst, const Operand& src);
1130  void movsd(const Operand& dst, XMMRegister src);
1131 
1132  void emit_sse_operand(XMMRegister reg, const Operand& adr);
1133  void emit_sse_operand(XMMRegister dst, XMMRegister src);
1134  void emit_sse_operand(Register dst, XMMRegister src);
1135 
// Address of the byte at offset pos within the code buffer.
 1136  byte* addr_at(int pos) { return buffer_ + pos; }
1137 
1138 
1139  private:
// Read a raw 32-bit value from the code buffer at offset pos.
 1140  uint32_t long_at(int pos) {
 1141  return *reinterpret_cast<uint32_t*>(addr_at(pos));
 1142  }
// Write a raw 32-bit value into the code buffer at offset pos.
 1143  void long_at_put(int pos, uint32_t x) {
 1144  *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
 1145  }
1146 
1147  // code emission
1148  void GrowBuffer();
1149  inline void emit(uint32_t x);
1150  inline void emit(Handle<Object> handle);
1151  inline void emit(uint32_t x,
1152  RelocInfo::Mode rmode,
1153  TypeFeedbackId id = TypeFeedbackId::None());
1154  inline void emit(const Immediate& x);
1155  inline void emit_w(const Immediate& x);
1156 
1157  // Emit the code-object-relative offset of the label's position
1158  inline void emit_code_relative_offset(Label* label);
1159 
1160  // instruction generation
1161  void emit_arith_b(int op1, int op2, Register dst, int imm8);
1162 
1163  // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
1164  // with a given destination expression and an immediate operand. It attempts
1165  // to use the shortest encoding possible.
1166  // sel specifies the /n in the modrm byte (see the Intel PRM).
1167  void emit_arith(int sel, Operand dst, const Immediate& x);
1168 
1169  void emit_operand(Register reg, const Operand& adr);
1170 
1171  void emit_farith(int b1, int b2, int i);
1172 
1173  // labels
1174  void print(Label* L);
1175  void bind_to(Label* L, int pos);
1176 
1177  // displacements
1178  inline Displacement disp_at(Label* L);
1179  inline void disp_at_put(Label* L, Displacement disp);
1180  inline void emit_disp(Label* L, Displacement::Type type);
1181  inline void emit_near_disp(Label* L);
1182 
1183  // record reloc info for current pc_
1184  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1185 
1186  friend class CodePatcher;
1187  friend class EnsureSpace;
1188 
1189  // Code buffer:
1190  // The buffer into which code and relocation info are generated.
1191  byte* buffer_;
1192  int buffer_size_;
1193  // True if the assembler owns the buffer, false if buffer is external.
1194  bool own_buffer_;
1195 
1196  // code generation
1197  byte* pc_; // the program counter; moves forward
1198  RelocInfoWriter reloc_info_writer;
1199 
1200  PositionsRecorder positions_recorder_;
1201 
1202  bool emit_debug_code_;
1203  bool predictable_code_size_;
1204 
1205  friend class PositionsRecorder;
1206 };
1207 
1208 
1209 // Helper class that ensures that there is enough space for generating
1210 // instructions and relocation information. The constructor makes
1211 // sure that there is enough space and (in debug mode) the destructor
1212 // checks that we did not generate too much.
1213 class EnsureSpace BASE_EMBEDDED {
1214  public:
// Grows the assembler's buffer up-front if it has hit the kGap margin, so
// the instruction emitted under this scope cannot overflow. In debug
// builds, also snapshots the free space so the destructor can verify the
// emit stayed within bounds.
1215  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
1216  if (assembler_->overflow()) assembler_->GrowBuffer();
1217 #ifdef DEBUG
1218  space_before_ = assembler_->available_space();
1219 #endif
1220  }
1221 
1222 #ifdef DEBUG
// Debug-only check: the bytes generated while this scope was live must be
// fewer than the kGap reserve guaranteed by the constructor.
1223  ~EnsureSpace() {
1224  int bytes_generated = space_before_ - assembler_->available_space();
1225  ASSERT(bytes_generated < assembler_->kGap);
1226  }
1227 #endif
1228 
1229  private:
1230  Assembler* assembler_;
1231 #ifdef DEBUG
// Free buffer space recorded at construction time (debug builds only).
1232  int space_before_;
1233 #endif
1234 };
1235 
1236 } } // namespace v8::internal
1237 
1238 #endif // V8_IA32_ASSEMBLER_IA32_H_
byte * Address
Definition: globals.h:157
void cmp(Register src1, const Operand &src2, Condition cond=al)
void psllq(XMMRegister reg, int8_t shift)
static const int kMaximalBufferSize
const int kRegister_ebp_Code
void test(Register reg0, Register reg1)
static Operand Cell(Handle< JSGlobalPropertyCell > cell)
void db(uint8_t data)
void fst_d(const Operand &adr)
static const byte kJccShortPrefix
void ucomisd(XMMRegister dst, XMMRegister src)
static Immediate CodeRelativeOffset(Label *label)
void pcmpeqd(XMMRegister dst, XMMRegister src)
void cmpb(Register reg, int8_t imm8)
void cvttss2si(Register dst, const Operand &src)
void PrintF(const char *format,...)
Definition: v8utils.cc:40
void or_(Register dst, Register src)
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode)
void por(XMMRegister dst, XMMRegister src)
const XMMRegister xmm4
void idiv(Register src)
void add(Register dst, const Immediate &imm)
void mulsd(XMMRegister dst, XMMRegister src)
bool is_byte_register() const
const int KB
Definition: globals.h:207
void cvtsd2si(Register dst, XMMRegister src)
static TypeFeedbackId None()
Definition: utils.h:999
void or_(Register dst, const Immediate &imm)
void imul(Register dst, Register src)
const Register esp
void orpd(XMMRegister dst, XMMRegister src)
static const int kPatchDebugBreakSlotReturnOffset
static Operand StaticVariable(const ExternalReference &ext)
int SizeOfCodeGeneratedSince(Label *label)
void movdbl(XMMRegister dst, const Operand &src)
void or_(Register dst, int32_t imm32)
void dd(uint32_t data)
void push(Register src, Condition cond=al)
void movntdq(const Operand &dst, XMMRegister src)
void cvtsi2sd(XMMRegister dst, Register src)
void cvtss2sd(XMMRegister dst, XMMRegister src)
void sqrtsd(XMMRegister dst, XMMRegister src)
int int32_t
Definition: unicode.cc:47
static XMMRegister FromAllocationIndex(int index)
static bool IsSupported(CpuFeature f)
const int kRegister_ebx_Code
static Operand StaticArray(Register index, ScaleFactor scale, const ExternalReference &arr)
static bool enabled()
Definition: serialize.h:481
void sbb(Register dst, const Operand &src)
void andpd(XMMRegister dst, XMMRegister src)
static const int kCallInstructionLength
bool predictable_code_size() const
void j(Condition cc, Label *L, Label::Distance distance=Label::kFar)
void sub(Register dst, Register src)
static const int kMinimalBufferSize
#define ASSERT(condition)
Definition: checks.h:270
void dec_b(Register dst)
void ptest(XMMRegister dst, XMMRegister src)
static const int kPatchReturnSequenceAddressOffset
static const byte kJmpShortOpcode
void test_b(Register reg, uint8_t imm8)
void set_predictable_code_size(bool value)
static const char * AllocationIndexToString(int index)
const XMMRegister xmm6
void xorpd(XMMRegister dst, XMMRegister src)
StringInputBuffer *const buffer_
void ret(int imm16)
void bt(const Operand &dst, Register src)
void sar(Register dst, uint8_t imm8)
static const byte kTestAlByte
const int kRegister_esp_Code
const XMMRegister xmm5
void fistp_s(const Operand &adr)
void movsx_b(Register dst, Register src)
void imul(Register src)
void pxor(XMMRegister dst, XMMRegister src)
void addsd(XMMRegister dst, XMMRegister src)
void movzx_b(Register dst, Register src)
void shr_cl(Register dst)
void fld_d(const Operand &adr)
static const int kNumRegisters
Definition: assembler-arm.h:73
void cmpb_al(const Operand &op)
const Register edi
void xchg(Register dst, Register src)
void fild_s(const Operand &adr)
EnsureSpace(Assembler *assembler)
void movntdqa(XMMRegister dst, const Operand &src)
uint8_t byte
Definition: globals.h:156
void rcl(Register dst, uint8_t imm8)
void enter(const Immediate &size)
Condition ReverseCondition(Condition cond)
static const byte kJcShortOpcode
T ** location() const
Definition: handles.h:75
void movd(Register dst, XMMRegister src)
void jmp(Register reg)
const Register ebp
void shld(Register dst, Register src)
DwVfpRegister DoubleRegister
void fisttp_d(const Operand &adr)
void movss(XMMRegister dst, const Operand &src)
const Register eax
void set_byte_at(int pos, byte value)
void cvtsd2ss(XMMRegister dst, XMMRegister src)
void pinsrd(XMMRegister dst, Register src, int8_t offset)
void mov_b(Register dst, int8_t imm8)
static const int kSpecialTargetSize
const XMMRegister xmm1
void movsd(XMMRegister dst, XMMRegister src)
void GetCode(CodeDesc *desc)
void movdqa(XMMRegister dst, const Operand &src)
const int kPointerSize
Definition: globals.h:220
static const int kJSReturnSequenceLength
void movdqu(XMMRegister dst, const Operand &src)
static const byte kNopByte
const Register ecx
static void set_target_address_at(Address pc, Address target)
static const byte kJzShortOpcode
void movmskpd(Register dst, XMMRegister src)
void fisttp_s(const Operand &adr)
static void set_external_target_at(Address instruction_payload, Address target)
const int kRegister_edx_Code
static Register FromAllocationIndex(int index)
Definition: assembler-arm.h:82
void xor_(Register dst, const Immediate &imm)
void shl(Register dst, uint8_t imm8)
const Register pc
void cmpw_ax(const Operand &op)
void not_(Register dst)
static Register from_code(int code)
void set_emit_debug_code(bool value)
static int ToAllocationIndex(Register reg)
Definition: assembler-arm.h:77
void emit_sse_operand(XMMRegister reg, const Operand &adr)
void rcr(Register dst, uint8_t imm8)
void xorps(XMMRegister dst, XMMRegister src)
static const int kCallTargetAddressOffset
#define BASE_EMBEDDED
Definition: allocation.h:68
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
static const int kDebugBreakSlotLength
void Nop(int bytes=1)
void neg(Register dst)
void setcc(Condition cc, Register reg)
void fld_s(const Operand &adr)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
void pand(XMMRegister dst, XMMRegister src)
void mov_w(Register dst, const Operand &src)
const XMMRegister xmm3
void fstp_d(const Operand &adr)
static const int kNumAllocatableRegisters
const int kRegister_eax_Code
static int ToAllocationIndex(XMMRegister reg)
bool is_valid() const
void push_imm32(int32_t imm32)
void fistp_d(const Operand &adr)
bool is(Register reg) const
static Address target_address_at(Address pc)
void pextrd(Register dst, XMMRegister src, int8_t offset)
static const byte kJncShortOpcode
const Register ebx
void shrd(Register dst, Register src)
void movaps(XMMRegister dst, XMMRegister src)
void psrlq(XMMRegister reg, int8_t shift)
void RecordComment(const char *msg)
int CallSize(const Operand &adr)
void sub(Register dst, const Immediate &imm)
static void deserialization_set_special_target_at(Address instruction_payload, Address target)
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
void mov_b(Register dst, Register src)
void fstp_s(const Operand &adr)
void divsd(XMMRegister dst, XMMRegister src)
void lea(Register dst, const Operand &src)
static Address target_address_from_return_address(Address pc)
void fild_d(const Operand &adr)
void xor_(Register dst, int32_t imm32)
friend class PositionsRecorder
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void inc(Register dst)
static const int kNumAllocatableRegisters
Definition: assembler-arm.h:74
Displacement(Label *L, Type type)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
void call(Register reg)
void cmpltsd(XMMRegister dst, XMMRegister src)
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
void test(Register reg, const Immediate &imm)
const Register esi
void shr(Register dst, uint8_t imm8)
const int kRegister_ecx_Code
static XMMRegister from_code(int code)
void add(Register dst, Register src)
void sar_cl(Register dst)
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle)
void movd(XMMRegister dst, Register src)
void cmov(Condition cc, Register dst, Register src)
PositionsRecorder * positions_recorder()
void movsx_w(Register dst, Register src)
static const char * AllocationIndexToString(int index)
Definition: assembler-arm.h:87
void cmp(Register reg, const Immediate &imm)
void fisub_s(const Operand &adr)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
Definition: flags.cc:301
void cmp(Register reg0, Register reg1)
void extractps(Register dst, XMMRegister src, byte imm8)
static const byte kJnzShortOpcode
const Register no_reg
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
void and_(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void fist_s(const Operand &adr)
const XMMRegister xmm7
bool is(XMMRegister reg) const
const int kRegister_esi_Code
static const int kNumRegisters
const XMMRegister xmm2
const Register edx
void bts(Register dst, Register src)
const int kRegister_edi_Code
void prefetch(const Operand &src, int level)
static const int kPatchDebugBreakSlotAddressOffset
void movzx_w(Register dst, Register src)
void dec(Register dst)
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
bool emit_debug_code() const
void subsd(XMMRegister dst, XMMRegister src)
void cmpw(const Operand &op, Immediate imm16)
const int kRegister_no_reg_Code
void xor_(Register dst, Register src)
void next(Label *L) const
void test_b(Register reg, const Operand &op)
void mul(Register dst, Register src1, Register src2, SBit s=LeaveCC, Condition cond=al)
void cvttsd2si(Register dst, const Operand &src)
void and_(Register dst, Register src)
void shl_cl(Register dst)
const XMMRegister xmm0
void adc(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const int MB
Definition: globals.h:208