v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
assembler-x64-inl.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_

#include "x64/assembler-x64.h"

#include "cpu.h"
#include "debug.h"
#include "v8memory.h"

namespace v8 {
namespace internal {

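// Following V8 convention, this -inl.h header gathers small hot-path
// definitions so that including it (rather than only assembler-x64.h)
// lets the compiler inline them at the call site.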

// -----------------------------------------------------------------------------
// Implementation of Assembler


void Assembler::emitl(uint32_t x) {
  Memory::uint32_at(pc_) = x;
  pc_ += sizeof(uint32_t);
}


void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
  Memory::uint64_at(pc_) = x;
  if (rmode != RelocInfo::NONE) {
    RecordRelocInfo(rmode, x);
  }
  pc_ += sizeof(uint64_t);
}


void Assembler::emitw(uint16_t x) {
  Memory::uint16_at(pc_) = x;
  pc_ += sizeof(uint16_t);
}

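// A code target is emitted as a 32-bit index into code_targets_ rather
// than as a raw 64-bit address; reusing the last slot when the same
// target repeats keeps that table short.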
void Assembler::emit_code_target(Handle<Code> target,
                                 RelocInfo::Mode rmode,
                                 TypeFeedbackId ast_id) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
  } else {
    RecordRelocInfo(rmode);
  }
  int current = code_targets_.length();
  if (current > 0 && code_targets_.last().is_identical_to(target)) {
    // Optimization if we keep jumping to the same code target.
    emitl(current - 1);
  } else {
    code_targets_.Add(target);
    emitl(current);
  }
}

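// REX prefix layout: 0b0100WRXB. W (0x08) selects a 64-bit operand size,
// R extends the ModRM reg field, X the SIB index field, and B the ModRM
// rm (or SIB base) field. 0x48 below is REX.W; high_bit() of a register
// contributes the R or B bit for r8-r15 and xmm8-xmm15. For example,
// emit_rex_64(rax, r8) emits 0x49 (REX.W|B), r8's high bit landing in B.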
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
  emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, const Operand& op) {
  emit(0x48 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}


void Assembler::emit_rex_64(Register rm_reg) {
  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
  emit(0x48 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(const Operand& op) {
  emit(0x48 | op.rex_);
}


void Assembler::emit_rex_32(Register reg, Register rm_reg) {
  emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(Register reg, const Operand& op) {
  emit(0x40 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_32(Register rm_reg) {
  emit(0x40 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(const Operand& op) {
  emit(0x40 | op.rex_);
}


void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
  byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
  byte rex_bits = reg.high_bit() << 2 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register rm_reg) {
  if (rm_reg.high_bit()) emit(0x41);
}


void Assembler::emit_optional_rex_32(const Operand& op) {
  if (op.rex_ != 0) emit(0x40 | op.rex_);
}

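// Code targets are stored as 32-bit displacements relative to the end of
// the 4-byte displacement field itself, hence the pc + 4 below.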
Address Assembler::target_address_at(Address pc) {
  return Memory::int32_at(pc) + pc + 4;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
  CPU::FlushICache(pc, sizeof(int32_t));
}


Address Assembler::target_address_from_return_address(Address pc) {
  return pc - kCallTargetAddressOffset;
}


Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
  return code_targets_[Memory::int32_at(pc)];
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// The modes possibly affected by apply must be in kApplyMask.
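// An absolute pointer embedded in the moved code object must be shifted by
// delta; a pc-relative target is adjusted the other way, since the site
// moved by delta while the target it points at did not.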
void RelocInfo::apply(intptr_t delta) {
  if (IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    Memory::Address_at(pc_) += static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (IsCodeTarget(rmode_)) {
    Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(int32_t));
  }
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  if (IsCodeTarget(rmode_)) {
    return Assembler::target_address_at(pc_);
  } else {
    return Memory::Address_at(pc_);
  }
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
                              || rmode_ == EMBEDDED_OBJECT
                              || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(pc_);
}


int RelocInfo::target_address_size() {
  if (IsCodedSpecially()) {
    return Assembler::kSpecialTargetSize;
  } else {
    return kPointerSize;
  }
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  if (IsCodeTarget(rmode_)) {
    Assembler::set_target_address_at(pc_, target);
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
          host(), this, HeapObject::cast(target_code));
    }
  } else {
    Memory::Address_at(pc_) = target;
    CPU::FlushICache(pc_, sizeof(Address));
  }
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  if (rmode_ == EMBEDDED_OBJECT) {
    return Memory::Object_Handle_at(pc_);
  } else {
    return origin->code_target_object_handle_at(pc_);
  }
}


Object** RelocInfo::target_object_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object**>(pc_);
}


Address* RelocInfo::target_reference_address() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return reinterpret_cast<Address*>(pc_);
}

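// Stores of heap pointers into code are observed by the garbage collector,
// so they go through the incremental-marking write barrier below.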
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  Memory::Object_at(pc_) = target;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<JSGlobalPropertyCell>(
      reinterpret_cast<JSGlobalPropertyCell**>(address));
}


JSGlobalPropertyCell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
                                WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
  Memory::Address_at(pc_) = address;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because a cell can never be
    // on an evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}

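// With kScratchRegister being r10 on x64, movq(r10, imm64) assembles to
// 49 BA followed by the eight immediate bytes, so the call instruction
// begins at byte index 10 with a REX prefix; an unpatched return sequence
// has int3 (0xCC) padding at that offset instead.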
bool RelocInfo::IsPatchedReturnSequence() {
  // The recognized call sequence is:
  //  movq(kScratchRegister, immediate64); call(kScratchRegister);
  // It only needs to be distinguished from a return sequence
  //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
  // The 11th byte is int3 (0xCC) in the return sequence and
  // REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
  return pc_[10] != 0xCC;
#else
  return false;
#endif
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  return !Assembler::IsNop(pc());
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Memory::Address_at(
      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
      target;
  CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                   sizeof(Address));
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(
      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
}


void RelocInfo::Visit(ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
    visitor->VisitGlobalPropertyCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
  // TODO(isolates): Get a cached isolate below.
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             Isolate::Current()->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    visitor->VisitRuntimeEntry(this);
  }
}

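// Same dispatch as Visit(ObjectVisitor*) above, but resolved statically
// through StaticVisitor, avoiding virtual calls on the GC's hot path.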
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
    StaticVisitor::VisitGlobalPropertyCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


// -----------------------------------------------------------------------------
// Implementation of Operand

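// ModRM byte layout: mod (2 bits) | reg (3 bits) | rm (3 bits); the SIB
// byte is scale (2 bits) | index (3 bits) | base (3 bits). Only the low
// three bits of a register code fit in these fields; the fourth bit is
// carried by the REX prefix accumulated in rex_.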
void Operand::set_modrm(int mod, Register rm_reg) {
  ASSERT(is_uint2(mod));
  buf_[0] = mod << 6 | rm_reg.low_bits();
  // Set REX.B to the high bit of rm.code().
  rex_ |= rm_reg.high_bit();
}


void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT(is_uint2(scale));
  // Use SIB with no index register only for base rsp or r12. Otherwise we
  // would skip the SIB byte entirely.
  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
  buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
  rex_ |= index.high_bit() << 1 | base.high_bit();
  len_ = 2;
}

void Operand::set_disp8(int disp) {
  ASSERT(is_int8(disp));
  ASSERT(len_ == 1 || len_ == 2);
  int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int8_t);
}

void Operand::set_disp32(int disp) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
}


} }  // namespace v8::internal

#endif  // V8_X64_ASSEMBLER_X64_INL_H_