v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
assembler-x64-inl.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_

#include "x64/assembler-x64.h"

#include "cpu.h"
#include "debug.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Implementation of Assembler

void Assembler::emitl(uint32_t x) {
  Memory::uint32_at(pc_) = x;
  pc_ += sizeof(uint32_t);
}


void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
  Memory::uint64_at(pc_) = x;
  if (rmode != RelocInfo::NONE) {
    RecordRelocInfo(rmode, x);
  }
  pc_ += sizeof(uint64_t);
}


void Assembler::emitw(uint16_t x) {
  Memory::uint16_at(pc_) = x;
  pc_ += sizeof(uint16_t);
}
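
// Note: the emit* helpers above store the value with a plain typed write at
// pc_, which on x64 (a little-endian target) yields exactly the byte order
// the instruction stream requires. For example, emitl(0x12345678) appends the
// bytes 78 56 34 12 and advances pc_ by four.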


void Assembler::emit_code_target(Handle<Code> target,
                                 RelocInfo::Mode rmode,
                                 unsigned ast_id) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
  } else {
    RecordRelocInfo(rmode);
  }
  int current = code_targets_.length();
  if (current > 0 && code_targets_.last().is_identical_to(target)) {
    // Optimization if we keep jumping to the same code target.
    emitl(current - 1);
  } else {
    code_targets_.Add(target);
    emitl(current);
  }
}
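
// Note: emit_code_target() does not write the target's address into the
// instruction stream; it writes a 32-bit index into the code_targets_ list.
// code_target_object_handle_at() below performs the reverse lookup when the
// target handle is needed again.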


void Assembler::emit_rex_64(Register reg, Register rm_reg) {
  emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, const Operand& op) {
  emit(0x48 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}


void Assembler::emit_rex_64(Register rm_reg) {
  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
  emit(0x48 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(const Operand& op) {
  emit(0x48 | op.rex_);
}


void Assembler::emit_rex_32(Register reg, Register rm_reg) {
  emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(Register reg, const Operand& op) {
  emit(0x40 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_32(Register rm_reg) {
  emit(0x40 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(const Operand& op) {
  emit(0x40 | op.rex_);
}


void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
  byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
  byte rex_bits = reg.high_bit() << 2 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register rm_reg) {
  if (rm_reg.high_bit()) emit(0x41);
}


void Assembler::emit_optional_rex_32(const Operand& op) {
  if (op.rex_ != 0) emit(0x40 | op.rex_);
}
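
// Note: a REX prefix has the bit layout 0100WRXB, so 0x48 is REX with only
// the W (64-bit operand size) bit set and 0x40 is an empty REX prefix. The
// helpers above OR in R (bit 2, extends the ModRM reg field), X (bit 1,
// extends the SIB index) and B (bit 0, extends the ModRM rm or SIB base);
// e.g. emit_rex_64(r8, rax) emits 0x4C (W and R set). The emit_optional_rex_32
// variants omit the prefix entirely when no extension bit is needed, because
// 32-bit operand size is already the default encoding.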


Address Assembler::target_address_at(Address pc) {
  return Memory::int32_at(pc) + pc + 4;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
  CPU::FlushICache(pc, sizeof(int32_t));
}


Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
  return code_targets_[Memory::int32_at(pc)];
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
  if (IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    Memory::Address_at(pc_) += static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (IsCodeTarget(rmode_)) {
    Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(int32_t));
  }
}
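
// Note: the two cases adjust in opposite directions. An internal reference is
// an absolute pointer into the same code object, so it must move by delta
// along with the object. A code target is a pc-relative displacement to code
// outside the object: the target stays put while pc_ moves by delta, so the
// stored displacement must shrink by delta.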


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  if (IsCodeTarget(rmode_)) {
    return Assembler::target_address_at(pc_);
  } else {
    return Memory::Address_at(pc_);
  }
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
                              || rmode_ == EMBEDDED_OBJECT
                              || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(pc_);
}


int RelocInfo::target_address_size() {
  if (IsCodedSpecially()) {
    return Assembler::kSpecialTargetSize;
  } else {
    return kPointerSize;
  }
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  if (IsCodeTarget(rmode_)) {
    Assembler::set_target_address_at(pc_, target);
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
          host(), this, HeapObject::cast(target_code));
    }
  } else {
    Memory::Address_at(pc_) = target;
    CPU::FlushICache(pc_, sizeof(Address));
  }
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  if (rmode_ == EMBEDDED_OBJECT) {
    return Memory::Object_Handle_at(pc_);
  } else {
    return origin->code_target_object_handle_at(pc_);
  }
}


Object** RelocInfo::target_object_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object**>(pc_);
}


Address* RelocInfo::target_reference_address() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return reinterpret_cast<Address*>(pc_);
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  Memory::Object_at(pc_) = target;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<JSGlobalPropertyCell>(
      reinterpret_cast<JSGlobalPropertyCell**>(address));
}


JSGlobalPropertyCell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = Memory::Address_at(pc_);
  Object* object = HeapObject::FromAddress(
      address - JSGlobalPropertyCell::kValueOffset);
  return reinterpret_cast<JSGlobalPropertyCell*>(object);
}


void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
                                WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
  Memory::Address_at(pc_) = address;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because a cell can never be
    // on an evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}
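
// Note: for GLOBAL_PROPERTY_CELL the word at pc_ holds the address of the
// cell's value field, not of the cell itself; that is why set_target_cell()
// adds kValueOffset when storing the address and target_cell() subtracts it
// again to recover the cell object.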


bool RelocInfo::IsPatchedReturnSequence() {
  // The recognized call sequence is:
  //  movq(kScratchRegister, immediate64); call(kScratchRegister);
  // It only needs to be distinguished from a return sequence
  //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
  // The 11th byte is int3 (0xCC) in the return sequence and
  // REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
  return pc_[10] != 0xCC;
#else
  return false;
#endif
}
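
// Sketch of the byte patterns being distinguished, assuming kScratchRegister
// is r10 as in this port: the call sequence encodes roughly as
//   49 BA <imm64>  ; movq r10, immediate64 (REX.W+B, opcode B8+r)
//   41 FF D2       ; call r10
// so pc_[10] is always a REX prefix byte, whereas the padded return sequence
// has the int3 byte 0xCC at that offset.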


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  return !Assembler::IsNop(pc());
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Memory::Address_at(
      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
      target;
  CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                   sizeof(Address));
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(
      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
}


void RelocInfo::Visit(ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
    visitor->VisitGlobalPropertyCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
  // TODO(isolates): Get a cached isolate below.
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             Isolate::Current()->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
    StaticVisitor::VisitGlobalPropertyCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


// -----------------------------------------------------------------------------
// Implementation of Operand

void Operand::set_modrm(int mod, Register rm_reg) {
  ASSERT(is_uint2(mod));
  buf_[0] = mod << 6 | rm_reg.low_bits();
  // Set REX.B to the high bit of rm.code().
  rex_ |= rm_reg.high_bit();
}


void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT(is_uint2(scale));
  // Use SIB with no index register only for base rsp or r12. Otherwise we
  // would skip the SIB byte entirely.
  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
  buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
  rex_ |= index.high_bit() << 1 | base.high_bit();
  len_ = 2;
}
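
// Sketch of how these bytes combine, assuming the Operand(base, index, scale,
// disp) constructor declared in assembler-x64.h drives them: an operand such
// as [rbx + rcx*4 + 0x10] is built as set_modrm(1, rsp) (rm = 0b100 signals a
// following SIB byte, mod = 1 selects an 8-bit displacement), then
// set_sib(times_4, rcx, rbx) producing the SIB byte 0x8B (scale field 2,
// index 1, base 3), then set_disp8(0x10).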

void Operand::set_disp8(int disp) {
  ASSERT(is_int8(disp));
  ASSERT(len_ == 1 || len_ == 2);
  int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int8_t);
}

void Operand::set_disp32(int disp) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
}


} }  // namespace v8::internal

#endif  // V8_X64_ASSEMBLER_X64_INL_H_