v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
assembler-arm-inl.h
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been modified
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_

#include "arm/assembler-arm.h"

#include "cpu.h"
#include "debug.h"


namespace v8 {
namespace internal {


int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
  ASSERT(!reg.is(kDoubleRegZero));
  ASSERT(!reg.is(kScratchDoubleReg));
  return reg.code();
}


void RelocInfo::apply(intptr_t delta) {
  if (RelocInfo::IsInternalReference(rmode_)) {
    // absolute code pointer inside code object moves with the code object.
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p += delta;  // relocate entry
  }
  // We do not use pc relative addressing on ARM, so there is
  // nothing else to do.
}
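// Illustrative sketch, not part of the original file: a standalone model of
// RelocInfo::apply() above. An absolute pointer embedded in a code object is
// adjusted by the distance the object moved; the helper name is hypothetical.
static inline void RelocateInternalReference(int32_t* slot, intptr_t delta) {
  *slot += static_cast<int32_t>(delta);  // embedded address tracks the move
}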


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  return Assembler::target_address_at(pc_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
         || rmode_ == EMBEDDED_OBJECT
         || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
}


int RelocInfo::target_address_size() {
  return kPointerSize;
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
      reinterpret_cast<intptr_t>(target) & ~3));
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}
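// Note, not part of the original file: the & ~3 above clears the two low bits
// of the stored target, forcing a 4-byte-aligned code address; for example
// 0x40001236 & ~3 == 0x40001234. target_address_at() near the bottom of this
// file applies the same mask when reading the value back.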


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_pointer_at(pc_)));
}


Object** RelocInfo::target_object_address() {
  // Provide a "natural pointer" to the embedded object,
  // which can be de-referenced during heap iteration.
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  reconstructed_obj_ptr_ =
      reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
  return &reconstructed_obj_ptr_;
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address* RelocInfo::target_reference_address() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
  return &reconstructed_adr_ptr_;
}


Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<JSGlobalPropertyCell>(
      reinterpret_cast<JSGlobalPropertyCell**>(address));
}


JSGlobalPropertyCell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
                                WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
  Memory::Address_at(pc_) = address;
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because a cell can never be
    // on an evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}


Address RelocInfo::call_address() {
  // The 2-instruction offset assumes a patched debug break slot or return
  // sequence.
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}


bool RelocInfo::IsPatchedReturnSequence() {
  Instr current_instr = Assembler::instr_at(pc_);
  Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
#ifdef USE_BLX
  // A patched return sequence is:
  //  ldr ip, [pc, #0]
  //  blx ip
  return ((current_instr & kLdrPCMask) == kLdrPCPattern)
         && ((next_instr & kBlxRegMask) == kBlxRegPattern);
#else
  // A patched return sequence is:
  //  mov lr, pc
  //  ldr pc, [pc, #-4]
  return (current_instr == kMovLrPc)
         && ((next_instr & kLdrPCMask) == kLdrPCPattern);
#endif
}
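// Illustrative sketch, not part of the original file: how a mask/pattern pair
// such as kBlxRegMask/kBlxRegPattern recognizes one instruction shape. Per the
// public ARM encoding, "blx <Rm>" is cond:000100101111111111110011:Rm, so
// masking off the condition and register fields leaves a fixed pattern. The
// constants below are assumptions derived from that encoding (compare the
// file's own kBxInstPattern of 0x012fff10 for "bx <Rm>"), not values copied
// from V8's constants-arm.h.
static inline bool LooksLikeBlxRegister(uint32_t instruction_bits) {
  const uint32_t kMask = 0x0ffffff0;     // ignore cond (31:28) and Rm (3:0)
  const uint32_t kPattern = 0x012fff30;  // fixed bits of blx <register>
  return (instruction_bits & kMask) == kPattern;
}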


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instr current_instr = Assembler::instr_at(pc_);
  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}


void RelocInfo::Visit(ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
    visitor->VisitGlobalPropertyCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  // TODO(isolates): Get a cached isolate below.
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             Isolate::Current()->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
    StaticVisitor::VisitGlobalPropertyCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
  rm_ = no_reg;
  imm32_ = immediate;
  rmode_ = rmode;
}


Operand::Operand(const ExternalReference& f) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(f.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Operand::Operand(Smi* value) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE;
}


Operand::Operand(Register rm) {
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = LSL;
  shift_imm_ = 0;
}


bool Operand::is_reg() const {
  return rm_.is_valid() &&
         rs_.is(no_reg) &&
         shift_op_ == LSL &&
         shift_imm_ == 0;
}
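// Illustrative usage, not part of the original file. The constructors above
// let emit sites pass registers, plain immediates, smis, and external
// references through a single Operand type; V8 codegen normally spells this
// with the __ masm macro, assumed here:
//   __ add(r0, r1, Operand(r2));           // Operand(Register): is_reg() true
//   __ add(r0, r1, Operand(42));           // immediate operand
//   __ mov(r0, Operand(Smi::FromInt(7)));  // tagged small integer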


void Assembler::CheckBuffer() {
  if (buffer_space() <= kGap) {
    GrowBuffer();
  }
  if (pc_offset() >= next_buffer_check_) {
    CheckConstPool(false, true);
  }
}


void Assembler::emit(Instr x) {
  CheckBuffer();
  *reinterpret_cast<Instr*>(pc_) = x;
  pc_ += kInstrSize;
}


Address Assembler::target_pointer_address_at(Address pc) {
  Address target_pc = pc;
  Instr instr = Memory::int32_at(target_pc);
  // If we have a bx instruction, the instruction before the bx is
  // what we need to patch.
  static const int32_t kBxInstMask = 0x0ffffff0;
  static const int32_t kBxInstPattern = 0x012fff10;
  if ((instr & kBxInstMask) == kBxInstPattern) {
    target_pc -= kInstrSize;
    instr = Memory::int32_at(target_pc);
  }

#ifdef USE_BLX
  // If we have a blx instruction, the instruction before it is
  // what needs to be patched.
  if ((instr & kBlxRegMask) == kBlxRegPattern) {
    target_pc -= kInstrSize;
    instr = Memory::int32_at(target_pc);
  }
#endif

  ASSERT(IsLdrPcImmediateOffset(instr));
  int offset = instr & 0xfff;  // offset_12 is unsigned
  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
  // Verify that the constant pool comes after the instruction referencing it.
  ASSERT(offset >= -4);
  return target_pc + offset + 8;
}
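// Worked example, not part of the original file: a standalone restatement of
// the offset arithmetic above, with a hypothetical helper name. On ARM,
// reading pc yields the address of the current instruction plus 8, so for
//   0x1000: ldr ip, [pc, #4]   ; offset_12 = 4, U bit set
// the constant pool slot sits at 0x1000 + 8 + 4 = 0x100c.
static inline uint32_t LdrPcLiteralAddress(uint32_t instr_address,
                                           uint32_t instr) {
  int offset = instr & 0xfff;                      // offset_12 field
  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit: add or subtract
  return instr_address + 8 + offset;               // pc reads as instr + 8
}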


Address Assembler::target_pointer_at(Address pc) {
  if (IsMovW(Memory::int32_at(pc))) {
    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
    Instruction* instr = Instruction::At(pc);
    Instruction* next_instr = Instruction::At(pc + kInstrSize);
    return reinterpret_cast<Address>(
        (next_instr->ImmedMovwMovtValue() << 16) |
        instr->ImmedMovwMovtValue());
  }
  return Memory::Address_at(target_pointer_address_at(pc));
}


Address Assembler::target_address_from_return_address(Address pc) {
  // Returns the address of the call target from the return address that will
  // be returned to after a call.
#ifdef USE_BLX
  // Call sequence on V7 or later is:
  //  movw  ip, #... @ call address low 16
  //  movt  ip, #... @ call address high 16
  //  blx   ip
  //                 @ return address
  // Or pre-V7 or cases that need frequent patching:
  //  ldr   ip, [pc, #...] @ call address
  //  blx   ip
  //                       @ return address
  Address candidate = pc - 2 * Assembler::kInstrSize;
  Instr candidate_instr(Memory::int32_at(candidate));
  if (IsLdrPcImmediateOffset(candidate_instr)) {
    return candidate;
  }
  candidate = pc - 3 * Assembler::kInstrSize;
  ASSERT(IsMovW(Memory::int32_at(candidate)) &&
         IsMovT(Memory::int32_at(candidate + kInstrSize)));
  return candidate;
#else
  // Call sequence is:
  //  mov   lr, pc
  //  ldr   pc, [pc, #...] @ call address
  //                       @ return address
  return pc - kInstrSize;
#endif
}


Address Assembler::return_address_from_call_start(Address pc) {
#ifdef USE_BLX
  if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
    return pc + kInstrSize * 2;
  } else {
    ASSERT(IsMovW(Memory::int32_at(pc)));
    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
    return pc + kInstrSize * 3;
  }
#else
  return pc + kInstrSize;
#endif
}
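// Note, not part of the original file: this inverts the two USE_BLX call
// sequences documented above. With kInstrSize == 4 on ARM:
//   ldr ip, [pc, #...] ; blx ip           -> 2 instructions, return at +8
//   movw ip, ... ; movt ip, ... ; blx ip  -> 3 instructions, return at +12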


void Assembler::deserialization_set_special_target_at(
    Address constant_pool_entry, Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


void Assembler::set_external_target_at(Address constant_pool_entry,
                                       Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


static Instr EncodeMovwImmediate(uint32_t immediate) {
  ASSERT(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
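// Worked example, not part of the original file: ARM movw/movt carry their
// 16-bit immediate split into imm4 (bits 19:16) and imm12 (bits 11:0), which
// is exactly what the shift above produces. For immediate = 0xABCD:
//   (0xABCD & 0xf000) << 4  == 0xA0000  // top nibble lands in bits 19:16
//   (0xABCD & 0x0fff)       == 0x00BCD  // low 12 bits land in bits 11:0
//   result                  == 0xA0BCD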


void Assembler::set_target_pointer_at(Address pc, Address target) {
  if (IsMovW(Memory::int32_at(pc))) {
    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
    uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
    uint32_t immediate = reinterpret_cast<uint32_t>(target);
    uint32_t intermediate = instr_ptr[0];
    intermediate &= ~EncodeMovwImmediate(0xFFFF);
    intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
    instr_ptr[0] = intermediate;
    intermediate = instr_ptr[1];
    intermediate &= ~EncodeMovwImmediate(0xFFFF);
    intermediate |= EncodeMovwImmediate(immediate >> 16);
    instr_ptr[1] = intermediate;
    ASSERT(IsMovW(Memory::int32_at(pc)));
    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
    CPU::FlushICache(pc, 2 * kInstrSize);
  } else {
    ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
    Memory::Address_at(target_pointer_address_at(pc)) = target;
    // Intuitively, we would think it is necessary to always flush the
    // instruction cache after patching a target address in the code as
    // follows:
    //   CPU::FlushICache(pc, sizeof(target));
    // However, on ARM, no instruction is actually patched in the case of
    // embedded constants of the form:
    //   ldr ip, [pc, #...]
    // since the instruction accessing this address in the constant pool
    // remains unchanged.
  }
}


Address Assembler::target_address_at(Address pc) {
  return reinterpret_cast<Address>(
      reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3);
}


void Assembler::set_target_address_at(Address pc, Address target) {
  set_target_pointer_at(pc, reinterpret_cast<Address>(
      reinterpret_cast<intptr_t>(target) & ~3));
}


} }  // namespace v8::internal

#endif  // V8_ARM_ASSEMBLER_ARM_INL_H_