V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
assembler-mips-inl.h

// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_

#include "mips/assembler-mips.h"

#include "cpu.h"
#include "debug.h"


namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Operand and MemOperand.

Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
  rm_ = no_reg;
  imm32_ = immediate;
  rmode_ = rmode;
}


Operand::Operand(const ExternalReference& f) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(f.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Operand::Operand(Smi* value) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE32;
}


Operand::Operand(Register rm) {
  rm_ = rm;
}


bool Operand::is_reg() const {
  return rm_.is_valid();
}
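
// An Operand built from an immediate, an ExternalReference, or a Smi leaves
// rm_ as no_reg, so is_reg() above is what tells the assembler whether it is
// looking at a register or at an immediate that must be materialized.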


int Register::NumAllocatableRegisters() {
  return kMaxNumAllocatableRegisters;
}


int DoubleRegister::NumRegisters() {
  return FPURegister::kMaxNumRegisters;
}


int DoubleRegister::NumAllocatableRegisters() {
  return FPURegister::kMaxNumAllocatableRegisters;
}


int FPURegister::ToAllocationIndex(FPURegister reg) {
  ASSERT(reg.code() % 2 == 0);
  ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
  ASSERT(reg.is_valid());
  ASSERT(!reg.is(kDoubleRegZero));
  ASSERT(!reg.is(kLithiumScratchDouble));
  return (reg.code() / 2);
}
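
// On MIPS32, a double-precision value lives in an even/odd pair of 32-bit FPU
// registers and is named by the even one, so halving the even register code
// gives a dense allocation index: f0 -> 0, f2 -> 1, f4 -> 2, and so on. The
// reserved kDoubleRegZero and Lithium scratch registers are excluded by the
// asserts above.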


// -----------------------------------------------------------------------------
// RelocInfo.

void RelocInfo::apply(intptr_t delta) {
  if (IsCodeTarget(rmode_)) {
    uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
    uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;

    if (scope1 != scope2) {
      Assembler::JumpLabelToJumpRegister(pc_);
    }
  }
  if (IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    byte* p = reinterpret_cast<byte*>(pc_);
    int count = Assembler::RelocateInternalReference(p, delta);
    CPU::FlushICache(p, count * sizeof(uint32_t));
  }
}
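
// kImm28Mask covers the 28-bit region-relative target field of the MIPS J and
// JAL instructions; masking it away leaves the 256 MB region bits. A code
// target therefore only needs rewriting when relocation moved the jump into a
// different region than its target, and JumpLabelToJumpRegister presumably
// turns the region-relative jump into a jump through a register.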


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         rmode_ == EMBEDDED_OBJECT ||
         rmode_ == EXTERNAL_REFERENCE);
  // Read the address of the word containing the target_address in an
  // instruction stream.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target.
  // For an instruction like LUI/ORI where the target bits are mixed into the
  // instruction bits, the size of the target will be zero, indicating that the
  // serializer should not step forward in memory after a target is resolved
  // and written. In this case the target_address_address function should
  // return the end of the instructions to be patched, allowing the
  // deserializer to deserialize the instructions as raw bytes and put them in
  // place, ready to be patched with the target. After jump optimization,
  // that is the address of the instruction that follows the J/JAL/JR/JALR
  // instruction.
  return reinterpret_cast<Address>(
      pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}


Address RelocInfo::constant_pool_entry_address() {
  UNREACHABLE();
  return NULL;
}


int RelocInfo::target_address_size() {
  return Assembler::kSpecialTargetSize;
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target);
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}
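
// Re-targeting a code reference is a store into code, so under incremental
// marking it must be reported: RecordWriteIntoCode keeps the marker aware of
// the edge from the host code object to the new target code object.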


Address Assembler::target_address_from_return_address(Address pc) {
  return pc - kCallTargetAddressOffset;
}
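
// The MIPS call sequence here is LUI/ORI to materialize the 32-bit target,
// then JALR plus its branch delay slot; the return address points just past
// the delay slot, so stepping back kCallTargetAddressOffset bytes should land
// on the start of the LUI/ORI pair that set_target_address_at() rewrites.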


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return target_address();
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}


Handle<Cell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<Cell>(reinterpret_cast<Cell**>(address));
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = cell->address() + Cell::kValueOffset;
  Memory::Address_at(pc_) = address;
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because cell can never be on
    // an evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}


static const int kNoCodeAgeSequenceLength = 7;
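
// Presumably the length, in instructions, of the code-age sequence that the
// code-aging machinery plants at a function's prologue; the CODE_AGE_SEQUENCE
// accessors below patch the call target embedded in that sequence.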


Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();  // This should never be reached on MIPS.
  return Handle<Object>();
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  return Code::GetCodeFromTargetAddress(
      Assembler::target_address_at(pc_, host_));
}


void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  Assembler::set_target_address_at(pc_,
                                   host_,
                                   stub->instruction_start());
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // The pc_ offset of 0 assumes the MIPS patched return sequence per
  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or the
  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
  return Assembler::target_address_at(pc_, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // The pc_ offset of 0 assumes the MIPS patched return sequence per
  // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or the
  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


void RelocInfo::WipeOut() {
  ASSERT(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}
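
// Wiping overwrites the embedded pointer with NULL so that a stale reference
// in dead code cannot be mistaken for a live heap pointer or code address.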


bool RelocInfo::IsPatchedReturnSequence() {
  Instr instr0 = Assembler::instr_at(pc_);
  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
                         (instr1 & kOpcodeMask) == ORI &&
                         ((instr2 & kOpcodeMask) == JAL ||
                          ((instr2 & kOpcodeMask) == SPECIAL &&
                           (instr2 & kFunctionFieldMask) == JALR)));
  return patched_return;
}
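
// A patched return site therefore has the shape:
//   lui   reg, <hi16>        ; upper half of the debug-break stub address
//   ori   reg, reg, <lo16>   ; lower half
//   jal   <target>  /  jalr  reg
// JALR is encoded under the SPECIAL opcode, which is why the third
// instruction is matched on its function field rather than its opcode field.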


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instr current_instr = Assembler::instr_at(pc_);
  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}
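
// Debug-break slots are emitted as specially marked nops; once the debugger
// patches a slot the first instruction is no longer that marker nop, which is
// all this check needs to detect.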


void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}
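
// This overload performs the same mode dispatch as Visit(Isolate*,
// ObjectVisitor*) above, but is specialized at compile time on StaticVisitor
// so the garbage collector's hot marking loop can avoid virtual calls.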


// -----------------------------------------------------------------------------
// Assembler.


void Assembler::CheckBuffer() {
  if (buffer_space() <= kGap) {
    GrowBuffer();
  }
}
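
// kGap is the slack kept free at the end of the buffer so that a single emit,
// together with any relocation information it records, cannot run past the
// end before the next CheckBuffer() call triggers GrowBuffer().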


void Assembler::CheckTrampolinePoolQuick() {
  if (pc_offset() >= next_buffer_check_) {
    CheckTrampolinePool();
  }
}
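
// The quick variant is a single comparison against a precomputed threshold;
// only once the threshold is crossed does the assembler do the full (and more
// expensive) CheckTrampolinePool(), which may emit a pool of trampolines for
// out-of-range branches.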


void Assembler::emit(Instr x) {
  if (!is_buffer_growth_blocked()) {
    CheckBuffer();
  }
  *reinterpret_cast<Instr*>(pc_) = x;
  pc_ += kInstrSize;
  CheckTrampolinePoolQuick();
}
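
// emit() is the single funnel for instruction words: grow the buffer if
// allowed and needed, store the 32-bit instruction, advance pc_, then give
// the trampoline pool its cheap check. Callers that must keep a sequence of
// instructions contiguous can block buffer growth around it, hence the
// is_buffer_growth_blocked() test.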


} }  // namespace v8::internal

#endif  // V8_MIPS_ASSEMBLER_MIPS_INL_H_