#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_

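// Inline implementations for the MIPS assembler and the RelocInfo accessors;
// intended to be included after assembler-mips.h.
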
Operand::Operand(const ExternalReference& f) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(f.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Operand::Operand(Smi* value) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE32;
}

bool Operand::is_reg() const {
  return rm_.is_valid();
}

  return (reg.code() / 2);

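// Called when the code object containing this reloc info is moved by |delta|
// bytes. J/JAL targets are only encodable within a 256 MB (kImm28Mask) region,
// so a code target that ends up in a different region than pc_ is rewritten as
// a jump through a register; absolute internal references are relocated in
// place and the instruction cache is flushed.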
void RelocInfo::apply(intptr_t delta) {
  if (IsCodeTarget(rmode_)) {
    uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
    uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;

    if (scope1 != scope2) {
      Assembler::JumpLabelToJumpRegister(pc_);
    }
  }
  if (IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    byte* p = reinterpret_cast<byte*>(pc_);
    int count = Assembler::RelocateInternalReference(p, delta);
    CPU::FlushICache(p, count * sizeof(uint32_t));
  }
}

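// The target address accessors below read and patch the 32-bit address that
// is encoded in the lui/ori instruction pair at pc_ (see
// Assembler::target_address_at / set_target_address_at).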
Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

135 Address RelocInfo::target_address_address() {
136 ASSERT(IsCodeTarget(rmode_) ||
137 IsRuntimeEntry(rmode_) ||
138 rmode_ == EMBEDDED_OBJECT ||
139 rmode_ == EXTERNAL_REFERENCE);
154 return reinterpret_cast<Address>(
Address RelocInfo::constant_pool_entry_address() {
  UNREACHABLE();
  return NULL;
}

int RelocInfo::target_address_size() {
  return Assembler::kSpecialTargetSize;
}

void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target);
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}

Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}

Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}

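// Stores of heap objects into generated code must notify the incremental
// marker (RecordWrite / RecordWriteIntoCode) so that pointers embedded in
// code are seen during marking.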
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}

Address RelocInfo::target_reference() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}

Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return target_address();
}

void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}

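// CELL reloc entries store a pointer to the cell's value slot as raw data in
// the instruction stream at pc_.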
Handle<Cell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<Cell>(reinterpret_cast<Cell**>(address));
}

Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}

void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = cell->address() + Cell::kValueOffset;
  Memory::Address_at(pc_) = address;
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}

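// Code aging: young code starts with a prologue of kNoCodeAgeSequenceLength
// instructions; when the heap decides the code is old, that sequence is
// patched to call a code-age stub, which the accessors below read and write.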
static const int kNoCodeAgeSequenceLength = 7;

Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();
  return Handle<Object>();
}

Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  return Code::GetCodeFromTargetAddress(
      Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}

void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
                                   host_,
                                   stub->instruction_start());
}

Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Assembler::target_address_at(pc_, host_);
}

void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}

Object* RelocInfo::call_object() {
  return *call_object_address();
}

Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));

void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}

void RelocInfo::WipeOut() {
  ASSERT(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}

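// The debugger overwrites the JS return site (or a debug break slot) with a
// lui/ori/jalr call to the debug break stub; these predicates detect whether
// such a patch is currently present.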
bool RelocInfo::IsPatchedReturnSequence() {
  Instr instr0 = Assembler::instr_at(pc_);
  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
                         (instr1 & kOpcodeMask) == ORI &&
                         ((instr2 & kOpcodeMask) == SPECIAL &&
                          (instr2 & kFunctionFieldMask) == JALR));
  return patched_return;
}

bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instr current_instr = Assembler::instr_at(pc_);
  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}

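// Visit() dispatches this reloc entry to the visitor callback matching its
// mode; the GC, the serializer and the debugger use these hooks to find and
// update every pointer embedded in a code object.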
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}

template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}

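// Assembler buffer management: CheckBuffer() grows the instruction buffer
// when free space runs low, and CheckTrampolinePoolQuick() emits a trampoline
// pool before pending branches can overflow their 16-bit offsets.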
void Assembler::CheckBuffer() {
  if (buffer_space() <= kGap) {
    GrowBuffer();
  }
}

void Assembler::CheckTrampolinePoolQuick() {
  if (pc_offset() >= next_buffer_check_) {
    CheckTrampolinePool();
  }
}

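// emit() is the single funnel through which instruction words reach the
// buffer, so the buffer-growth and trampoline-pool checks are made here.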
void Assembler::emit(Instr x) {
  if (!is_buffer_growth_blocked()) {
    CheckBuffer();
  }
  *reinterpret_cast<Instr*>(pc_) = x;
  pc_ += kInstrSize;
  CheckTrampolinePoolQuick();
}

#endif  // V8_MIPS_ASSEMBLER_MIPS_INL_H_