#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_

#include "ia32/assembler-ia32.h"

#include "cpu.h"
#include "debug.h"

namespace v8 {
namespace internal {

static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;
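
// Note: 0xE8 is the opcode of the IA-32 near-relative CALL instruction. The
// code-aging sequence patched at the start of a function is such a call, so
// it is 1 opcode byte plus a 4-byte rel32 = 5 bytes
// (kNoCodeAgeSequenceLength).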

// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
  if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p -= delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  } else if (rmode_ == CODE_AGE_SEQUENCE) {
    if (*pc_ == kCallOpcode) {
      int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
      *p -= delta;  // Relocate entry.
      CPU::FlushICache(p, sizeof(uint32_t));
    }
  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
    // Special handling of js_return when a break point is set (call
    // instruction has been inserted).
    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
    *p -= delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
    // Special handling of a debug break slot when a break point is set (call
    // instruction has been inserted).
    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
    *p -= delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  } else if (IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p += delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  }
}
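
// Worked example: if the code object moves up by delta bytes, a pc-relative
// target stored as a rel32 would now resolve delta bytes too high, so apply()
// subtracts delta from it; an absolute internal reference moves together with
// the code and is adjusted by adding delta instead.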

Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                              || rmode_ == EMBEDDED_OBJECT
                              || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  UNREACHABLE();  // ia32 does not use a constant pool.
  return NULL;
}


int RelocInfo::target_address_size() {
  return Assembler::kSpecialTargetSize;
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  Assembler::set_target_address_at(pc_, host_, target);
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}
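
// The RecordWriteIntoCode call above is the incremental-marking write barrier
// for pointers embedded in code: without it the GC could miss the newly
// written reference from the host code object to the target code object.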

Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_Handle_at(pc_);
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Memory::Object_at(pc_) = target;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}

Address RelocInfo::target_reference() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return Memory::Address_at(pc_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}

Handle<Cell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<Cell>(reinterpret_cast<Cell**>(address));
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = cell->address() + Cell::kValueOffset;
  Memory::Address_at(pc_) = address;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    // Pass NULL as the slot: a cell can never be on an evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}

Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Memory::Object_Handle_at(pc_ + 1);
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Code::GetCodeFromTargetAddress(
      Assembler::target_address_at(pc_ + 1, host_));
}


void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(*pc_ == kCallOpcode);
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}

Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Assembler::target_address_at(pc_ + 1, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_ + 1, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(pc_ + 1);
}

void RelocInfo::WipeOut() {
  if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
    Memory::Address_at(pc_) = NULL;
  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
    // Effectively write zero into the relocation.
    Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
  } else {
    UNREACHABLE();
  }
}


bool RelocInfo::IsPatchedReturnSequence() {
  return *pc_ == kCallOpcode;
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  return !Assembler::IsNop(pc());
}
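
// A JS return site or debug break slot counts as "patched" once the debugger
// has overwritten it with a call: the first byte then equals kCallOpcode (for
// the return sequence), or the slot no longer starts with a nop (for a debug
// break slot).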

void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}
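
// The icache flush after visiting embedded pointers and external references
// covers the case where the visitor rewrote the value in the instruction
// stream (e.g. while objects are being moved by the GC).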

template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}

Immediate::Immediate(int x) {
  x_ = x;
  rmode_ = RelocInfo::NONE32;
}


Immediate::Immediate(const ExternalReference& ext) {
  x_ = reinterpret_cast<int32_t>(ext.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Immediate::Immediate(Label* internal_offset) {
  x_ = reinterpret_cast<int32_t>(internal_offset);
  rmode_ = RelocInfo::INTERNAL_REFERENCE;
}


Immediate::Immediate(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    x_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    x_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Immediate::Immediate(Smi* value) {
  x_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE32;
}


Immediate::Immediate(Address addr) {
  x_ = reinterpret_cast<int32_t>(addr);
  rmode_ = RelocInfo::NONE32;
}
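
// Hypothetical call sites, for illustration: Immediate(42) encodes a plain
// int32 with no relocation, whereas an Immediate built from an
// ExternalReference records EXTERNAL_REFERENCE so the serializer and GC can
// later rewrite the embedded address.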

void Assembler::emit(uint32_t x) {
  *reinterpret_cast<uint32_t*>(pc_) = x;
  pc_ += sizeof(uint32_t);
}

void Assembler::emit(Handle<Object> handle) {
  AllowDeferredHandleDereference heap_object_check;
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    emit(reinterpret_cast<intptr_t>(handle.location()),
         RelocInfo::EMBEDDED_OBJECT);
  } else {
    // No relocation needed.
    emit(reinterpret_cast<intptr_t>(obj));
  }
}

void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
  if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
  } else if (!RelocInfo::IsNone(rmode)
      && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
    RecordRelocInfo(rmode);
  }
  emit(x);
}

void Assembler::emit(Handle<Code> code,
                     RelocInfo::Mode rmode,
                     TypeFeedbackId id) {
  AllowDeferredHandleDereference embedding_raw_address;
  emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}

void Assembler::emit(const Immediate& x) {
  if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
    Label* label = reinterpret_cast<Label*>(x.x_);
    emit_code_relative_offset(label);
    return;
  }
  if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
  emit(x.x_);
}

void Assembler::emit_code_relative_offset(Label* label) {
  if (label->is_bound()) {
    int32_t pos;
    pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
    emit(pos);
  } else {
    emit_disp(label, Displacement::CODE_RELATIVE);
  }
}
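
// A bound label is emitted as an offset from the tagged start of the Code
// object (label position plus Code::kHeaderSize minus kHeapObjectTag); an
// unbound label is linked through a CODE_RELATIVE displacement and fixed up
// once the label is bound.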

void Assembler::emit_w(const Immediate& x) {
  ASSERT(RelocInfo::IsNone(x.rmode_));
  uint16_t value = static_cast<uint16_t>(x.x_);
  reinterpret_cast<uint16_t*>(pc_)[0] = value;
  pc_ += sizeof(uint16_t);
}

Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target) {
  int32_t* p = reinterpret_cast<int32_t*>(pc);
  *p = target - (pc + sizeof(int32_t));
  CPU::FlushICache(p, sizeof(int32_t));
}
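
// Worked example: pc points at the rel32 field of a call/jmp. With pc =
// 0x1000 and *pc = 0x20, target_address_at returns 0x1000 + 4 + 0x20 =
// 0x1024; set_target_address_at(pc, ..., 0x1024) stores 0x1024 - 0x1004 =
// 0x20 back into the instruction and flushes the icache for that word.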

Displacement Assembler::disp_at(Label* L) {
  return Displacement(long_at(L->pos()));
}


void Assembler::disp_at_put(Label* L, Displacement disp) {
  long_at_put(L->pos(), disp.data());
}

void Assembler::emit_disp(Label* L, Displacement::Type type) {
  Displacement disp(L, type);
  L->link_to(pc_offset());
  emit(static_cast<int>(disp.data()));
}

void Assembler::emit_near_disp(Label* L) {
  byte disp = 0x00;
  if (L->is_near_linked()) {
    int offset = L->near_link_pos() - pc_offset();
    ASSERT(is_int8(offset));
    disp = static_cast<byte>(offset & 0xFF);
  }
  L->link_to(pc_offset(), Label::kNear);
  *pc_++ = disp;
}

void Operand::set_modrm(int mod, Register rm) {
  ASSERT((mod & -4) == 0);  // mod is a 2-bit field.
  buf_[0] = mod << 6 | rm.code();
  len_ = 1;
}

void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT((scale & -4) == 0);  // scale is a 2-bit field.
  buf_[1] = scale << 6 | index.code() << 3 | base.code();
  len_ = 2;
}
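
// ModR/M and SIB layout reminder: modrm = mod<<6 | reg<<3 | rm and
// sib = scale<<6 | index<<3 | base. For example, [eax + ecx*4 + 8] encodes
// with mod=01 (disp8) and rm=100 (SIB follows), sib = 10 001 000 = 0x88,
// and a trailing disp8 of 0x08.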

void Operand::set_disp8(int8_t disp) {
  ASSERT(len_ == 1 || len_ == 2);
  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
}

void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
  rmode_ = rmode;
}

Operand::Operand(XMMRegister xmm_reg) {
  Register reg = { xmm_reg.code() };
  set_modrm(3, reg);
}


Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
  // [disp/r]
  set_modrm(0, ebp);
  set_dispr(disp, rmode);
}

} }  // namespace v8::internal

#endif  // V8_IA32_ASSEMBLER_IA32_INL_H_