v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.

assembler-ia32-inl.h
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

// A light-weight IA32 Assembler.

#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_

#include "ia32/assembler-ia32.h"

#include "cpu.h"
#include "debug.h"

namespace v8 {
namespace internal {


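// 0xE8 is the IA-32 opcode for a near call with a 32-bit pc-relative
// displacement, so the full instruction is 5 bytes: one opcode byte plus
// four displacement bytes. The code-age sequence is exactly such a call,
// which is why kNoCodeAgeSequenceLength is 5.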
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;


// The modes possibly affected by apply must be in kApplyMask.
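// Note on the arithmetic below: code targets and runtime entries are
// pc-relative on ia32, so when the surrounding code object moves the stored
// displacement must be adjusted in the opposite direction to keep the
// absolute target unchanged. Internal references are absolute pointers into
// the moving code object itself, so they are adjusted in the same direction
// as the move.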
void RelocInfo::apply(intptr_t delta) {
  if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p -= delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  } else if (rmode_ == CODE_AGE_SEQUENCE) {
    if (*pc_ == kCallOpcode) {
      int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
      *p -= delta;  // Relocate entry.
      CPU::FlushICache(p, sizeof(uint32_t));
    }
  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
    // Special handling of js_return when a break point is set (call
    // instruction has been inserted).
    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
    *p -= delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
    // Special handling of a debug break slot when a break point is set (call
    // instruction has been inserted).
    int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
    *p -= delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  } else if (IsInternalReference(rmode_)) {
    // An absolute code pointer inside a code object moves with the code
    // object.
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p += delta;  // Relocate entry.
    CPU::FlushICache(p, sizeof(uint32_t));
  }
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
      || rmode_ == EMBEDDED_OBJECT
      || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  UNREACHABLE();
  return NULL;
}


int RelocInfo::target_address_size() {
  return Assembler::kSpecialTargetSize;
}


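// Patching in a new code target creates a reference from this code object
// to another one; the incremental marking write barrier below records it so
// a concurrent marking cycle does not miss the newly referenced code.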
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  Assembler::set_target_address_at(pc_, host_, target);
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_Handle_at(pc_);
}


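// On ia32 an embedded object pointer lives directly in the instruction's
// 32-bit immediate at pc_, so it can be read and rewritten in place.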
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Memory::Object_at(pc_) = target;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return Memory::Address_at(pc_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}


Handle<Cell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<Cell>(reinterpret_cast<Cell**>(address));
}


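// A CELL relocation stores the address of the cell's value slot
// (cell address + Cell::kValueOffset), not the cell itself, so the Cell is
// recovered via Cell::FromValueAddress below.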
Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = cell->address() + Cell::kValueOffset;
  Memory::Address_at(pc_) = address;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because a cell can never be
    // on an evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}


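// A code-age sequence is the function prologue rewritten as a 5-byte call
// to a code-age stub (hence the *pc_ == kCallOpcode asserts); the stub's
// entry address is the call's 32-bit operand at pc_ + 1.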
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Memory::Object_Handle_at(pc_ + 1);
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Code::GetCodeFromTargetAddress(
      Assembler::target_address_at(pc_ + 1, host_));
}


void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(*pc_ == kCallOpcode);
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Assembler::target_address_at(pc_ + 1, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_ + 1, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(pc_ + 1);
}


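// Wiping clears the relocation to a deterministic value: absolute pointers
// are nulled, while pc-relative targets are pointed at pc_ + sizeof(int32_t),
// which makes the stored displacement zero.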
void RelocInfo::WipeOut() {
  if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
    Memory::Address_at(pc_) = NULL;
  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
    // Effectively write zero into the relocation.
    Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
  } else {
    UNREACHABLE();
  }
}


bool RelocInfo::IsPatchedReturnSequence() {
  return *pc_ == kCallOpcode;
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  return !Assembler::IsNop(pc());
}


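// Visit dispatches on the relocation mode so a visitor (e.g. the GC or the
// serializer) can find every pointer embedded in the instruction stream and
// update or record it.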
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


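// Immediate pairs a 32-bit operand payload with the relocation mode that
// tells the assembler how to record the value when it is emitted.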
Immediate::Immediate(int x) {
  x_ = x;
  rmode_ = RelocInfo::NONE32;
}


Immediate::Immediate(const ExternalReference& ext) {
  x_ = reinterpret_cast<int32_t>(ext.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Immediate::Immediate(Label* internal_offset) {
  x_ = reinterpret_cast<int32_t>(internal_offset);
  rmode_ = RelocInfo::INTERNAL_REFERENCE;
}


Immediate::Immediate(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    x_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    x_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Immediate::Immediate(Smi* value) {
  x_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE32;
}


Immediate::Immediate(Address addr) {
  x_ = reinterpret_cast<int32_t>(addr);
  rmode_ = RelocInfo::NONE32;
}


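// Emits a raw 32-bit value at the current assembly position. The ia32
// assembler runs on a little-endian host, so a plain store yields the byte
// order the target instruction encoding expects.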
void Assembler::emit(uint32_t x) {
  *reinterpret_cast<uint32_t*>(pc_) = x;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit(Handle<Object> handle) {
  AllowDeferredHandleDereference heap_object_check;
  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!isolate()->heap()->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    emit(reinterpret_cast<intptr_t>(handle.location()),
         RelocInfo::EMBEDDED_OBJECT);
  } else {
    // No relocation needed.
    emit(reinterpret_cast<intptr_t>(obj));
  }
}


void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
  if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
  } else if (!RelocInfo::IsNone(rmode)
      && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
    RecordRelocInfo(rmode);
  }
  emit(x);
}


void Assembler::emit(Handle<Code> code,
                     RelocInfo::Mode rmode,
                     TypeFeedbackId id) {
  AllowDeferredHandleDereference embedding_raw_address;
  emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}


void Assembler::emit(const Immediate& x) {
  if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
    Label* label = reinterpret_cast<Label*>(x.x_);
    emit_code_relative_offset(label);
    return;
  }
  if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
  emit(x.x_);
}


void Assembler::emit_code_relative_offset(Label* label) {
  if (label->is_bound()) {
    int32_t pos;
    pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
    emit(pos);
  } else {
    emit_disp(label, Displacement::CODE_RELATIVE);
  }
}


void Assembler::emit_w(const Immediate& x) {
  ASSERT(RelocInfo::IsNone(x.rmode_));
  uint16_t value = static_cast<uint16_t>(x.x_);
  reinterpret_cast<uint16_t*>(pc_)[0] = value;
  pc_ += sizeof(uint16_t);
}


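// Call and jump targets are encoded as 32-bit displacements relative to the
// end of the instruction: target = pc + sizeof(int32_t) + disp. Patching
// inverts that formula. The constant_pool argument is unused here; ia32
// embeds targets inline rather than in a constant pool.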
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target) {
  int32_t* p = reinterpret_cast<int32_t*>(pc);
  *p = target - (pc + sizeof(int32_t));
  CPU::FlushICache(p, sizeof(int32_t));
}


Address Assembler::target_address_from_return_address(Address pc) {
  return pc - kCallTargetAddressOffset;
}


Displacement Assembler::disp_at(Label* L) {
  return Displacement(long_at(L->pos()));
}


void Assembler::disp_at_put(Label* L, Displacement disp) {
  long_at_put(L->pos(), disp.data());
}


void Assembler::emit_disp(Label* L, Displacement::Type type) {
  Displacement disp(L, type);
  L->link_to(pc_offset());
  emit(static_cast<int>(disp.data()));
}


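// Near jumps use an 8-bit displacement. While a label is unbound, the
// displacement bytes of its near uses form a chain: each byte holds the
// offset back to the previous use, and 0x00 terminates the chain.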
void Assembler::emit_near_disp(Label* L) {
  byte disp = 0x00;
  if (L->is_near_linked()) {
    int offset = L->near_link_pos() - pc_offset();
    ASSERT(is_int8(offset));
    disp = static_cast<byte>(offset & 0xFF);
  }
  L->link_to(pc_offset(), Label::kNear);
  *pc_++ = disp;
}


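// ModR/M and SIB are the standard IA-32 operand encoding bytes:
//   ModR/M: mod (2 bits) | reg/opcode (3 bits) | r/m (3 bits)
//   SIB:    scale (2 bits) | index (3 bits) | base (3 bits)
// Operand assembles them into buf_, optionally followed by an 8- or 32-bit
// displacement.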
void Operand::set_modrm(int mod, Register rm) {
  ASSERT((mod & -4) == 0);
  buf_[0] = mod << 6 | rm.code();
  len_ = 1;
}


void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT((scale & -4) == 0);
  // Use SIB with no index register only for base esp.
  ASSERT(!index.is(esp) || base.is(esp));
  buf_[1] = scale << 6 | index.code() << 3 | base.code();
  len_ = 2;
}


void Operand::set_disp8(int8_t disp) {
  ASSERT(len_ == 1 || len_ == 2);
  *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
}


void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
  rmode_ = rmode;
}


Operand::Operand(Register reg) {
  // reg
  set_modrm(3, reg);
}


Operand::Operand(XMMRegister xmm_reg) {
  Register reg = { xmm_reg.code() };
  set_modrm(3, reg);
}


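// In 32-bit mode, mod == 00 with r/m == ebp (0b101) does not mean [ebp]; it
// selects a bare [disp32] absolute operand, which is what this constructor
// encodes.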
Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
  // [disp/r]
  set_modrm(0, ebp);
  set_dispr(disp, rmode);
}

} }  // namespace v8::internal

#endif  // V8_IA32_ASSEMBLER_IA32_INL_H_