V8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
assembler-x64-inl.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_

#include "x64/assembler-x64.h"

#include "cpu.h"
#include "debug.h"
#include "v8memory.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Implementation of Assembler

static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 6;

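// The emit* helpers below store a raw value of the given width at the current
// assembly position pc_ (little-endian, as x64 is little-endian) and advance
// pc_ past it.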
void Assembler::emitl(uint32_t x) {
  Memory::uint32_at(pc_) = x;
  pc_ += sizeof(uint32_t);
}


void Assembler::emitp(void* x, RelocInfo::Mode rmode) {
  uintptr_t value = reinterpret_cast<uintptr_t>(x);
  Memory::uintptr_at(pc_) = value;
  if (!RelocInfo::IsNone(rmode)) {
    RecordRelocInfo(rmode, value);
  }
  pc_ += sizeof(uintptr_t);
}


void Assembler::emitq(uint64_t x) {
  Memory::uint64_at(pc_) = x;
  pc_ += sizeof(uint64_t);
}


void Assembler::emitw(uint16_t x) {
  Memory::uint16_at(pc_) = x;
  pc_ += sizeof(uint16_t);
}

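// Code targets are not embedded as absolute pointers: a 32-bit index into the
// assembler's code_targets_ list is emitted instead, and consecutive
// references to the same target reuse the previous entry.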
void Assembler::emit_code_target(Handle<Code> target,
                                 RelocInfo::Mode rmode,
                                 TypeFeedbackId ast_id) {
  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
         rmode == RelocInfo::CODE_AGE_SEQUENCE);
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
  } else {
    RecordRelocInfo(rmode);
  }
  int current = code_targets_.length();
  if (current > 0 && code_targets_.last().is_identical_to(target)) {
    // Optimization if we keep jumping to the same code target.
    emitl(current - 1);
  } else {
    code_targets_.Add(target);
    emitl(current);
  }
}


void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
  ASSERT(RelocInfo::IsRuntimeEntry(rmode));
  ASSERT(isolate()->code_range()->exists());
  RecordRelocInfo(rmode);
  emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
}

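// REX prefix encoding: 0x48 is a REX prefix with REX.W set (64-bit operand
// size), 0x40 is a REX prefix with REX.W clear. The high bit of the reg
// operand supplies REX.R (bit 2), the high bit of the rm/base operand supplies
// REX.B (bit 0), and an index register contributes REX.X (bit 1) via
// Operand::rex_.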
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
  emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, const Operand& op) {
  emit(0x48 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}


void Assembler::emit_rex_64(Register rm_reg) {
  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
  emit(0x48 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(const Operand& op) {
  emit(0x48 | op.rex_);
}


void Assembler::emit_rex_32(Register reg, Register rm_reg) {
  emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(Register reg, const Operand& op) {
  emit(0x40 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_32(Register rm_reg) {
  emit(0x40 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(const Operand& op) {
  emit(0x40 | op.rex_);
}

void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
  byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
  byte rex_bits = reg.high_bit() << 2 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register rm_reg) {
  if (rm_reg.high_bit()) emit(0x41);
}


void Assembler::emit_optional_rex_32(const Operand& op) {
  if (op.rex_ != 0) emit(0x40 | op.rex_);
}

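// Call and jump targets on x64 are encoded as 32-bit displacements relative to
// the end of the 4-byte operand, i.e. target = [pc] + pc + 4.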
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return Memory::int32_at(pc) + pc + 4;
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target) {
  Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
  CPU::FlushICache(pc, sizeof(int32_t));
}


Address Assembler::target_address_from_return_address(Address pc) {
  return pc - kCallTargetAddressOffset;
}


Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
  return code_targets_[Memory::int32_at(pc)];
}


Address Assembler::runtime_entry_at(Address pc) {
  ASSERT(isolate()->code_range()->exists());
  return Memory::int32_at(pc) + isolate()->code_range()->start();
}

// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// The modes possibly affected by apply must be in kApplyMask.
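// Note: when the code object moves by delta, a pc-relative 32-bit target must
// have its stored displacement reduced by delta so that it still refers to the
// same absolute address, while an absolute internal reference moves with the
// code and is increased by delta.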
void RelocInfo::apply(intptr_t delta) {
  if (IsInternalReference(rmode_)) {
    // absolute code pointer inside code object moves with the code object.
    Memory::Address_at(pc_) += static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
    Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
    CPU::FlushICache(pc_, sizeof(int32_t));
  } else if (rmode_ == CODE_AGE_SEQUENCE) {
    if (*pc_ == kCallOpcode) {
      int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
      *p -= static_cast<int32_t>(delta);  // Relocate entry.
      CPU::FlushICache(p, sizeof(uint32_t));
    }
  }
}

Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                              || rmode_ == EMBEDDED_OBJECT
                              || rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address>(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  UNREACHABLE();
  return NULL;
}

int RelocInfo::target_address_size() {
  if (IsCodedSpecially()) {
    return Assembler::kSpecialTargetSize;
  } else {
    return kPointerSize;
  }
}

void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target);
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  if (rmode_ == EMBEDDED_OBJECT) {
    return Memory::Object_Handle_at(pc_);
  } else {
    return origin->code_target_object_handle_at(pc_);
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return Memory::Address_at(pc_);
}

void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Memory::Object_at(pc_) = target;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}

Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return origin->runtime_entry_at(pc_);
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}

Handle<Cell> RelocInfo::target_cell_handle() {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = Memory::Address_at(pc_);
  return Handle<Cell>(reinterpret_cast<Cell**>(address));
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}

void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  ASSERT(rmode_ == RelocInfo::CELL);
  Address address = cell->address() + Cell::kValueOffset;
  Memory::Address_at(pc_) = address;
  CPU::FlushICache(pc_, sizeof(Address));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL) {
    // TODO(1550) We are passing NULL as a slot because cell can never be on
    // evacuation candidate.
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), NULL, cell);
  }
}

void RelocInfo::WipeOut() {
  if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
    Memory::Address_at(pc_) = NULL;
  } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
    // Effectively write zero into the relocation.
    Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
  } else {
    UNREACHABLE();
  }
}

bool RelocInfo::IsPatchedReturnSequence() {
  // The recognized call sequence is:
  //  movq(kScratchRegister, address); call(kScratchRegister);
  // It only needs to be distinguished from a return sequence
  //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
  // The 11th byte is int3 (0xCC) in the return sequence and
  // REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
  return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
         0xCC;
#else
  return false;
#endif
}

bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  return !Assembler::IsNop(pc());
}

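// A code age sequence begins with a call instruction (kCallOpcode, 0xE8) whose
// 32-bit operand at pc_ + 1 holds the pc-relative address of the code-age
// stub.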
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return origin->code_target_object_handle_at(pc_ + 1);
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(*pc_ == kCallOpcode);
  return Code::GetCodeFromTargetAddress(
      Assembler::target_address_at(pc_ + 1, host_));
}

void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(*pc_ == kCallOpcode);
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}

Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return Memory::Address_at(
      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
      target;
  CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                   sizeof(Address));
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}

Object* RelocInfo::call_object() {
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  return reinterpret_cast<Object**>(
      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
}

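// The two Visit overloads below dispatch on the relocation mode: embedded
// pointers, code targets, cells, external references, code-age sequences,
// patched debug breaks and runtime entries each go to their own visitor
// callback; targets that may be rewritten in place are flushed from the
// instruction cache.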
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}

template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
    CPU::FlushICache(pc_, sizeof(Address));
  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
    StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}

// -----------------------------------------------------------------------------
// Implementation of Operand

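// Operand encodes the ModRM byte in buf_[0] (mod in bits 7:6, rm in bits 2:0),
// an optional SIB byte in buf_[1] (scale, index, base) and an optional 8- or
// 32-bit displacement; REX bits for extended registers accumulate in rex_.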
void Operand::set_modrm(int mod, Register rm_reg) {
  ASSERT(is_uint2(mod));
  buf_[0] = mod << 6 | rm_reg.low_bits();
  // Set REX.B to the high bit of rm.code().
  rex_ |= rm_reg.high_bit();
}


void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT(is_uint2(scale));
  // Use SIB with no index register only for base rsp or r12. Otherwise we
  // would skip the SIB byte entirely.
  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
  buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
  rex_ |= index.high_bit() << 1 | base.high_bit();
  len_ = 2;
}

void Operand::set_disp8(int disp) {
  ASSERT(is_int8(disp));
  ASSERT(len_ == 1 || len_ == 2);
  int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int8_t);
}


void Operand::set_disp32(int disp) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
}

} }  // namespace v8::internal

#endif  // V8_X64_ASSEMBLER_X64_INL_H_