v8  3.25.30(node0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
assembler-arm-inl.h
Go to the documentation of this file.
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been modified
34 // significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 #ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
38 #define V8_ARM_ASSEMBLER_ARM_INL_H_
39 
40 #include "arm/assembler-arm.h"
41 
42 #include "cpu.h"
43 #include "debug.h"
44 
45 
46 namespace v8 {
47 namespace internal {
48 
49 
52 }
53 
54 
56  return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
57 }
58 
59 
61  return kNumReservedRegisters;
62 }
63 
64 
67 }
68 
69 
71  ASSERT(!reg.is(kDoubleRegZero));
73  if (reg.code() > kDoubleRegZero.code()) {
74  return reg.code() - kNumReservedRegisters;
75  }
76  return reg.code();
77 }
78 
79 
81  ASSERT(index >= 0 && index < NumAllocatableRegisters());
82  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
84  if (index >= kDoubleRegZero.code()) {
85  return from_code(index + kNumReservedRegisters);
86  }
87  return from_code(index);
88 }
89 
90 
91 void RelocInfo::apply(intptr_t delta) {
92  if (RelocInfo::IsInternalReference(rmode_)) {
93  // absolute code pointer inside code object moves with the code object.
94  int32_t* p = reinterpret_cast<int32_t*>(pc_);
95  *p += delta; // relocate entry
96  }
97  // We do not use pc relative addressing on ARM, so there is
98  // nothing else to do.
99 }
100 
101 
102 Address RelocInfo::target_address() {
103  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
104  return Assembler::target_address_at(pc_, host_);
105 }
106 
107 
108 Address RelocInfo::target_address_address() {
109  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
110  || rmode_ == EMBEDDED_OBJECT
111  || rmode_ == EXTERNAL_REFERENCE);
112  if (FLAG_enable_ool_constant_pool ||
114  // We return the PC for ool constant pool since this function is used by the
115  // serializer and expects the address to reside within the code object.
116  return reinterpret_cast<Address>(pc_);
117  } else {
120  }
121 }
122 
123 
124 Address RelocInfo::constant_pool_entry_address() {
125  ASSERT(IsInConstantPool());
126  if (FLAG_enable_ool_constant_pool) {
128  return Assembler::target_constant_pool_address_at(pc_,
129  host_->constant_pool());
130  } else {
133  }
134 }
135 
136 
137 int RelocInfo::target_address_size() {
138  return kPointerSize;
139 }
140 
141 
142 void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
143  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
144  Assembler::set_target_address_at(pc_, host_, target);
145  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
146  Object* target_code = Code::GetCodeFromTargetAddress(target);
147  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
148  host(), this, HeapObject::cast(target_code));
149  }
150 }
151 
152 
153 Object* RelocInfo::target_object() {
154  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
155  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
156 }
157 
158 
159 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
160  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
161  return Handle<Object>(reinterpret_cast<Object**>(
162  Assembler::target_address_at(pc_, host_)));
163 }
164 
165 
166 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
167  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
168  ASSERT(!target->IsConsString());
170  reinterpret_cast<Address>(target));
171  if (mode == UPDATE_WRITE_BARRIER &&
172  host() != NULL &&
173  target->IsHeapObject()) {
174  host()->GetHeap()->incremental_marking()->RecordWrite(
175  host(), &Memory::Object_at(pc_), HeapObject::cast(target));
176  }
177 }
178 
179 
180 Address RelocInfo::target_reference() {
181  ASSERT(rmode_ == EXTERNAL_REFERENCE);
182  return Assembler::target_address_at(pc_, host_);
183 }
184 
185 
186 Address RelocInfo::target_runtime_entry(Assembler* origin) {
187  ASSERT(IsRuntimeEntry(rmode_));
188  return target_address();
189 }
190 
191 
192 void RelocInfo::set_target_runtime_entry(Address target,
193  WriteBarrierMode mode) {
194  ASSERT(IsRuntimeEntry(rmode_));
195  if (target_address() != target) set_target_address(target, mode);
196 }
197 
198 
199 Handle<Cell> RelocInfo::target_cell_handle() {
200  ASSERT(rmode_ == RelocInfo::CELL);
201  Address address = Memory::Address_at(pc_);
202  return Handle<Cell>(reinterpret_cast<Cell**>(address));
203 }
204 
205 
206 Cell* RelocInfo::target_cell() {
207  ASSERT(rmode_ == RelocInfo::CELL);
209 }
210 
211 
212 void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
213  ASSERT(rmode_ == RelocInfo::CELL);
214  Address address = cell->address() + Cell::kValueOffset;
215  Memory::Address_at(pc_) = address;
216  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
217  // TODO(1550) We are passing NULL as a slot because cell can never be on
218  // evacuation candidate.
219  host()->GetHeap()->incremental_marking()->RecordWrite(
220  host(), NULL, cell);
221  }
222 }
223 
224 
225 static const int kNoCodeAgeSequenceLength = 3;
226 
227 
228 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
229  UNREACHABLE(); // This should never be reached on Arm.
230  return Handle<Object>();
231 }
232 
233 
234 Code* RelocInfo::code_age_stub() {
235  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
238  (kNoCodeAgeSequenceLength - 1)));
239 }
240 
241 
242 void RelocInfo::set_code_age_stub(Code* stub) {
243  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
245  (kNoCodeAgeSequenceLength - 1)) =
246  stub->instruction_start();
247 }
248 
249 
250 Address RelocInfo::call_address() {
251  // The 2 instructions offset assumes patched debug break slot or return
252  // sequence.
253  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
254  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
255  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
256 }
257 
258 
259 void RelocInfo::set_call_address(Address target) {
260  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
261  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
262  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
263  if (host() != NULL) {
264  Object* target_code = Code::GetCodeFromTargetAddress(target);
265  host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
266  host(), this, HeapObject::cast(target_code));
267  }
268 }
269 
270 
271 Object* RelocInfo::call_object() {
272  return *call_object_address();
273 }
274 
275 
276 void RelocInfo::set_call_object(Object* target) {
277  *call_object_address() = target;
278 }
279 
280 
281 Object** RelocInfo::call_object_address() {
282  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
283  (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
284  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
285 }
286 
287 
288 void RelocInfo::WipeOut() {
289  ASSERT(IsEmbeddedObject(rmode_) ||
290  IsCodeTarget(rmode_) ||
291  IsRuntimeEntry(rmode_) ||
292  IsExternalReference(rmode_));
294 }
295 
296 
297 bool RelocInfo::IsPatchedReturnSequence() {
298  Instr current_instr = Assembler::instr_at(pc_);
299  Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
300  // A patched return sequence is:
301  // ldr ip, [pc, #0]
302  // blx ip
303  return ((current_instr & kLdrPCMask) == kLdrPCPattern)
304  && ((next_instr & kBlxRegMask) == kBlxRegPattern);
305 }
306 
307 
308 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
309  Instr current_instr = Assembler::instr_at(pc_);
310  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
311 }
312 
313 
314 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
315  RelocInfo::Mode mode = rmode();
316  if (mode == RelocInfo::EMBEDDED_OBJECT) {
317  visitor->VisitEmbeddedPointer(this);
318  } else if (RelocInfo::IsCodeTarget(mode)) {
319  visitor->VisitCodeTarget(this);
320  } else if (mode == RelocInfo::CELL) {
321  visitor->VisitCell(this);
322  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
323  visitor->VisitExternalReference(this);
324  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
325  visitor->VisitCodeAgeSequence(this);
326 #ifdef ENABLE_DEBUGGER_SUPPORT
327  } else if (((RelocInfo::IsJSReturn(mode) &&
328  IsPatchedReturnSequence()) ||
329  (RelocInfo::IsDebugBreakSlot(mode) &&
330  IsPatchedDebugBreakSlotSequence())) &&
331  isolate->debug()->has_break_points()) {
332  visitor->VisitDebugTarget(this);
333 #endif
334  } else if (RelocInfo::IsRuntimeEntry(mode)) {
335  visitor->VisitRuntimeEntry(this);
336  }
337 }
338 
339 
340 template<typename StaticVisitor>
341 void RelocInfo::Visit(Heap* heap) {
342  RelocInfo::Mode mode = rmode();
343  if (mode == RelocInfo::EMBEDDED_OBJECT) {
344  StaticVisitor::VisitEmbeddedPointer(heap, this);
345  } else if (RelocInfo::IsCodeTarget(mode)) {
346  StaticVisitor::VisitCodeTarget(heap, this);
347  } else if (mode == RelocInfo::CELL) {
348  StaticVisitor::VisitCell(heap, this);
349  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
350  StaticVisitor::VisitExternalReference(this);
351  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
352  StaticVisitor::VisitCodeAgeSequence(heap, this);
353 #ifdef ENABLE_DEBUGGER_SUPPORT
354  } else if (heap->isolate()->debug()->has_break_points() &&
355  ((RelocInfo::IsJSReturn(mode) &&
356  IsPatchedReturnSequence()) ||
357  (RelocInfo::IsDebugBreakSlot(mode) &&
358  IsPatchedDebugBreakSlotSequence()))) {
359  StaticVisitor::VisitDebugTarget(heap, this);
360 #endif
361  } else if (RelocInfo::IsRuntimeEntry(mode)) {
362  StaticVisitor::VisitRuntimeEntry(this);
363  }
364 }
365 
366 
367 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
368  rm_ = no_reg;
369  imm32_ = immediate;
370  rmode_ = rmode;
371 }
372 
373 
374 Operand::Operand(const ExternalReference& f) {
375  rm_ = no_reg;
376  imm32_ = reinterpret_cast<int32_t>(f.address());
377  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
378 }
379 
380 
381 Operand::Operand(Smi* value) {
382  rm_ = no_reg;
383  imm32_ = reinterpret_cast<intptr_t>(value);
384  rmode_ = RelocInfo::NONE32;
385 }
386 
387 
388 Operand::Operand(Register rm) {
389  rm_ = rm;
390  rs_ = no_reg;
391  shift_op_ = LSL;
392  shift_imm_ = 0;
393 }
394 
395 
396 bool Operand::is_reg() const {
397  return rm_.is_valid() &&
398  rs_.is(no_reg) &&
399  shift_op_ == LSL &&
400  shift_imm_ == 0;
401 }
402 
403 
404 void Assembler::CheckBuffer() {
405  if (buffer_space() <= kGap) {
406  GrowBuffer();
407  }
408  if (pc_offset() >= next_buffer_check_) {
409  CheckConstPool(false, true);
410  }
411 }
412 
413 
414 void Assembler::emit(Instr x) {
415  CheckBuffer();
416  *reinterpret_cast<Instr*>(pc_) = x;
417  pc_ += kInstrSize;
418 }
419 
420 
422  Instr instr = Memory::int32_at(pc);
423  return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
424 }
425 
426 
427 Address Assembler::target_constant_pool_address_at(
428  Address pc, ConstantPoolArray* constant_pool) {
429  ASSERT(constant_pool != NULL);
431  Instr instr = Memory::int32_at(pc);
432  return reinterpret_cast<Address>(constant_pool) +
434 }
435 
436 
438  ConstantPoolArray* constant_pool) {
439  if (IsMovW(Memory::int32_at(pc))) {
441  Instruction* instr = Instruction::At(pc);
442  Instruction* next_instr = Instruction::At(pc + kInstrSize);
443  return reinterpret_cast<Address>(
444  (next_instr->ImmedMovwMovtValue() << 16) |
445  instr->ImmedMovwMovtValue());
446  } else if (FLAG_enable_ool_constant_pool) {
448  return Memory::Address_at(
449  target_constant_pool_address_at(pc, constant_pool));
450  } else {
453  }
454 }
455 
456 
458  // Returns the address of the call target from the return address that will
459  // be returned to after a call.
460  // Call sequence on V7 or later is :
461  // movw ip, #... @ call address low 16
462  // movt ip, #... @ call address high 16
463  // blx ip
464  // @ return address
465  // Or pre-V7 or cases that need frequent patching:
466  // ldr ip, [pc, #...] @ call address
467  // blx ip
468  // @ return address
469  Address candidate = pc - 2 * Assembler::kInstrSize;
470  Instr candidate_instr(Memory::int32_at(candidate));
471  if (IsLdrPcImmediateOffset(candidate_instr) |
472  IsLdrPpImmediateOffset(candidate_instr)) {
473  return candidate;
474  }
475  candidate = pc - 3 * Assembler::kInstrSize;
476  ASSERT(IsMovW(Memory::int32_at(candidate)) &&
477  IsMovT(Memory::int32_at(candidate + kInstrSize)));
478  return candidate;
479 }
480 
481 
485  return pc + kInstrSize * 2;
486  } else {
489  return pc + kInstrSize * 3;
490  }
491 }
492 
493 
495  Address constant_pool_entry, Code* code, Address target) {
496  if (FLAG_enable_ool_constant_pool) {
497  set_target_address_at(constant_pool_entry, code, target);
498  } else {
499  Memory::Address_at(constant_pool_entry) = target;
500  }
501 }
502 
503 
504 static Instr EncodeMovwImmediate(uint32_t immediate) {
505  ASSERT(immediate < 0x10000);
506  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
507 }
508 
509 
511  ConstantPoolArray* constant_pool,
512  Address target) {
513  if (IsMovW(Memory::int32_at(pc))) {
515  uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
516  uint32_t immediate = reinterpret_cast<uint32_t>(target);
517  uint32_t intermediate = instr_ptr[0];
518  intermediate &= ~EncodeMovwImmediate(0xFFFF);
519  intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
520  instr_ptr[0] = intermediate;
521  intermediate = instr_ptr[1];
522  intermediate &= ~EncodeMovwImmediate(0xFFFF);
523  intermediate |= EncodeMovwImmediate(immediate >> 16);
524  instr_ptr[1] = intermediate;
527  CPU::FlushICache(pc, 2 * kInstrSize);
528  } else if (FLAG_enable_ool_constant_pool) {
531  target_constant_pool_address_at(pc, constant_pool)) = target;
532  } else {
535  // Intuitively, we would think it is necessary to always flush the
536  // instruction cache after patching a target address in the code as follows:
537  // CPU::FlushICache(pc, sizeof(target));
538  // However, on ARM, no instruction is actually patched in the case
539  // of embedded constants of the form:
540  // ldr ip, [pc, #...]
541  // since the instruction accessing this address in the constant pool remains
542  // unchanged.
543  }
544 }
545 
546 
547 } } // namespace v8::internal
548 
549 #endif // V8_ARM_ASSEMBLER_ARM_INL_H_
byte * Address
Definition: globals.h:186
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
static DwVfpRegister FromAllocationIndex(int index)
static Object *& Object_at(Address addr)
Definition: v8memory.h:83
static const int kValueOffset
Definition: objects.h:9547
const Instr kLdrPCMask
const Instr kLdrPCPattern
static bool IsMovW(Instr instr)
static HeapObject * cast(Object *obj)
static int NumAllocatableRegisters()
kSerializedDataOffset Object
Definition: objects-inl.h:5016
int int32_t
Definition: unicode.cc:47
static bool IsSupported(CpuFeature f)
Definition: assembler-arm.h:68
static bool IsLdrPpImmediateOffset(Instr instr)
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
#define ASSERT(condition)
Definition: checks.h:329
const Instr kBlxRegMask
static DwVfpRegister from_code(int code)
static void deserialization_set_special_target_at(Address constant_pool_entry, Code *code, Address target)
static Instruction * At(byte *pc)
int ImmedMovwMovtValue() const
static const int kMaxNumAllocatableRegisters
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
const int kPointerSize
Definition: globals.h:268
#define kScratchDoubleReg
static const int kPcLoadDelta
static Address & Address_at(Address addr)
Definition: v8memory.h:79
static int32_t & int32_at(Address addr)
Definition: v8memory.h:51
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static bool IsMovT(Instr instr)
const Register pc
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:4662
void CheckConstPool(bool force_emit, bool require_jump)
static Address target_pointer_address_at(Address pc)
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
#define kDoubleRegZero
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
static int ToAllocationIndex(DwVfpRegister reg)
const Instr kBlxRegPattern
static Address target_address_from_return_address(Address pc)
static Address return_address_from_call_start(Address pc)
bool is(DwVfpRegister reg) const
static const int kInstrSize
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target)
const Register no_reg
int64_t immediate() const
static Cell * FromValueAddress(Address value)
Definition: objects.h:9532
static int GetLdrRegisterImmediateOffset(Instr instr)
RelocInfo::Mode rmode() const
static const int kNumReservedRegisters
static bool IsLdrPcImmediateOffset(Instr instr)