28 #ifndef V8_ARM64_SIMULATOR_ARM64_H_
29 #define V8_ARM64_SIMULATOR_ARM64_H_
// X-macro: expands R(code) once for each of the 32 integer register codes.
45 #define REGISTER_CODE_LIST(R) \
46 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
47 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
48 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
49 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// Running natively (no simulator): generated code is called directly.
// NOTE(review): the embedded leading numbers are the original file's line
// numbers; gaps in that numbering mark lines missing from this extract.
54 #if !defined(USE_SIMULATOR)
// Call generated code directly through a C function pointer.
58 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
59 (entry(p0, p1, p2, p3, p4))
// Fragment of the arm64_regexp_matcher parameter list (declaration truncated
// in this extract — see the full header for the complete typedef).
63 const byte* input_start,
64 const byte* input_end,
// Call a generated regexp matcher directly; the NULL slot is the
// return_address parameter, which is only meaningful under the simulator.
76 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
77 (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
78 p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// Natively, a try-catch address is just the TryCatch object's address.
80 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
81 reinterpret_cast<TryCatch*>(try_catch_address)
// Fragment of SimulatorStack::RegisterCTryCatch: natively it is a no-op
// pass-through (enclosing function not visible here).
93 return try_catch_address;
99 #else // !defined(USE_SIMULATOR)
// Byte-reversal granularity selector used by Simulator::ReverseBytes
// (enumerators not visible in this extract).
101 enum ReverseByteMode {
// Simulated system register (e.g. NZCV, FPCR): a 32-bit value paired with a
// mask of bits that ignore writes. Class body is truncated in this extract.
111 class SimSystemRegister {
// Default: value 0 with every bit write-ignored.
115 SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
// Raw 32-bit register value (accessor body not visible here).
117 uint32_t RawValue()
const {
// Bits covered by write_ignore_mask_ keep their old value; only the
// writable bits take new_value.
121 void SetRawValue(uint32_t new_value) {
122 value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
// Unsigned extraction of the bit range [msb:lsb].
125 uint32_t Bits(
int msb,
int lsb)
const {
// Sign-extending extraction of the bit range [msb:lsb].
129 int32_t SignedBits(
int msb,
int lsb)
const {
// Stores `bits` into the range [msb:lsb]; defined out of line (definition
// not visible in this extract).
133 void SetBits(
int msb,
int lsb, uint32_t bits);
// Generates a typed getter plus a Set##Name setter for one named field.
138 #define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
139 Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
140 void Set##Name(Type bits) { \
141 SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
143 #define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
144 static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
146 #undef DEFINE_ZERO_BITS
// Private constructor used by the per-register factory helpers.
153 SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
154 : value_(value), write_ignore_mask_(write_ignore_mask) { }
157 uint32_t write_ignore_mask_;
// Fixed-size byte buffer modelling one CPU register; Set/Get copy raw bytes
// so a register can hold values of several widths. Body truncated here.
162 template<
int kSizeInBytes>
163 class SimRegisterBase {
// Write `new_value` into the low `size` bytes; the whole register is
// zeroed first so high bytes never hold stale data.
166 void Set(
T new_value,
unsigned size =
sizeof(
T)) {
171 memset(value_, 0, kSizeInBytes);
172 memcpy(value_, &new_value,
size);
// Read `size` bytes back as a T; the result is zero-filled before the
// copy so short reads return zero-extended values.
178 T Get(
unsigned size =
sizeof(
T))
const {
181 memset(&result, 0,
sizeof(result));
182 memcpy(&result, value_,
size);
187 uint8_t value_[kSizeInBytes];
// X (64-bit integer) and D (64-bit FP) register storage aliases.
189 typedef SimRegisterBase<kXRegSize> SimRegister;
190 typedef SimRegisterBase<kDRegSize> SimFPRegister;
// ARM64 instruction-set simulator: decodes and executes generated code when
// V8 runs on a non-ARM64 host. The class body is heavily truncated in this
// extract; gaps in the embedded original line numbers mark missing lines.
193 class Simulator :
public DecoderVisitor {
// Construction wires the simulator into a decoder; stream receives traces.
195 explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
196 Isolate* isolate =
NULL,
197 FILE* stream = stderr);
203 static void Initialize(Isolate* isolate);
// Entry points that call into simulated generated code, distinguished by
// the expected return kind (none / X register / D register).
212 void CallVoid(
byte* entry, CallArgument* args);
215 int64_t CallInt64(
byte* entry, CallArgument* args);
216 double CallDouble(
byte* entry, CallArgument* args);
// Convenience wrappers for the common JS-entry and regexp-matcher calling
// conventions (parameter lists truncated in this extract).
221 int64_t CallJS(
byte* entry,
222 byte* function_entry,
227 int64_t CallRegExp(
byte* entry,
229 int64_t start_offset,
230 const byte* input_start,
231 const byte* input_end,
236 void* return_address,
// CallArgument: tagged 64-bit slot describing one argument to the Call*
// helpers above. Generic integral constructor: value is memcpy'd into the
// low bytes of bits_.
246 explicit CallArgument(
T argument) {
247 ASSERT(
sizeof(argument) <=
sizeof(bits_));
248 memcpy(&bits_, &argument,
sizeof(argument));
// Double constructor: fills the whole 64-bit slot.
252 explicit CallArgument(
double argument) {
253 ASSERT(
sizeof(argument) ==
sizeof(bits_));
254 memcpy(&bits_, &argument,
sizeof(argument));
// Float constructor: bits_ is pre-filled with the FP64 signalling-NaN
// pattern, then the 32-bit float is copied over the low bytes, so the
// unused upper half is a recognisable poison value rather than garbage.
258 explicit CallArgument(
float argument) {
264 ASSERT(
sizeof(kFP64SignallingNaN) ==
sizeof(bits_));
265 memcpy(&bits_, &kFP64SignallingNaN,
sizeof(kFP64SignallingNaN));
267 ASSERT(
sizeof(argument) <=
sizeof(bits_));
268 memcpy(&bits_, &argument,
sizeof(argument));
// Sentinel terminating a CallArgument array.
274 static CallArgument End() {
return CallArgument(); }
276 int64_t bits()
const {
return bits_; }
277 bool IsEnd()
const {
return type_ == NO_ARG; }
278 bool IsX()
const {
return type_ == X_ARG; }
279 bool IsD()
const {
return type_ == D_ARG; }
// X_ARG: integer register argument; D_ARG: FP register argument;
// NO_ARG: end-of-list sentinel.
282 enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
287 CallArgumentType type_;
// Private default constructor: only End() creates NO_ARG sentinels.
289 CallArgument() { type_ = NO_ARG; }
// Debugger helpers: resolve/print a register or value named by `desc`.
296 bool GetValue(
const char* desc, int64_t* value);
298 bool PrintValue(
const char* desc);
// Push/pop addresses on the simulated JS stack (used for C++ TryCatch
// registration under the simulator).
301 uintptr_t PushAddress(uintptr_t address);
304 uintptr_t PopAddress();
// Lower bound of usable simulated stack, with a safety margin.
307 uintptr_t StackLimit()
const;
// Redirects a C++ function so calls from simulated code trap into
// DoRuntimeCall instead of executing host code directly.
312 static void* RedirectExternalReference(
void* external_function,
314 void DoRuntimeCall(Instruction* instr);
// Sentinel pc value that stops the simulation loop.
317 static const Instruction* kEndOfSimAddress;
318 void DecodeInstruction();
320 void RunFrom(Instruction* start);
// set_pc accepts any pointer-sized type; the memcpy avoids strict-aliasing
// issues when storing it into pc_.
323 template <
typename T>
324 void set_pc(
T new_pc) {
325 ASSERT(
sizeof(
T) ==
sizeof(pc_));
326 memcpy(&pc_, &new_pc,
sizeof(
T));
329 Instruction*
pc() {
return pc_; }
// Advance to the next instruction unless the current one already wrote pc_
// (branch); then clear the modified flag for the next step.
331 void increment_pc() {
333 pc_ = pc_->following();
336 pc_modified_ =
false;
// DecoderVisitor hook: forward to the wrapped decoder.
339 virtual void Decode(Instruction* instr) {
340 decoder_->Decode(instr);
343 void ExecuteInstruction() {
// Declares one Visit* handler per instruction class (expanded via the
// instruction-list macro, not visible in this extract).
353 #define DECLARE(A) void Visit##A(Instruction* instr);
// reg<T>(size, code, r31mode): read `size` bits of integer register `code`.
// Sizes are given in bits; size/8 must fit in T.
367 unsigned size_in_bytes = size / 8;
368 ASSERT(size_in_bytes <=
sizeof(
T));
374 memset(&result, 0,
sizeof(result));
377 return registers_[
code].Get<
T>(size_in_bytes);
// Overload defaulting the width to sizeof(T) bits.
383 return reg<T>(
sizeof(
T) * 8, code, r31mode);
// wreg: 32-bit view of a register.
389 return reg<int32_t>(
code, r31mode);
// xreg: 64-bit view of a register.
392 int64_t xreg(
unsigned code,
394 return reg<int64_t>(
code, r31mode);
// Width-parameterised read returning int64_t.
397 int64_t reg(
unsigned size,
unsigned code,
399 return reg<int64_t>(
size,
code, r31mode);
// set_reg(size, code, value, r31mode): write `size` bits into register
// `code`. NOTE(review): r31mode presumably selects xzr-vs-sp treatment of
// register 31 — the handling lines are missing from this extract; confirm
// against the full header.
408 void set_reg(
unsigned size,
unsigned code,
T value,
410 unsigned size_in_bytes = size / 8;
411 ASSERT(size_in_bytes <=
sizeof(
T));
418 return registers_[
code].Set(value, size_in_bytes);
// Overload defaulting the width to sizeof(value) bits.
423 void set_reg(
unsigned code,
T value,
425 set_reg(
sizeof(value) * 8, code, value, r31mode);
// 32-bit and 64-bit typed setters.
429 void set_wreg(
unsigned code,
int32_t value,
434 void set_xreg(
unsigned code, int64_t value,
// Shorthands for the link register and stack pointer.
441 void set_lr(
T value) {
447 void set_sp(
T value) {
// fpreg<T>(size, code): read `size` bits of FP register `code`; mirrors the
// integer reg<T> accessor above.
467 T fpreg(
unsigned size,
unsigned code)
const {
468 unsigned size_in_bytes = size / 8;
469 ASSERT(size_in_bytes <=
sizeof(
T));
472 return fpregisters_[
code].Get<
T>(size_in_bytes);
// Overload defaulting the width to sizeof(T) bits.
477 T fpreg(
unsigned code)
const {
478 return fpreg<T>(
sizeof(
T) * 8, code);
// S-register (32-bit float) value and raw bit pattern.
482 float sreg(
unsigned code)
const {
483 return fpreg<float>(
code);
486 uint32_t sreg_bits(
unsigned code)
const {
487 return fpreg<uint32_t>(
code);
// D-register (64-bit double) value and raw bit pattern.
490 double dreg(
unsigned code)
const {
491 return fpreg<double>(
code);
494 uint64_t dreg_bits(
unsigned code)
const {
495 return fpreg<uint64_t>(
code);
// Width-parameterised double-returning read (body not visible here).
498 double fpreg(
unsigned size,
unsigned code)
const {
// Write an FP register; width is taken from the value's static type.
511 void set_fpreg(
unsigned code,
T value) {
514 fpregisters_[
code].Set(value,
sizeof(value));
// Typed setters for S/D values and their raw bit patterns; all forward to
// set_fpreg.
518 void set_sreg(
unsigned code,
float value) {
519 set_fpreg(code, value);
522 void set_sreg_bits(
unsigned code, uint32_t value) {
523 set_fpreg(code, value);
526 void set_dreg(
unsigned code,
double value) {
527 set_fpreg(code, value);
530 void set_dreg_bits(
unsigned code, uint64_t value) {
531 set_fpreg(code, value);
// Mutable access to the simulated NZCV condition flags and FPCR.
534 SimSystemRegister& nzcv() {
return nzcv_; }
535 SimSystemRegister& fpcr() {
return fpcr_; }
// Breakpoint record field (enclosing struct declaration truncated here).
541 Instruction* location;
544 std::vector<Breakpoint> breakpoints_;
545 void SetBreakpoint(Instruction* breakpoint);
546 void ListBreakpoints();
547 void CheckBreakpoints();
// Helpers for the "step then break" debugger behaviour.
555 void CheckBreakNext();
// Disassemble and print `count` instructions starting at `pc`.
558 void PrintInstructionsAt(Instruction*
pc, uint64_t count);
// Print* unconditionally dump state; the print_all flags widen the dump
// beyond (presumably) just-modified entries — confirm against full source.
560 void PrintSystemRegisters(
bool print_all =
false);
561 void PrintRegisters(
bool print_all_regs =
false);
562 void PrintFPRegisters(
bool print_all_regs =
false);
563 void PrintProcessorState();
564 void PrintWrite(uint8_t* address, uint64_t value,
unsigned num_bytes);
// Log* wrappers gate the corresponding Print* on log_parameters_ bits.
565 void LogSystemRegisters() {
566 if (log_parameters_ &
LOG_SYS_REGS) PrintSystemRegisters();
568 void LogRegisters() {
569 if (log_parameters_ &
LOG_REGS) PrintRegisters();
571 void LogFPRegisters() {
572 if (log_parameters_ &
LOG_FP_REGS) PrintFPRegisters();
574 void LogProcessorState() {
575 LogSystemRegisters();
579 void LogWrite(uint8_t* address, uint64_t value,
unsigned num_bytes) {
580 if (log_parameters_ &
LOG_WRITE) PrintWrite(address, value, num_bytes);
583 int log_parameters() {
return log_parameters_; }
584 void set_log_parameters(
int new_parameters) {
585 log_parameters_ = new_parameters;
// Fragment of the disassembler toggle: warns when tracing is requested
// without --debug-sim, and inserts/removes the printing visitor.
588 PrintF(
"Run --debug-sim to dynamically turn on disassembler\n");
593 decoder_->InsertVisitorBefore(print_disasm_,
this);
595 decoder_->RemoveVisitor(print_disasm_);
// Printable names for W/X/S/D/V registers, and the reverse name-to-code
// lookup (parameter lists partially truncated in this extract).
599 static inline const char* WRegNameForCode(
unsigned code,
601 static inline const char* XRegNameForCode(
unsigned code,
603 static inline const char* SRegNameForCode(
unsigned code);
604 static inline const char* DRegNameForCode(
unsigned code);
605 static inline const char* VRegNameForCode(
unsigned code);
606 static inline int CodeFromName(
const char*
name);
// Condition evaluation against the simulated NZCV flags. The surrounding
// switch is missing from this extract; the return expressions below are
// presumably the hi/ls/ge/lt/gt/le arms of ConditionPassed — confirm
// against the full source.
611 SimSystemRegister&
flags = nzcv();
630 return flags.C() && !flags.Z();
632 return !(flags.C() && !flags.Z());
634 return flags.N() == flags.V();
636 return flags.N() != flags.V();
638 return !flags.Z() && (flags.N() == flags.V());
640 return !(!flags.Z() && (flags.N() == flags.V()));
// ConditionFailed is simply the negation of ConditionPassed.
651 return !ConditionPassed(cond);
// Shared implementations for instruction families; op2 is the already
// resolved second operand.
654 void AddSubHelper(Instruction* instr, int64_t op2);
// Add-with-carry core; also used by subtract via operand inversion
// (presumably — the flag-update lines are not visible here).
655 int64_t AddWithCarry(
unsigned reg_size,
659 int64_t carry_in = 0);
660 void LogicalHelper(Instruction* instr, int64_t op2);
661 void ConditionalCompareHelper(Instruction* instr, int64_t op2);
// Load/store machinery: address computation, write-back of the base
// register, and bounds checking against the simulated stack.
662 void LoadStoreHelper(Instruction* instr,
665 void LoadStorePairHelper(Instruction* instr,
AddrMode addrmode);
666 uint8_t* LoadStoreAddress(
unsigned addr_reg,
669 void LoadStoreWriteBack(
unsigned addr_reg,
672 void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
// Raw memory accessors used by the load/store visitors.
674 uint64_t MemoryRead(uint8_t* address,
unsigned num_bytes);
675 uint8_t MemoryRead8(uint8_t* address);
676 uint16_t MemoryRead16(uint8_t* address);
677 uint32_t MemoryRead32(uint8_t* address);
678 float MemoryReadFP32(uint8_t* address);
679 uint64_t MemoryRead64(uint8_t* address);
680 double MemoryReadFP64(uint8_t* address);
682 void MemoryWrite(uint8_t* address, uint64_t value,
unsigned num_bytes);
683 void MemoryWrite32(uint8_t* address, uint32_t value);
684 void MemoryWriteFP32(uint8_t* address,
float value);
685 void MemoryWrite64(uint8_t* address, uint64_t value);
686 void MemoryWriteFP64(uint8_t* address,
double value);
// Operand shaping: shifted/rotated/extended register operands.
688 int64_t ShiftOperand(
unsigned reg_size,
692 int64_t Rotate(
unsigned reg_width,
696 int64_t ExtendValue(
unsigned reg_width,
699 unsigned left_shift = 0);
// Bit/byte reversal used by RBIT/REV-family instructions.
701 uint64_t ReverseBits(uint64_t value,
unsigned num_bits);
702 uint64_t ReverseBytes(uint64_t value, ReverseByteMode
mode);
// Floating-point helper templates; T is float or double. Several template
// headers below have their declaration line missing from this extract.
704 template <
typename T>
705 T FPDefaultNaN()
const;
// Compare and set the simulated NZCV flags.
707 void FPCompare(
double val0,
double val1);
// Round to integral value under the given FP rounding mode.
708 double FPRoundInt(
double value,
FPRounding round_mode);
// Precision conversions.
709 double FPToDouble(
float value);
710 float FPToFloat(
double value,
FPRounding round_mode);
// Fixed-point (value * 2^-fbits) to floating-point conversions, signed and
// unsigned sources.
711 double FixedToDouble(int64_t src,
int fbits,
FPRounding round_mode);
712 double UFixedToDouble(uint64_t src,
int fbits,
FPRounding round_mode);
713 float FixedToFloat(int64_t src,
int fbits,
FPRounding round_mode);
714 float UFixedToFloat(uint64_t src,
int fbits,
FPRounding round_mode);
// FP to integer conversions under an explicit rounding mode.
716 int64_t FPToInt64(
double value,
FPRounding rmode);
717 uint32_t FPToUInt32(
double value,
FPRounding rmode);
718 uint64_t FPToUInt64(
double value,
FPRounding rmode);
// Elementary arithmetic helpers.
720 template <
typename T>
721 T FPAdd(
T op1,
T op2);
723 template <
typename T>
724 T FPDiv(
T op1,
T op2);
726 template <
typename T>
729 template <
typename T>
732 template <
typename T>
735 template <
typename T>
738 template <
typename T>
739 T FPMul(
T op1,
T op2);
// Fused multiply-add: a + op1 * op2.
741 template <
typename T>
742 T FPMulAdd(
T a,
T op1,
T op2);
744 template <
typename T>
747 template <
typename T>
748 T FPSub(
T op1,
T op2);
// NaN propagation helpers (IEEE-754 quieting / operand selection).
751 template <
typename T>
752 T FPProcessNaN(
T op);
754 bool FPProcessNaNs(Instruction* instr);
756 template <
typename T>
757 T FPProcessNaNs(
T op1,
T op2);
759 template <
typename T>
760 T FPProcessNaNs3(
T op1,
T op2,
T op3);
762 void CheckStackAlignment();
// Runs generated code while checking AAPCS64 procedure-call-standard
// compliance (callee-saved register preservation).
764 inline void CheckPCSComplianceAndRun();
// Recognisable poison patterns written into caller-saved registers to catch
// code that relies on them surviving a call.
769 static const uint64_t kCallerSavedRegisterCorruptionValue =
770 0xca11edc0de000000UL;
// A NaN pattern for FP registers, so uses of a corrupted value propagate.
772 static const uint64_t kCallerSavedFPRegisterCorruptionValue =
773 0x7ff000007f801000UL;
775 static const uint64_t kDefaultCPURegisterCorruptionValue =
776 0x7ffbad007f8bad00UL;
778 void CorruptRegisters(CPURegList* list,
779 uint64_t value = kDefaultCPURegisterCorruptionValue);
780 void CorruptAllCallerSavedCPURegisters();
// Tracing / profiling collaborators.
787 PrintDisassembler* print_disasm_;
790 Instrument* instrument_;
// Simulated system registers: condition flags and FP control.
801 SimSystemRegister nzcv_;
804 SimSystemRegister fpcr_;
813 void AssertSupportedFPCR() {
// N flag: the sign bit of `result` at the given register width.
821 static int CalcNFlag(uint64_t result,
unsigned reg_size) {
822 return (result >> (reg_size - 1)) & 1;
// Z flag: set when the result is zero (body not visible here).
825 static int CalcZFlag(uint64_t result) {
// NZCV occupy the top four bits of the 32-bit flags word.
829 static const uint32_t kConditionFlagsMask = 0xf0000000;
// Guard zone placed below the usable simulated stack.
833 static const intptr_t stack_protection_size_ =
KB;
834 intptr_t stack_size_;
// Decoders for execution and for disassembly-only tracing.
837 Decoder<DispatchingDecoderVisitor>* decoder_;
838 Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
// Static register-name tables backing the *RegNameForCode helpers.
845 static const char* xreg_names[];
846 static const char* wreg_names[];
847 static const char* sreg_names[];
848 static const char* dreg_names[];
849 static const char* vreg_names[];
// Last debugger command, kept so an empty input can repeat it
// (presumably — repeat logic is not visible in this extract).
852 void set_last_debugger_input(
char* input) {
854 last_debugger_input_ = input;
856 char* last_debugger_input() {
return last_debugger_input_; }
857 char* last_debugger_input_;
860 void Init(FILE* stream);
// Simulator builds: route generated-code calls through the per-isolate
// Simulator instance instead of calling the code pointer directly.
869 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
870 reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
871 FUNCTION_ADDR(entry), \
874 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
875 Simulator::current(Isolate::Current())->CallRegExp( \
877 p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
// Under the simulator a try-catch "address" is a slot on the simulated
// stack holding the TryCatch pointer, hence the extra dereference.
879 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
880 try_catch_address == NULL ? \
881 NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// SimulatorStack fragments: the JS stack limit comes from the simulator,
// and C++ TryCatch addresses are pushed/popped on the simulated stack
// (enclosing function declarations are missing from this extract).
891 return Simulator::current(isolate)->StackLimit();
895 Simulator* sim = Simulator::current(Isolate::Current());
896 return sim->PushAddress(try_catch_address);
900 Simulator::current(Isolate::Current())->PopAddress();
904 #endif // !defined(USE_SIMULATOR)
908 #endif // V8_ARM64_SIMULATOR_ARM64_H_
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
int(* arm64_regexp_matcher)(String *input, int64_t start_offset, const byte *input_start, const byte *input_end, int *output, int64_t output_size, Address stack_base, int64_t direct_call, void *return_address, Isolate *isolate)
void PrintF(const char *format,...)
const unsigned kDRegSizeInBits
const unsigned kXRegSizeInBits
kSerializedDataOffset Object
TypeImpl< ZoneTypeConfig > Type
#define ASSERT(condition)
#define DEFINE_GETTER(Name, HighBit, LowBit, Func)
static void UnregisterCTryCatch()
const unsigned kLinkRegCode
const unsigned kWRegSizeInBits
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
const unsigned kInstructionSize
bool IsAligned(T value, U alignment)
const unsigned kSRegSizeInBits
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate *isolate, uintptr_t c_limit)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
const unsigned kNumberOfFPRegisters
int32_t signed_bitextract_32(int msb, int lsb, int32_t x)
#define T(name, string, precedence)
const unsigned kNumberOfRegisters
StringCharacterStream *const stream_
const unsigned kFramePointerRegCode
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address)
void DeleteArray(T *array)
uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_)