V8 3.25.30 (Node.js 0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
simulator-arm64.h
Go to the documentation of this file.
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_ARM64_SIMULATOR_ARM64_H_
29 #define V8_ARM64_SIMULATOR_ARM64_H_
30 
31 #include <stdarg.h>
32 #include <vector>
33 
34 #include "v8.h"
35 
36 #include "globals.h"
37 #include "utils.h"
38 #include "allocation.h"
39 #include "assembler.h"
40 #include "arm64/assembler-arm64.h"
41 #include "arm64/decoder-arm64.h"
42 #include "arm64/disasm-arm64.h"
43 #include "arm64/instrument-arm64.h"
44 
45 #define REGISTER_CODE_LIST(R) \
46 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
47 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
48 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
49 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
50 
51 namespace v8 {
52 namespace internal {
53 
54 #if !defined(USE_SIMULATOR)
55 
56 // Running without a simulator on a native ARM64 platform.
57 // When running without a simulator we call the entry directly.
58 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
59  (entry(p0, p1, p2, p3, p4))
60 
61 typedef int (*arm64_regexp_matcher)(String* input,
62  int64_t start_offset,
63  const byte* input_start,
64  const byte* input_end,
65  int* output,
66  int64_t output_size,
67  Address stack_base,
68  int64_t direct_call,
69  void* return_address,
70  Isolate* isolate);
71 
72 // Call the generated regexp code directly. The code at the entry address
73 // should act as a function matching the type arm64_regexp_matcher.
74 // The ninth argument is a dummy that reserves the space used for
75 // the return address added by the ExitFrame in native calls.
76 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
77  (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
78  p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
79 
80 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
81  reinterpret_cast<TryCatch*>(try_catch_address)
82 
83 // Running without a simulator there is nothing to do.
84 class SimulatorStack : public v8::internal::AllStatic {
85  public:
86  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
87  uintptr_t c_limit) {
88  USE(isolate);
89  return c_limit;
90  }
91 
92  static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
93  return try_catch_address;
94  }
95 
96  static void UnregisterCTryCatch() { }
97 };
98 
99 #else // !defined(USE_SIMULATOR)
100 
// Granularity at which ReverseBytes() swaps byte order: within each 16-bit
// half-word, each 32-bit word, or each 64-bit double-word.
enum ReverseByteMode {
  Reverse16 = 0,
  Reverse32 = 1,
  Reverse64 = 2
};
106 
107 
108 // The proper way to initialize a simulated system register (such as NZCV) is as
109 // follows:
110 // SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
111 class SimSystemRegister {
112  public:
113  // The default constructor represents a register which has no writable bits.
114  // It is not possible to set its value to anything other than 0.
115  SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
116 
117  uint32_t RawValue() const {
118  return value_;
119  }
120 
121  void SetRawValue(uint32_t new_value) {
122  value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
123  }
124 
125  uint32_t Bits(int msb, int lsb) const {
126  return unsigned_bitextract_32(msb, lsb, value_);
127  }
128 
129  int32_t SignedBits(int msb, int lsb) const {
130  return signed_bitextract_32(msb, lsb, value_);
131  }
132 
133  void SetBits(int msb, int lsb, uint32_t bits);
134 
135  // Default system register values.
136  static SimSystemRegister DefaultValueFor(SystemRegister id);
137 
138 #define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
139  Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
140  void Set##Name(Type bits) { \
141  SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
142  }
143 #define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
144  static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
145  SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
146 #undef DEFINE_ZERO_BITS
147 #undef DEFINE_GETTER
148 
149  protected:
150  // Most system registers only implement a few of the bits in the word. Other
151  // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
152  // describes the bits which are not modifiable.
153  SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
154  : value_(value), write_ignore_mask_(write_ignore_mask) { }
155 
156  uint32_t value_;
157  uint32_t write_ignore_mask_;
158 };
159 
160 
161 // Represent a register (r0-r31, v0-v31).
162 template<int kSizeInBytes>
163 class SimRegisterBase {
164  public:
165  template<typename T>
166  void Set(T new_value, unsigned size = sizeof(T)) {
167  ASSERT(size <= kSizeInBytes);
168  ASSERT(size <= sizeof(new_value));
169  // All AArch64 registers are zero-extending; Writing a W register clears the
170  // top bits of the corresponding X register.
171  memset(value_, 0, kSizeInBytes);
172  memcpy(value_, &new_value, size);
173  }
174 
175  // Copy 'size' bytes of the register to the result, and zero-extend to fill
176  // the result.
177  template<typename T>
178  T Get(unsigned size = sizeof(T)) const {
179  ASSERT(size <= kSizeInBytes);
180  T result;
181  memset(&result, 0, sizeof(result));
182  memcpy(&result, value_, size);
183  return result;
184  }
185 
186  protected:
187  uint8_t value_[kSizeInBytes];
188 };
189 typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
190 typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31
191 
192 
193 class Simulator : public DecoderVisitor {
194  public:
195  explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
196  Isolate* isolate = NULL,
197  FILE* stream = stderr);
198  Simulator();
199  ~Simulator();
200 
201  // System functions.
202 
203  static void Initialize(Isolate* isolate);
204 
205  static Simulator* current(v8::internal::Isolate* isolate);
206 
207  class CallArgument;
208 
209  // Call an arbitrary function taking an arbitrary number of arguments. The
210  // varargs list must be a set of arguments with type CallArgument, and
211  // terminated by CallArgument::End().
212  void CallVoid(byte* entry, CallArgument* args);
213 
214  // Like CallVoid, but expect a return value.
215  int64_t CallInt64(byte* entry, CallArgument* args);
216  double CallDouble(byte* entry, CallArgument* args);
217 
218  // V8 calls into generated JS code with 5 parameters and into
219  // generated RegExp code with 10 parameters. These are convenience functions,
220  // which set up the simulator state and grab the result on return.
221  int64_t CallJS(byte* entry,
222  byte* function_entry,
223  JSFunction* func,
224  Object* revc,
225  int64_t argc,
226  Object*** argv);
227  int64_t CallRegExp(byte* entry,
228  String* input,
229  int64_t start_offset,
230  const byte* input_start,
231  const byte* input_end,
232  int* output,
233  int64_t output_size,
234  Address stack_base,
235  int64_t direct_call,
236  void* return_address,
237  Isolate* isolate);
238 
239  // A wrapper class that stores an argument for one of the above Call
240  // functions.
241  //
242  // Only arguments up to 64 bits in size are supported.
243  class CallArgument {
244  public:
245  template<typename T>
246  explicit CallArgument(T argument) {
247  ASSERT(sizeof(argument) <= sizeof(bits_));
248  memcpy(&bits_, &argument, sizeof(argument));
249  type_ = X_ARG;
250  }
251 
252  explicit CallArgument(double argument) {
253  ASSERT(sizeof(argument) == sizeof(bits_));
254  memcpy(&bits_, &argument, sizeof(argument));
255  type_ = D_ARG;
256  }
257 
258  explicit CallArgument(float argument) {
259  // TODO(all): CallArgument(float) is untested, remove this check once
260  // tested.
261  UNIMPLEMENTED();
262  // Make the D register a NaN to try to trap errors if the callee expects a
263  // double. If it expects a float, the callee should ignore the top word.
264  ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
265  memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
266  // Write the float payload to the S register.
267  ASSERT(sizeof(argument) <= sizeof(bits_));
268  memcpy(&bits_, &argument, sizeof(argument));
269  type_ = D_ARG;
270  }
271 
272  // This indicates the end of the arguments list, so that CallArgument
273  // objects can be passed into varargs functions.
274  static CallArgument End() { return CallArgument(); }
275 
276  int64_t bits() const { return bits_; }
277  bool IsEnd() const { return type_ == NO_ARG; }
278  bool IsX() const { return type_ == X_ARG; }
279  bool IsD() const { return type_ == D_ARG; }
280 
281  private:
282  enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
283 
284  // All arguments are aligned to at least 64 bits and we don't support
285  // passing bigger arguments, so the payload size can be fixed at 64 bits.
286  int64_t bits_;
287  CallArgumentType type_;
288 
289  CallArgument() { type_ = NO_ARG; }
290  };
291 
292 
293  // Start the debugging command line.
294  void Debug();
295 
296  bool GetValue(const char* desc, int64_t* value);
297 
298  bool PrintValue(const char* desc);
299 
300  // Push an address onto the JS stack.
301  uintptr_t PushAddress(uintptr_t address);
302 
303  // Pop an address from the JS stack.
304  uintptr_t PopAddress();
305 
306  // Accessor to the internal simulator stack area.
307  uintptr_t StackLimit() const;
308 
309  void ResetState();
310 
311  // Runtime call support.
312  static void* RedirectExternalReference(void* external_function,
314  void DoRuntimeCall(Instruction* instr);
315 
316  // Run the simulator.
317  static const Instruction* kEndOfSimAddress;
318  void DecodeInstruction();
319  void Run();
320  void RunFrom(Instruction* start);
321 
322  // Simulation helpers.
323  template <typename T>
324  void set_pc(T new_pc) {
325  ASSERT(sizeof(T) == sizeof(pc_));
326  memcpy(&pc_, &new_pc, sizeof(T));
327  pc_modified_ = true;
328  }
329  Instruction* pc() { return pc_; }
330 
331  void increment_pc() {
332  if (!pc_modified_) {
333  pc_ = pc_->following();
334  }
335 
336  pc_modified_ = false;
337  }
338 
339  virtual void Decode(Instruction* instr) {
340  decoder_->Decode(instr);
341  }
342 
343  void ExecuteInstruction() {
344  ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
345  CheckBreakNext();
346  Decode(pc_);
347  LogProcessorState();
348  increment_pc();
349  CheckBreakpoints();
350  }
351 
352  // Declare all Visitor functions.
353  #define DECLARE(A) void Visit##A(Instruction* instr);
355  #undef DECLARE
356 
357  // Register accessors.
358 
359  // Return 'size' bits of the value of an integer register, as the specified
360  // type. The value is zero-extended to fill the result.
361  //
362  // The only supported values of 'size' are kXRegSizeInBits and
363  // kWRegSizeInBits.
364  template<typename T>
365  T reg(unsigned size, unsigned code,
366  Reg31Mode r31mode = Reg31IsZeroRegister) const {
367  unsigned size_in_bytes = size / 8;
368  ASSERT(size_in_bytes <= sizeof(T));
369  ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
370  ASSERT(code < kNumberOfRegisters);
371 
372  if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
373  T result;
374  memset(&result, 0, sizeof(result));
375  return result;
376  }
377  return registers_[code].Get<T>(size_in_bytes);
378  }
379 
380  // Like reg(), but infer the access size from the template type.
381  template<typename T>
382  T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
383  return reg<T>(sizeof(T) * 8, code, r31mode);
384  }
385 
386  // Common specialized accessors for the reg() template.
387  int32_t wreg(unsigned code,
388  Reg31Mode r31mode = Reg31IsZeroRegister) const {
389  return reg<int32_t>(code, r31mode);
390  }
391 
392  int64_t xreg(unsigned code,
393  Reg31Mode r31mode = Reg31IsZeroRegister) const {
394  return reg<int64_t>(code, r31mode);
395  }
396 
397  int64_t reg(unsigned size, unsigned code,
398  Reg31Mode r31mode = Reg31IsZeroRegister) const {
399  return reg<int64_t>(size, code, r31mode);
400  }
401 
402  // Write 'size' bits of 'value' into an integer register. The value is
403  // zero-extended. This behaviour matches AArch64 register writes.
404  //
405  // The only supported values of 'size' are kXRegSizeInBits and
406  // kWRegSizeInBits.
407  template<typename T>
408  void set_reg(unsigned size, unsigned code, T value,
409  Reg31Mode r31mode = Reg31IsZeroRegister) {
410  unsigned size_in_bytes = size / 8;
411  ASSERT(size_in_bytes <= sizeof(T));
412  ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
413  ASSERT(code < kNumberOfRegisters);
414 
415  if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
416  return;
417  }
418  return registers_[code].Set(value, size_in_bytes);
419  }
420 
421  // Like set_reg(), but infer the access size from the template type.
422  template<typename T>
423  void set_reg(unsigned code, T value,
424  Reg31Mode r31mode = Reg31IsZeroRegister) {
425  set_reg(sizeof(value) * 8, code, value, r31mode);
426  }
427 
428  // Common specialized accessors for the set_reg() template.
429  void set_wreg(unsigned code, int32_t value,
430  Reg31Mode r31mode = Reg31IsZeroRegister) {
431  set_reg(kWRegSizeInBits, code, value, r31mode);
432  }
433 
434  void set_xreg(unsigned code, int64_t value,
435  Reg31Mode r31mode = Reg31IsZeroRegister) {
436  set_reg(kXRegSizeInBits, code, value, r31mode);
437  }
438 
439  // Commonly-used special cases.
440  template<typename T>
441  void set_lr(T value) {
442  ASSERT(sizeof(T) == kPointerSize);
443  set_reg(kLinkRegCode, value);
444  }
445 
446  template<typename T>
447  void set_sp(T value) {
448  ASSERT(sizeof(T) == kPointerSize);
449  set_reg(31, value, Reg31IsStackPointer);
450  }
451 
452  int64_t sp() { return xreg(31, Reg31IsStackPointer); }
453  int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
454  int64_t fp() {
456  }
457  Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
458 
459  Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
460 
461  // Return 'size' bits of the value of a floating-point register, as the
462  // specified type. The value is zero-extended to fill the result.
463  //
464  // The only supported values of 'size' are kDRegSizeInBits and
465  // kSRegSizeInBits.
466  template<typename T>
467  T fpreg(unsigned size, unsigned code) const {
468  unsigned size_in_bytes = size / 8;
469  ASSERT(size_in_bytes <= sizeof(T));
470  ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits));
472  return fpregisters_[code].Get<T>(size_in_bytes);
473  }
474 
475  // Like fpreg(), but infer the access size from the template type.
476  template<typename T>
477  T fpreg(unsigned code) const {
478  return fpreg<T>(sizeof(T) * 8, code);
479  }
480 
481  // Common specialized accessors for the fpreg() template.
482  float sreg(unsigned code) const {
483  return fpreg<float>(code);
484  }
485 
486  uint32_t sreg_bits(unsigned code) const {
487  return fpreg<uint32_t>(code);
488  }
489 
490  double dreg(unsigned code) const {
491  return fpreg<double>(code);
492  }
493 
494  uint64_t dreg_bits(unsigned code) const {
495  return fpreg<uint64_t>(code);
496  }
497 
498  double fpreg(unsigned size, unsigned code) const {
499  switch (size) {
500  case kSRegSizeInBits: return sreg(code);
501  case kDRegSizeInBits: return dreg(code);
502  default:
503  UNREACHABLE();
504  return 0.0;
505  }
506  }
507 
508  // Write 'value' into a floating-point register. The value is zero-extended.
509  // This behaviour matches AArch64 register writes.
510  template<typename T>
511  void set_fpreg(unsigned code, T value) {
512  ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
514  fpregisters_[code].Set(value, sizeof(value));
515  }
516 
517  // Common specialized accessors for the set_fpreg() template.
518  void set_sreg(unsigned code, float value) {
519  set_fpreg(code, value);
520  }
521 
522  void set_sreg_bits(unsigned code, uint32_t value) {
523  set_fpreg(code, value);
524  }
525 
526  void set_dreg(unsigned code, double value) {
527  set_fpreg(code, value);
528  }
529 
530  void set_dreg_bits(unsigned code, uint64_t value) {
531  set_fpreg(code, value);
532  }
533 
534  SimSystemRegister& nzcv() { return nzcv_; }
535  SimSystemRegister& fpcr() { return fpcr_; }
536 
537  // Debug helpers
538 
539  // Simulator breakpoints.
540  struct Breakpoint {
541  Instruction* location;
542  bool enabled;
543  };
544  std::vector<Breakpoint> breakpoints_;
545  void SetBreakpoint(Instruction* breakpoint);
546  void ListBreakpoints();
547  void CheckBreakpoints();
548 
549  // Helpers for the 'next' command.
550  // When this is set, the Simulator will insert a breakpoint after the next BL
551  // instruction it meets.
552  bool break_on_next_;
553  // Check if the Simulator should insert a break after the current instruction
554  // for the 'next' command.
555  void CheckBreakNext();
556 
557  // Disassemble instruction at the given address.
558  void PrintInstructionsAt(Instruction* pc, uint64_t count);
559 
560  void PrintSystemRegisters(bool print_all = false);
561  void PrintRegisters(bool print_all_regs = false);
562  void PrintFPRegisters(bool print_all_regs = false);
563  void PrintProcessorState();
564  void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
565  void LogSystemRegisters() {
566  if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
567  }
568  void LogRegisters() {
569  if (log_parameters_ & LOG_REGS) PrintRegisters();
570  }
571  void LogFPRegisters() {
572  if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
573  }
574  void LogProcessorState() {
575  LogSystemRegisters();
576  LogRegisters();
577  LogFPRegisters();
578  }
579  void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
580  if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
581  }
582 
583  int log_parameters() { return log_parameters_; }
584  void set_log_parameters(int new_parameters) {
585  log_parameters_ = new_parameters;
586  if (!decoder_) {
587  if (new_parameters & LOG_DISASM) {
588  PrintF("Run --debug-sim to dynamically turn on disassembler\n");
589  }
590  return;
591  }
592  if (new_parameters & LOG_DISASM) {
593  decoder_->InsertVisitorBefore(print_disasm_, this);
594  } else {
595  decoder_->RemoveVisitor(print_disasm_);
596  }
597  }
598 
599  static inline const char* WRegNameForCode(unsigned code,
601  static inline const char* XRegNameForCode(unsigned code,
603  static inline const char* SRegNameForCode(unsigned code);
604  static inline const char* DRegNameForCode(unsigned code);
605  static inline const char* VRegNameForCode(unsigned code);
606  static inline int CodeFromName(const char* name);
607 
608  protected:
609  // Simulation helpers ------------------------------------
610  bool ConditionPassed(Condition cond) {
611  SimSystemRegister& flags = nzcv();
612  switch (cond) {
613  case eq:
614  return flags.Z();
615  case ne:
616  return !flags.Z();
617  case hs:
618  return flags.C();
619  case lo:
620  return !flags.C();
621  case mi:
622  return flags.N();
623  case pl:
624  return !flags.N();
625  case vs:
626  return flags.V();
627  case vc:
628  return !flags.V();
629  case hi:
630  return flags.C() && !flags.Z();
631  case ls:
632  return !(flags.C() && !flags.Z());
633  case ge:
634  return flags.N() == flags.V();
635  case lt:
636  return flags.N() != flags.V();
637  case gt:
638  return !flags.Z() && (flags.N() == flags.V());
639  case le:
640  return !(!flags.Z() && (flags.N() == flags.V()));
641  case nv: // Fall through.
642  case al:
643  return true;
644  default:
645  UNREACHABLE();
646  return false;
647  }
648  }
649 
650  bool ConditionFailed(Condition cond) {
651  return !ConditionPassed(cond);
652  }
653 
654  void AddSubHelper(Instruction* instr, int64_t op2);
655  int64_t AddWithCarry(unsigned reg_size,
656  bool set_flags,
657  int64_t src1,
658  int64_t src2,
659  int64_t carry_in = 0);
660  void LogicalHelper(Instruction* instr, int64_t op2);
661  void ConditionalCompareHelper(Instruction* instr, int64_t op2);
662  void LoadStoreHelper(Instruction* instr,
663  int64_t offset,
664  AddrMode addrmode);
665  void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
666  uint8_t* LoadStoreAddress(unsigned addr_reg,
667  int64_t offset,
668  AddrMode addrmode);
669  void LoadStoreWriteBack(unsigned addr_reg,
670  int64_t offset,
671  AddrMode addrmode);
672  void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
673 
674  uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
675  uint8_t MemoryRead8(uint8_t* address);
676  uint16_t MemoryRead16(uint8_t* address);
677  uint32_t MemoryRead32(uint8_t* address);
678  float MemoryReadFP32(uint8_t* address);
679  uint64_t MemoryRead64(uint8_t* address);
680  double MemoryReadFP64(uint8_t* address);
681 
682  void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
683  void MemoryWrite32(uint8_t* address, uint32_t value);
684  void MemoryWriteFP32(uint8_t* address, float value);
685  void MemoryWrite64(uint8_t* address, uint64_t value);
686  void MemoryWriteFP64(uint8_t* address, double value);
687 
688  int64_t ShiftOperand(unsigned reg_size,
689  int64_t value,
690  Shift shift_type,
691  unsigned amount);
692  int64_t Rotate(unsigned reg_width,
693  int64_t value,
694  Shift shift_type,
695  unsigned amount);
696  int64_t ExtendValue(unsigned reg_width,
697  int64_t value,
698  Extend extend_type,
699  unsigned left_shift = 0);
700 
701  uint64_t ReverseBits(uint64_t value, unsigned num_bits);
702  uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
703 
704  template <typename T>
705  T FPDefaultNaN() const;
706 
707  void FPCompare(double val0, double val1);
708  double FPRoundInt(double value, FPRounding round_mode);
709  double FPToDouble(float value);
710  float FPToFloat(double value, FPRounding round_mode);
711  double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
712  double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
713  float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
714  float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
715  int32_t FPToInt32(double value, FPRounding rmode);
716  int64_t FPToInt64(double value, FPRounding rmode);
717  uint32_t FPToUInt32(double value, FPRounding rmode);
718  uint64_t FPToUInt64(double value, FPRounding rmode);
719 
720  template <typename T>
721  T FPAdd(T op1, T op2);
722 
723  template <typename T>
724  T FPDiv(T op1, T op2);
725 
726  template <typename T>
727  T FPMax(T a, T b);
728 
729  template <typename T>
730  T FPMaxNM(T a, T b);
731 
732  template <typename T>
733  T FPMin(T a, T b);
734 
735  template <typename T>
736  T FPMinNM(T a, T b);
737 
738  template <typename T>
739  T FPMul(T op1, T op2);
740 
741  template <typename T>
742  T FPMulAdd(T a, T op1, T op2);
743 
744  template <typename T>
745  T FPSqrt(T op);
746 
747  template <typename T>
748  T FPSub(T op1, T op2);
749 
750  // Standard NaN processing.
751  template <typename T>
752  T FPProcessNaN(T op);
753 
754  bool FPProcessNaNs(Instruction* instr);
755 
756  template <typename T>
757  T FPProcessNaNs(T op1, T op2);
758 
759  template <typename T>
760  T FPProcessNaNs3(T op1, T op2, T op3);
761 
762  void CheckStackAlignment();
763 
764  inline void CheckPCSComplianceAndRun();
765 
766 #ifdef DEBUG
767  // Corruption values should have their least significant byte cleared to
768  // allow the code of the register being corrupted to be inserted.
769  static const uint64_t kCallerSavedRegisterCorruptionValue =
770  0xca11edc0de000000UL;
771  // This value is a NaN in both 32-bit and 64-bit FP.
772  static const uint64_t kCallerSavedFPRegisterCorruptionValue =
773  0x7ff000007f801000UL;
774  // This value is a mix of 32/64-bits NaN and "verbose" immediate.
775  static const uint64_t kDefaultCPURegisterCorruptionValue =
776  0x7ffbad007f8bad00UL;
777 
778  void CorruptRegisters(CPURegList* list,
779  uint64_t value = kDefaultCPURegisterCorruptionValue);
780  void CorruptAllCallerSavedCPURegisters();
781 #endif
782 
783  // Processor state ---------------------------------------
784 
785  // Output stream.
786  FILE* stream_;
787  PrintDisassembler* print_disasm_;
788 
789  // Instrumentation.
790  Instrument* instrument_;
791 
792  // General purpose registers. Register 31 is the stack pointer.
793  SimRegister registers_[kNumberOfRegisters];
794 
795  // Floating point registers
796  SimFPRegister fpregisters_[kNumberOfFPRegisters];
797 
798  // Processor state
799  // bits[31, 27]: Condition flags N, Z, C, and V.
800  // (Negative, Zero, Carry, Overflow)
801  SimSystemRegister nzcv_;
802 
803  // Floating-Point Control Register
804  SimSystemRegister fpcr_;
805 
806  // Only a subset of FPCR features are supported by the simulator. This helper
807  // checks that the FPCR settings are supported.
808  //
809  // This is checked when floating-point instructions are executed, not when
810  // FPCR is set. This allows generated code to modify FPCR for external
811  // functions, or to save and restore it when entering and leaving generated
812  // code.
813  void AssertSupportedFPCR() {
814  ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
815  ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
816 
817  // The simulator does not support half-precision operations so fpcr().AHP()
818  // is irrelevant, and is not checked here.
819  }
820 
821  static int CalcNFlag(uint64_t result, unsigned reg_size) {
822  return (result >> (reg_size - 1)) & 1;
823  }
824 
825  static int CalcZFlag(uint64_t result) {
826  return result == 0;
827  }
828 
829  static const uint32_t kConditionFlagsMask = 0xf0000000;
830 
831  // Stack
832  byte* stack_;
833  static const intptr_t stack_protection_size_ = KB;
834  intptr_t stack_size_;
835  byte* stack_limit_;
836 
837  Decoder<DispatchingDecoderVisitor>* decoder_;
838  Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
839 
840  // Indicates if the pc has been modified by the instruction and should not be
841  // automatically incremented.
842  bool pc_modified_;
843  Instruction* pc_;
844 
845  static const char* xreg_names[];
846  static const char* wreg_names[];
847  static const char* sreg_names[];
848  static const char* dreg_names[];
849  static const char* vreg_names[];
850 
851  // Debugger input.
852  void set_last_debugger_input(char* input) {
853  DeleteArray(last_debugger_input_);
854  last_debugger_input_ = input;
855  }
856  char* last_debugger_input() { return last_debugger_input_; }
857  char* last_debugger_input_;
858 
859  private:
860  void Init(FILE* stream);
861 
862  int log_parameters_;
863  Isolate* isolate_;
864 };
865 
866 
867 // When running with the simulator transition into simulated execution at this
868 // point.
869 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
870  reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
871  FUNCTION_ADDR(entry), \
872  p0, p1, p2, p3, p4))
873 
874 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
875  Simulator::current(Isolate::Current())->CallRegExp( \
876  entry, \
877  p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
878 
879 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
880  try_catch_address == NULL ? \
881  NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
882 
883 
884 // The simulator has its own stack. Thus it has a different stack limit from
885 // the C-based native code.
886 // See also 'class SimulatorStack' in arm/simulator-arm.h.
887 class SimulatorStack : public v8::internal::AllStatic {
888  public:
889  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
890  uintptr_t c_limit) {
891  return Simulator::current(isolate)->StackLimit();
892  }
893 
894  static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
895  Simulator* sim = Simulator::current(Isolate::Current());
896  return sim->PushAddress(try_catch_address);
897  }
898 
899  static void UnregisterCTryCatch() {
900  Simulator::current(Isolate::Current())->PopAddress();
901  }
902 };
903 
904 #endif // !defined(USE_SIMULATOR)
905 
906 } } // namespace v8::internal
907 
908 #endif // V8_ARM64_SIMULATOR_ARM64_H_
byte * Address
Definition: globals.h:186
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
int(* arm64_regexp_matcher)(String *input, int64_t start_offset, const byte *input_start, const byte *input_end, int *output, int64_t output_size, Address stack_base, int64_t direct_call, void *return_address, Isolate *isolate)
void PrintF(const char *format,...)
Definition: v8utils.cc:40
#define VISITOR_LIST(V)
Definition: decoder-arm64.h:42
const unsigned kDRegSizeInBits
const int KB
Definition: globals.h:245
const unsigned kXRegSizeInBits
kSerializedDataOffset Object
Definition: objects-inl.h:5016
TypeImpl< ZoneTypeConfig > Type
int int32_t
Definition: unicode.cc:47
#define ASSERT(condition)
Definition: checks.h:329
#define DEFINE_GETTER(Name, HighBit, LowBit, Func)
unsigned short uint16_t
Definition: unicode.cc:46
const unsigned kLinkRegCode
uint8_t byte
Definition: globals.h:185
const unsigned kWRegSizeInBits
const Register sp
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
Definition: flags.cc:665
const int kPointerSize
Definition: globals.h:268
const unsigned kInstructionSize
bool IsAligned(T value, U alignment)
Definition: utils.h:211
const unsigned kSRegSizeInBits
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate *isolate, uintptr_t c_limit)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
const Register pc
const unsigned kNumberOfFPRegisters
const unsigned kSRegSize
int32_t signed_bitextract_32(int msb, int lsb, int32_t x)
Definition: utils.h:1092
#define T(name, string, precedence)
Definition: token.cc:48
const Register lr
const unsigned kNumberOfRegisters
#define UNIMPLEMENTED()
Definition: checks.h:50
const unsigned kDRegSize
void USE(T)
Definition: globals.h:341
StringCharacterStream *const stream_
const unsigned kFramePointerRegCode
const unsigned kJSSPCode
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address)
const Register fp
void DeleteArray(T *array)
Definition: allocation.h:91
#define DECLARE(A)
uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x)
Definition: utils.h:1084
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Definition: flags.cc:505
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_)