v8  3.25.30(node0.11.13)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
simulator-arm64.cc
Go to the documentation of this file.
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <stdlib.h>
29 #include <cmath>
30 #include <cstdarg>
31 #include "v8.h"
32 
33 #if V8_TARGET_ARCH_ARM64
34 
35 #include "disasm.h"
36 #include "assembler.h"
38 #include "arm64/simulator-arm64.h"
39 #include "macro-assembler.h"
40 
41 namespace v8 {
42 namespace internal {
43 
44 #if defined(USE_SIMULATOR)
45 
46 
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT


// Helpers for colors.
// Depending on your terminal configuration, the colour names may not match the
// observed colours.
// COLOUR wraps an ANSI SGR attribute string in the escape sequence that
// activates it; BOLD prepends the bold/bright attribute to a colour code.
#define COLOUR(colour_code) "\033[" colour_code "m"
#define BOLD(colour_code) "1;" colour_code
#define NORMAL ""
#define GREY "30"
#define GREEN "32"
#define ORANGE "33"
#define BLUE "34"
#define PURPLE "35"
#define INDIGO "36"
#define WHITE "37"
// Colour prefixes used by the simulator's tracing/debugging output. When
// FLAG_log_colour is off, every string is empty so no escape codes are
// emitted.
typedef char const * const TEXT_COLOUR;
TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : "";
TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : "";
TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : "";
TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : "";
TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : "";
TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
80 
81 
82 // This is basically the same as PrintF, with a guard for FLAG_trace_sim.
83 void PRINTF_CHECKING TraceSim(const char* format, ...) {
84  if (FLAG_trace_sim) {
85  va_list arguments;
86  va_start(arguments, format);
87  OS::VPrint(format, arguments);
88  va_end(arguments);
89  }
90 }
91 
92 
// Sentinel pc value: reaching it (e.g. by returning through lr) stops Run().
const Instruction* Simulator::kEndOfSimAddress = NULL;
94 
95 
96 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
97  int width = msb - lsb + 1;
98  ASSERT(is_uintn(bits, width) || is_intn(bits, width));
99 
100  bits <<= lsb;
101  uint32_t mask = ((1 << width) - 1) << lsb;
102  ASSERT((mask & write_ignore_mask_) == 0);
103 
104  value_ = (value_ & ~mask) | (bits & mask);
105 }
106 
107 
108 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
109  switch (id) {
110  case NZCV:
111  return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
112  case FPCR:
113  return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
114  default:
115  UNREACHABLE();
116  return SimSystemRegister();
117  }
118 }
119 
120 
121 void Simulator::Initialize(Isolate* isolate) {
122  if (isolate->simulator_initialized()) return;
123  isolate->set_simulator_initialized(true);
124  ExternalReference::set_redirector(isolate, &RedirectExternalReference);
125 }
126 
127 
// Get the active Simulator for the current thread, creating and caching it
// in the per-isolate thread data on first use.
Simulator* Simulator::current(Isolate* isolate) {
  Isolate::PerIsolateThreadData* isolate_data =
      isolate->FindOrAllocatePerThreadDataForThisThread();
  ASSERT(isolate_data != NULL);

  Simulator* sim = isolate_data->simulator();
  if (sim == NULL) {
    if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
      // Tracing/debugging needs the full dispatching decoder pipeline so
      // extra visitors (disassembler, instrument) can be chained in.
      sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
    } else {
      // Fast path: the Simulator itself is instantiated as the decoder's
      // visitor template argument, avoiding per-instruction dispatch
      // indirection. Note this uses Simulator's default constructor.
      sim = new Decoder<Simulator>();
      sim->isolate_ = isolate;
    }
    isolate_data->set_simulator(sim);
  }
  return sim;
}
146 
147 
// Marshals |args| according to the AArch64 PCS — the first eight X arguments
// and first eight D arguments go in registers, the rest on the stack — then
// runs the simulated code at |entry| and restores the original stack pointer.
void Simulator::CallVoid(byte* entry, CallArgument* args) {
  int index_x = 0;
  int index_d = 0;

  std::vector<int64_t> stack_args(0);
  for (int i = 0; !args[i].IsEnd(); i++) {
    CallArgument arg = args[i];
    if (arg.IsX() && (index_x < 8)) {
      set_xreg(index_x++, arg.bits());
    } else if (arg.IsD() && (index_d < 8)) {
      set_dreg_bits(index_d++, arg.bits());
    } else {
      ASSERT(arg.IsD() || arg.IsX());
      stack_args.push_back(arg.bits());
    }
  }

  // Process stack arguments, and make sure the stack is suitably aligned.
  uintptr_t original_stack = sp();
  uintptr_t entry_stack = original_stack -
      stack_args.size() * sizeof(stack_args[0]);
  if (OS::ActivationFrameAlignment() != 0) {
    // Round down to the alignment boundary (negation of a power of two is
    // its mask).
    entry_stack &= -OS::ActivationFrameAlignment();
  }
  char * stack = reinterpret_cast<char*>(entry_stack);
  std::vector<int64_t>::const_iterator it;
  for (it = stack_args.begin(); it != stack_args.end(); it++) {
    memcpy(stack, &(*it), sizeof(*it));
    stack += sizeof(*it);
  }

  ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
  set_sp(entry_stack);

  // Call the generated code.
  set_pc(entry);
  // Returning to kEndOfSimAddress terminates Run().
  set_lr(kEndOfSimAddress);
  CheckPCSComplianceAndRun();

  set_sp(original_stack);
}
189 
190 
191 int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
192  CallVoid(entry, args);
193  return xreg(0);
194 }
195 
196 
197 double Simulator::CallDouble(byte* entry, CallArgument* args) {
198  CallVoid(entry, args);
199  return dreg(0);
200 }
201 
202 
203 int64_t Simulator::CallJS(byte* entry,
204  byte* function_entry,
205  JSFunction* func,
206  Object* revc,
207  int64_t argc,
208  Object*** argv) {
209  CallArgument args[] = {
210  CallArgument(function_entry),
211  CallArgument(func),
212  CallArgument(revc),
213  CallArgument(argc),
214  CallArgument(argv),
215  CallArgument::End()
216  };
217  return CallInt64(entry, args);
218 }
219 
// Invokes a generated regular-expression matcher at |entry|. The argument
// order here must mirror the regexp entry point's expected layout exactly.
int64_t Simulator::CallRegExp(byte* entry,
                              String* input,
                              int64_t start_offset,
                              const byte* input_start,
                              const byte* input_end,
                              int* output,
                              int64_t output_size,
                              Address stack_base,
                              int64_t direct_call,
                              void* return_address,
                              Isolate* isolate) {
  CallArgument args[] = {
    CallArgument(input),
    CallArgument(start_offset),
    CallArgument(input_start),
    CallArgument(input_end),
    CallArgument(output),
    CallArgument(output_size),
    CallArgument(stack_base),
    CallArgument(direct_call),
    CallArgument(return_address),
    CallArgument(isolate),
    CallArgument::End()
  };
  return CallInt64(entry, args);
}
246 
247 
// Runs the simulation and, in debug builds, verifies that the called code
// respects the AArch64 Procedure Call Standard: sp and all callee-saved
// registers must be preserved. Caller-saved registers (except the return
// registers) are then corrupted to flush out code that wrongly relies on
// them surviving a call.
void Simulator::CheckPCSComplianceAndRun() {
#ifdef DEBUG
  // NOTE(review): original source lines 250-251 are not shown in this
  // listing; confirm their content against the upstream file.

  int64_t saved_registers[kNumberOfCalleeSavedRegisters];
  uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];

  // PopLowestIndex mutates the list, so work on copies.
  CPURegList register_list = kCalleeSaved;
  CPURegList fpregister_list = kCalleeSavedFP;

  for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
    // x31 is not a caller saved register, so no need to specify if we want
    // the stack or zero.
    saved_registers[i] = xreg(register_list.PopLowestIndex().code());
  }
  for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
    saved_fpregisters[i] =
        dreg_bits(fpregister_list.PopLowestIndex().code());
  }
  int64_t original_stack = sp();
#endif
  // Start the simulation!
  Run();
#ifdef DEBUG
  CHECK_EQ(original_stack, sp());
  // Check that callee-saved registers have been preserved.
  register_list = kCalleeSaved;
  fpregister_list = kCalleeSavedFP;
  for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
    CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
  }
  for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
    ASSERT(saved_fpregisters[i] ==
           dreg_bits(fpregister_list.PopLowestIndex().code()));
  }

  // Corrupt caller saved register minus the return regiters.

  // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1
  // for now .
  register_list = kCallerSaved;
  register_list.Remove(x0);
  register_list.Remove(x1);

  // In theory d0 to d7 can be used for return values, but V8 only uses d0
  // for now .
  fpregister_list = kCallerSavedFP;
  fpregister_list.Remove(d0);

  CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
  CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
#endif
}
302 
303 
304 #ifdef DEBUG
305 // The least significant byte of the curruption value holds the corresponding
306 // register's code.
307 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
308  if (list->type() == CPURegister::kRegister) {
309  while (!list->IsEmpty()) {
310  unsigned code = list->PopLowestIndex().code();
311  set_xreg(code, value | code);
312  }
313  } else {
314  ASSERT(list->type() == CPURegister::kFPRegister);
315  while (!list->IsEmpty()) {
316  unsigned code = list->PopLowestIndex().code();
317  set_dreg_bits(code, value | code);
318  }
319  }
320 }
321 
322 
323 void Simulator::CorruptAllCallerSavedCPURegisters() {
324  // Corrupt alters its parameter so copy them first.
325  CPURegList register_list = kCallerSaved;
326  CPURegList fpregister_list = kCallerSavedFP;
327 
328  CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
329  CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
330 }
331 #endif
332 
333 
334 // Extending the stack by 2 * 64 bits is required for stack alignment purposes.
335 uintptr_t Simulator::PushAddress(uintptr_t address) {
336  ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
337  intptr_t new_sp = sp() - 2 * kXRegSize;
338  uintptr_t* alignment_slot =
339  reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
340  memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
341  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
342  memcpy(stack_slot, &address, kPointerSize);
343  set_sp(new_sp);
344  return new_sp;
345 }
346 
347 
348 uintptr_t Simulator::PopAddress() {
349  intptr_t current_sp = sp();
350  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
351  uintptr_t address = *stack_slot;
352  ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
353  set_sp(current_sp + 2 * kXRegSize);
354  return address;
355 }
356 
357 
358 // Returns the limit of the stack area to enable checking for stack overflows.
359 uintptr_t Simulator::StackLimit() const {
360  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
361  // pushing values.
362  return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
363 }
364 
365 
// Constructs a tracing-capable simulator that executes instructions through
// |decoder|, with trace/log output directed to |stream|.
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
                     Isolate* isolate, FILE* stream)
    : decoder_(decoder),
      last_debugger_input_(NULL),
      log_parameters_(NO_PARAM),
      isolate_(isolate) {
  // Setup the decoder.
  decoder_->AppendVisitor(this);

  Init(stream);

  if (FLAG_trace_sim) {
    // Disassemble each instruction before the simulator executes it.
    decoder_->InsertVisitorBefore(print_disasm_, this);
    log_parameters_ = LOG_ALL;
  }

  if (FLAG_log_instruction_stats) {
    instrument_ = new Instrument(FLAG_log_instruction_file,
                                 FLAG_log_instruction_period);
    decoder_->AppendVisitor(instrument_);
  }
}
388 
389 
// Constructs the lightweight simulator used when neither tracing nor
// instruction statistics are enabled (see Simulator::current, where this is
// created via Decoder<Simulator>).
Simulator::Simulator()
    : decoder_(NULL),
      last_debugger_input_(NULL),
      log_parameters_(NO_PARAM),
      isolate_(NULL) {
  Init(NULL);
  // This configuration lacks the visitor pipeline the flags require, so
  // they must be off.
  CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
}
398 
399 
// Shared constructor body: resets register state, allocates the simulated
// stack and creates the disassembly helpers.
void Simulator::Init(FILE* stream) {
  ResetState();

  // Allocate and setup the simulator stack.
  // Layout: [protection][usable stack][protection]; stack_limit_ points just
  // above the lower guard region.
  stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
  stack_ = new byte[stack_size_];
  stack_limit_ = stack_ + stack_protection_size_;
  byte* tos = stack_ + stack_size_ - stack_protection_size_;
  // The stack pointer must be 16 bytes aligned.
  set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);

  stream_ = stream;
  print_disasm_ = new PrintDisassembler(stream_);

  // The debugger needs to disassemble code without the simulator executing an
  // instruction, so we create a dedicated decoder.
  disassembler_decoder_ = new Decoder<DispatchingDecoderVisitor>();
  disassembler_decoder_->AppendVisitor(print_disasm_);
}
419 
420 
421 void Simulator::ResetState() {
422  // Reset the system registers.
423  nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
424  fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
425 
426  // Reset registers to 0.
427  pc_ = NULL;
428  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
429  set_xreg(i, 0xbadbeef);
430  }
431  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
432  // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
433  set_dreg_bits(i, 0x7ff000007f800001UL);
434  }
435  // Returning to address 0 exits the Simulator.
436  set_lr(kEndOfSimAddress);
437 
438  // Reset debug helpers.
439  breakpoints_.empty();
440  break_on_next_= false;
441 }
442 
443 
// Releases the simulated stack, decoders and debug helpers.
Simulator::~Simulator() {
  delete[] stack_;
  if (FLAG_log_instruction_stats) {
    // instrument_ is only allocated when instruction stats are enabled
    // (see the tracing constructor).
    delete instrument_;
  }
  delete disassembler_decoder_;
  delete print_disasm_;
  DeleteArray(last_debugger_input_);
  delete decoder_;
}
454 
455 
456 void Simulator::Run() {
457  pc_modified_ = false;
458  while (pc_ != kEndOfSimAddress) {
459  ExecuteInstruction();
460  }
461 }
462 
463 
464 void Simulator::RunFrom(Instruction* start) {
465  set_pc(start);
466  Run();
467 }
468 
469 
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
// execute it with the simulator. We do that by redirecting the external
// reference to a svc (Supervisor Call) instruction that is handled by
// the simulator. We write the original destination of the jump just at a known
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
 public:
  // Builds the redirect instruction and links this redirection into the
  // isolate's singly-linked redirection list.
  Redirection(void* external_function, ExternalReference::Type type)
      : external_function_(external_function),
        type_(type),
        next_(NULL) {
    // NOTE(review): the argument to SetInstructionBits (original source line
    // 484) is missing from this listing; confirm against the upstream file.
    redirect_call_.SetInstructionBits(
    Isolate* isolate = Isolate::Current();
    next_ = isolate->simulator_redirection();
    // TODO(all): Simulator flush I cache
    isolate->set_simulator_redirection(this);
  }

  // Address generated code jumps to instead of the real external function.
  void* address_of_redirect_call() {
    return reinterpret_cast<void*>(&redirect_call_);
  }

  template <typename T>
  T external_function() { return reinterpret_cast<T>(external_function_); }

  ExternalReference::Type type() { return type_; }

  // Returns the existing redirection for |external_function|, or creates one.
  // NOTE(review): the remainder of this signature (original source line 501)
  // is missing from this listing; confirm against the upstream file.
  static Redirection* Get(void* external_function,
    Isolate* isolate = Isolate::Current();
    Redirection* current = isolate->simulator_redirection();
    for (; current != NULL; current = current->next_) {
      if (current->external_function_ == external_function) {
        ASSERT_EQ(current->type(), type);
        return current;
      }
    }
    return new Redirection(external_function, type);
  }

  // Recovers the Redirection object from the address of its embedded
  // redirect_call_ instruction, using the member's offset within the class.
  static Redirection* FromHltInstruction(Instruction* redirect_call) {
    char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
    char* addr_of_redirection =
        addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
    return reinterpret_cast<Redirection*>(addr_of_redirection);
  }

  // Maps a register value holding a redirect-call address back to the real
  // external function pointer.
  static void* ReverseRedirection(int64_t reg) {
    Redirection* redirection =
        FromHltInstruction(reinterpret_cast<Instruction*>(reg));
    return redirection->external_function<void*>();
  }

 private:
  void* external_function_;   // Host function the redirection stands in for.
  Instruction redirect_call_; // Trap instruction executed by the simulator.
  Redirection* next_;         // Next entry in the isolate's redirection list.
};
532 
533 
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bits values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
struct ObjectPair {
  int64_t res0;  // First result, written back to x0.
  int64_t res1;  // Second result, written back to x1.
};
543 
544 
// Signature used for general runtime calls (ExternalReference::BUILTIN_CALL).
// Eight integer arguments are always passed, whether the target uses them or
// not (see DoRuntimeCall).
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
                                           int64_t arg1,
                                           int64_t arg2,
                                           int64_t arg3,
                                           int64_t arg4,
                                           int64_t arg5,
                                           int64_t arg6,
                                           int64_t arg7);

// Signatures for the floating-point builtin call variants.
typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
typedef double (*SimulatorRuntimeFPCall)(double arg1);
typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);

// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);

// This signature supports direct call to accessor getter callback.
typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
                                                    void* arg2);
568 
// Handles a redirected external call trapped by the simulator: recovers the
// host function from the Redirection embedded at |instr|, casts it to the
// signature recorded for it, calls it with arguments taken from the
// simulated registers, and writes results back. Finally the pc is set to the
// saved return address, resuming simulation at the original call site.
void Simulator::DoRuntimeCall(Instruction* instr) {
  Redirection* redirection = Redirection::FromHltInstruction(instr);

  // The called C code might itself call simulated code, so any
  // caller-saved registers (including lr) could still be clobbered by a
  // redirected call.
  Instruction* return_address = lr();

  int64_t external = redirection->external_function<int64_t>();

  TraceSim("Call to host function at %p\n",
           redirection->external_function<void*>());

  // SP must be 16-byte-aligned at the call interface.
  bool stack_alignment_exception = ((sp() & 0xf) != 0);
  if (stack_alignment_exception) {
    TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
    FATAL("ALIGNMENT EXCEPTION");
  }

  switch (redirection->type()) {
    default:
      TraceSim("Type: Unknown.\n");
      UNREACHABLE();
      break;

    case ExternalReference::BUILTIN_CALL: {
      // MaybeObject* f(v8::internal::Arguments).
      TraceSim("Type: BUILTIN_CALL\n");
      SimulatorRuntimeCall target =
          reinterpret_cast<SimulatorRuntimeCall>(external);

      // We don't know how many arguments are being passed, but we can
      // pass 8 without touching the stack. They will be ignored by the
      // host function if they aren't used.
      TraceSim("Arguments: "
               "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
               "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
               "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
               "0x%016" PRIx64 ", 0x%016" PRIx64,
               xreg(0), xreg(1), xreg(2), xreg(3),
               xreg(4), xreg(5), xreg(6), xreg(7));
      ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
                                 xreg(4), xreg(5), xreg(6), xreg(7));
      TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
               result.res0, result.res1);
#ifdef DEBUG
      // Expose any hidden reliance on caller-saved registers surviving calls.
      CorruptAllCallerSavedCPURegisters();
#endif
      set_xreg(0, result.res0);
      set_xreg(1, result.res1);
      break;
    }

    case ExternalReference::DIRECT_API_CALL: {
      // void f(v8::FunctionCallbackInfo&)
      TraceSim("Type: DIRECT_API_CALL\n");
      SimulatorRuntimeDirectApiCall target =
          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
      TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
      target(xreg(0));
      TraceSim("No return value.");
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      break;
    }

    case ExternalReference::BUILTIN_COMPARE_CALL: {
      // int f(double, double)
      TraceSim("Type: BUILTIN_COMPARE_CALL\n");
      SimulatorRuntimeCompareCall target =
          reinterpret_cast<SimulatorRuntimeCompareCall>(external);
      TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
      int64_t result = target(dreg(0), dreg(1));
      TraceSim("Returned: %" PRId64 "\n", result);
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      set_xreg(0, result);
      break;
    }

    case ExternalReference::BUILTIN_FP_CALL: {
      // double f(double)
      TraceSim("Type: BUILTIN_FP_CALL\n");
      SimulatorRuntimeFPCall target =
          reinterpret_cast<SimulatorRuntimeFPCall>(external);
      TraceSim("Argument: %f\n", dreg(0));
      double result = target(dreg(0));
      TraceSim("Returned: %f\n", result);
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      set_dreg(0, result);
      break;
    }

    case ExternalReference::BUILTIN_FP_FP_CALL: {
      // double f(double, double)
      TraceSim("Type: BUILTIN_FP_FP_CALL\n");
      SimulatorRuntimeFPFPCall target =
          reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
      TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
      double result = target(dreg(0), dreg(1));
      TraceSim("Returned: %f\n", result);
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      set_dreg(0, result);
      break;
    }

    case ExternalReference::BUILTIN_FP_INT_CALL: {
      // double f(double, int)
      TraceSim("Type: BUILTIN_FP_INT_CALL\n");
      SimulatorRuntimeFPIntCall target =
          reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
      TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
      double result = target(dreg(0), wreg(0));
      TraceSim("Returned: %f\n", result);
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      set_dreg(0, result);
      break;
    }

    case ExternalReference::DIRECT_GETTER_CALL: {
      // void f(Local<String> property, PropertyCallbackInfo& info)
      TraceSim("Type: DIRECT_GETTER_CALL\n");
      SimulatorRuntimeDirectGetterCall target =
          reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
      TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
               xreg(0), xreg(1));
      target(xreg(0), xreg(1));
      TraceSim("No return value.");
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      break;
    }

    case ExternalReference::PROFILING_API_CALL: {
      // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
      TraceSim("Type: PROFILING_API_CALL\n");
      SimulatorRuntimeProfilingApiCall target =
          reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
      // x1 holds a redirect-call address; map it back to the host callback.
      void* arg1 = Redirection::ReverseRedirection(xreg(1));
      TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
      target(xreg(0), arg1);
      TraceSim("No return value.");
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      break;
    }

    case ExternalReference::PROFILING_GETTER_CALL: {
      // void f(Local<String> property, PropertyCallbackInfo& info,
      // AccessorGetterCallback callback)
      TraceSim("Type: PROFILING_GETTER_CALL\n");
      SimulatorRuntimeProfilingGetterCall target =
          reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
              external);
      // x2 holds a redirect-call address; map it back to the host callback.
      void* arg2 = Redirection::ReverseRedirection(xreg(2));
      TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
               xreg(0), xreg(1), arg2);
      target(xreg(0), xreg(1), arg2);
      TraceSim("No return value.");
#ifdef DEBUG
      CorruptAllCallerSavedCPURegisters();
#endif
      break;
    }
  }

  set_lr(return_address);
  set_pc(return_address);
}
749 
750 
// Returns the trap address generated code should jump to instead of the real
// external function (see class Redirection above).
// NOTE(review): the remainder of this signature (original source line 752,
// presumably the ExternalReference::Type parameter used below) is missing
// from this listing; confirm against the upstream file.
void* Simulator::RedirectExternalReference(void* external_function,
  Redirection* redirection = Redirection::Get(external_function, type);
  return redirection->address_of_redirect_call();
}
756 
757 
// Register name tables, indexed by register code. For the integer tables,
// code 31 is the zero register ("xzr"/"wzr"); the stack pointer name
// ("csp"/"wcsp") is stored one slot past it and selected explicitly by
// XRegNameForCode / WRegNameForCode.
const char* Simulator::xreg_names[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};

const char* Simulator::wreg_names[] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};

const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};

const char* Simulator::dreg_names[] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};

const char* Simulator::vreg_names[] = {
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
787 
788 
789 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
790  ASSERT(code < kNumberOfRegisters);
791  // If the code represents the stack pointer, index the name after zr.
792  if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
793  code = kZeroRegCode + 1;
794  }
795  return wreg_names[code];
796 }
797 
798 
799 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
800  ASSERT(code < kNumberOfRegisters);
801  // If the code represents the stack pointer, index the name after zr.
802  if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
803  code = kZeroRegCode + 1;
804  }
805  return xreg_names[code];
806 }
807 
808 
// Returns the single-precision register name for |code|.
const char* Simulator::SRegNameForCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return sreg_names[code];
}
813 
814 
// Returns the double-precision register name for |code|.
const char* Simulator::DRegNameForCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return dreg_names[code];
}
819 
820 
// Returns the vector register name for |code|.
const char* Simulator::VRegNameForCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return vreg_names[code];
}
825 
826 
827 int Simulator::CodeFromName(const char* name) {
828  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
829  if ((strcmp(xreg_names[i], name) == 0) ||
830  (strcmp(wreg_names[i], name) == 0)) {
831  return i;
832  }
833  }
834  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
835  if ((strcmp(vreg_names[i], name) == 0) ||
836  (strcmp(dreg_names[i], name) == 0) ||
837  (strcmp(sreg_names[i], name) == 0)) {
838  return i;
839  }
840  }
841  if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
842  return kSPRegInternalCode;
843  }
844  return -1;
845 }
846 
847 
// Helpers ---------------------------------------------------------------------
// Computes src1 + src2 + carry_in truncated to reg_size bits and, when
// set_flags is true, updates NZCV exactly as the corresponding flag-setting
// add/adc instruction would.
int64_t Simulator::AddWithCarry(unsigned reg_size,
                                bool set_flags,
                                int64_t src1,
                                int64_t src2,
                                int64_t carry_in) {
  ASSERT((carry_in == 0) || (carry_in == 1));
  ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));

  uint64_t u1, u2;
  int64_t result;
  int64_t signed_sum = src1 + src2 + carry_in;

  bool N, Z, C, V;

  if (reg_size == kWRegSizeInBits) {
    u1 = static_cast<uint64_t>(src1) & kWRegMask;
    u2 = static_cast<uint64_t>(src2) & kWRegMask;

    result = signed_sum & kWRegMask;
    // Compute the C flag by comparing the sum to the max unsigned integer.
    // Two comparisons are needed so the carry_in addition cannot itself wrap.
    C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
        ((kWMaxUInt - u1 - carry_in) < u2);
    // Overflow iff the sign bit is the same for the two inputs and different
    // for the result.
    // Shifting the W-sized values to the top of the 64-bit word lets the
    // sign comparison below operate on the full-width representations.
    int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits);
    int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits);
    int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits);
    V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);

  } else {
    u1 = static_cast<uint64_t>(src1);
    u2 = static_cast<uint64_t>(src2);

    result = signed_sum;
    // Compute the C flag by comparing the sum to the max unsigned integer.
    C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
        ((kXMaxUInt - u1 - carry_in) < u2);
    // Overflow iff the sign bit is the same for the two inputs and different
    // for the result.
    V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
  }

  N = CalcNFlag(result, reg_size);
  Z = CalcZFlag(result);

  if (set_flags) {
    nzcv().SetN(N);
    nzcv().SetZ(Z);
    nzcv().SetC(C);
    nzcv().SetV(V);
  }
  return result;
}
902 
903 
// Applies an AArch64 operand shift (LSL/LSR/ASR/ROR) of |amount| bits to
// |value|, treating it as a reg_size-bit register value.
int64_t Simulator::ShiftOperand(unsigned reg_size,
                                int64_t value,
                                Shift shift_type,
                                unsigned amount) {
  if (amount == 0) {
    return value;
  }
  int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
  switch (shift_type) {
    case LSL:
      return (value << amount) & mask;
    case LSR:
      // NOTE(review): this does not mask to reg_size first — presumably W
      // values arrive with the top 32 bits already clear; confirm callers.
      return static_cast<uint64_t>(value) >> amount;
    case ASR: {
      // Shift used to restore the sign.
      unsigned s_shift = kXRegSizeInBits - reg_size;
      // Value with its sign restored.
      int64_t s_value = (value << s_shift) >> s_shift;
      return (s_value >> amount) & mask;
    }
    case ROR: {
      if (reg_size == kWRegSizeInBits) {
        value &= kWRegMask;
      }
      // Rotate right: low |amount| bits wrap around to the top of the
      // reg_size-bit value.
      return (static_cast<uint64_t>(value) >> amount) |
             ((value & ((1L << amount) - 1L)) << (reg_size - amount));
    }
    default:
      UNIMPLEMENTED();
      return 0;
  }
}
936 
937 
// Applies an AArch64 extend operation (zero- or sign-extension from byte,
// half-word or word) followed by a left shift, truncated to reg_size bits.
int64_t Simulator::ExtendValue(unsigned reg_size,
                               int64_t value,
                               Extend extend_type,
                               unsigned left_shift) {
  switch (extend_type) {
    case UXTB:
      value &= kByteMask;
      break;
    case UXTH:
      value &= kHalfWordMask;
      break;
    case UXTW:
      value &= kWordMask;
      break;
    // Sign extension: shift the field to the top of the 64-bit word, then
    // arithmetic-shift back down so the sign bit is replicated.
    case SXTB:
      value = (value << 56) >> 56;
      break;
    case SXTH:
      value = (value << 48) >> 48;
      break;
    case SXTW:
      value = (value << 32) >> 32;
      break;
    case UXTX:
    case SXTX:
      // Full-width "extension" is the identity.
      break;
    default:
      UNREACHABLE();
  }
  int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
  return (value << left_shift) & mask;
}
970 
971 
// Default NaN bit pattern returned for double-precision results when
// FPCR.DN (default-NaN mode) is in effect.
template<> double Simulator::FPDefaultNaN<double>() const {
  return kFP64DefaultNaN;
}
975 
976 
// Default NaN bit pattern returned for single-precision results when
// FPCR.DN (default-NaN mode) is in effect.
template<> float Simulator::FPDefaultNaN<float>() const {
  return kFP32DefaultNaN;
}
980 
981 
982 void Simulator::FPCompare(double val0, double val1) {
983  AssertSupportedFPCR();
984 
985  // TODO(jbramley): This assumes that the C++ implementation handles
986  // comparisons in the way that we expect (as per AssertSupportedFPCR()).
987  if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
988  nzcv().SetRawValue(FPUnorderedFlag);
989  } else if (val0 < val1) {
990  nzcv().SetRawValue(FPLessThanFlag);
991  } else if (val0 > val1) {
992  nzcv().SetRawValue(FPGreaterThanFlag);
993  } else if (val0 == val1) {
994  nzcv().SetRawValue(FPEqualFlag);
995  } else {
996  UNREACHABLE();
997  }
998 }
999 
1000 
1001 void Simulator::SetBreakpoint(Instruction* location) {
1002  for (unsigned i = 0; i < breakpoints_.size(); i++) {
1003  if (breakpoints_.at(i).location == location) {
1004  PrintF("Existing breakpoint at %p was %s\n",
1005  reinterpret_cast<void*>(location),
1006  breakpoints_.at(i).enabled ? "disabled" : "enabled");
1007  breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
1008  return;
1009  }
1010  }
1011  Breakpoint new_breakpoint = {location, true};
1012  breakpoints_.push_back(new_breakpoint);
1013  PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
1014 }
1015 
1016 
1017 void Simulator::ListBreakpoints() {
1018  PrintF("Breakpoints:\n");
1019  for (unsigned i = 0; i < breakpoints_.size(); i++) {
1020  PrintF("%p : %s\n",
1021  reinterpret_cast<void*>(breakpoints_.at(i).location),
1022  breakpoints_.at(i).enabled ? "enabled" : "disabled");
1023  }
1024 }
1025 
1026 
1027 void Simulator::CheckBreakpoints() {
1028  bool hit_a_breakpoint = false;
1029  for (unsigned i = 0; i < breakpoints_.size(); i++) {
1030  if ((breakpoints_.at(i).location == pc_) &&
1031  breakpoints_.at(i).enabled) {
1032  hit_a_breakpoint = true;
1033  // Disable this breakpoint.
1034  breakpoints_.at(i).enabled = false;
1035  }
1036  }
1037  if (hit_a_breakpoint) {
1038  PrintF("Hit and disabled a breakpoint at %p.\n",
1039  reinterpret_cast<void*>(pc_));
1040  Debug();
1041  }
1042 }
1043 
1044 
// If break-on-next is armed and the current instruction is a branch-and-link
// to a register (BLR — despite the original comment saying "BL", the check
// is IsBranchAndLinkToRegister), insert a breakpoint just after it so that
// execution stops when the call returns.
void Simulator::CheckBreakNext() {
  if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
    SetBreakpoint(pc_->following());
    break_on_next_ = false;
  }
}
1052 
1053 
1054 void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
1055  Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
1056  for (Instruction* pc = start; pc < end; pc = pc->following()) {
1057  disassembler_decoder_->Decode(pc);
1058  }
1059 }
1060 
1061 
1062 void Simulator::PrintSystemRegisters(bool print_all) {
1063  static bool first_run = true;
1064 
1065  static SimSystemRegister last_nzcv;
1066  if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
1067  fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
1068  clr_flag_name,
1069  clr_flag_value,
1070  nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
1071  clr_normal);
1072  }
1073  last_nzcv = nzcv();
1074 
1075  static SimSystemRegister last_fpcr;
1076  if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
1077  static const char * rmode[] = {
1078  "0b00 (Round to Nearest)",
1079  "0b01 (Round towards Plus Infinity)",
1080  "0b10 (Round towards Minus Infinity)",
1081  "0b11 (Round towards Zero)"
1082  };
1083  ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
1084  fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
1085  clr_flag_name,
1086  clr_flag_value,
1087  fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
1088  clr_normal);
1089  }
1090  last_fpcr = fpcr();
1091 
1092  first_run = false;
1093 }
1094 
1095 
// Print the general-purpose X registers, colour-coded, to stream_. By
// default only registers that changed since the previous call are shown;
// 'print_all_regs' forces a complete dump. Last-seen values are cached in a
// function-local static array.
void Simulator::PrintRegisters(bool print_all_regs) {
  static bool first_run = true;
  static int64_t last_regs[kNumberOfRegisters];

  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (print_all_regs || first_run ||
        (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
      fprintf(stream_,
              "# %s%4s:%s 0x%016" PRIx64 "%s\n",
              clr_reg_name,
              XRegNameForCode(i, Reg31IsStackPointer),
              clr_reg_value,
              xreg(i, Reg31IsStackPointer),
              clr_normal);
    }
    // Cache the new register value so the next run can detect any changes.
    last_regs[i] = xreg(i, Reg31IsStackPointer);
  }
  first_run = false;
}
1116 
1117 
// Print the FP/SIMD registers to stream_: the raw 64-bit pattern plus its
// double (D) and single (S) interpretations. Only registers whose raw bits
// changed since the last call are printed unless 'print_all_regs' is set.
void Simulator::PrintFPRegisters(bool print_all_regs) {
  static bool first_run = true;
  static uint64_t last_regs[kNumberOfFPRegisters];

  // Print as many rows of registers as necessary, keeping each individual
  // register in the same column each time (to make it easy to visually scan
  // for changes).
  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
      fprintf(stream_,
              "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
              clr_fpreg_name,
              VRegNameForCode(i),
              clr_fpreg_value,
              dreg_bits(i),
              clr_normal,
              clr_fpreg_name,
              DRegNameForCode(i),
              clr_fpreg_value,
              dreg(i),
              clr_fpreg_name,
              SRegNameForCode(i),
              clr_fpreg_value,
              sreg(i),
              clr_normal);
    }
    // Cache the new register value so the next run can detect any changes.
    last_regs[i] = dreg_bits(i);
  }
  first_run = false;
}
1149 
1150 
// Dump flags, integer registers and FP registers. Each Print* function
// applies its own change-detection, so unchanged state is skipped.
void Simulator::PrintProcessorState() {
  PrintSystemRegisters();
  PrintRegisters();
  PrintFPRegisters();
}
1156 
1157 
1158 void Simulator::PrintWrite(uint8_t* address,
1159  uint64_t value,
1160  unsigned num_bytes) {
1161  // The template is "# value -> address". The template is not directly used
1162  // in the printf since compilers tend to struggle with the parametrized
1163  // width (%0*).
1164  const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
1165  fprintf(stream_,
1166  format,
1167  clr_memory_value,
1168  num_bytes * 2, // The width in hexa characters.
1169  value,
1170  clr_normal,
1171  clr_memory_address,
1172  address,
1173  clr_normal);
1174 }
1175 
1176 
1177 // Visitors---------------------------------------------------------------------
1178 
// Decoder hook for instructions the simulator does not implement: report
// the offending instruction and abort.
void Simulator::VisitUnimplemented(Instruction* instr) {
  fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
          reinterpret_cast<void*>(instr), instr->InstructionBits());
  UNIMPLEMENTED();
}
1184 
1185 
// Decoder hook for encodings that are architecturally unallocated: report
// the offending instruction and abort.
void Simulator::VisitUnallocated(Instruction* instr) {
  fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
          reinterpret_cast<void*>(instr), instr->InstructionBits());
  UNIMPLEMENTED();
}
1191 
1192 
1193 void Simulator::VisitPCRelAddressing(Instruction* instr) {
1194  switch (instr->Mask(PCRelAddressingMask)) {
1195  case ADR:
1196  set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
1197  break;
1198  case ADRP: // Not implemented in the assembler.
1199  UNIMPLEMENTED();
1200  break;
1201  default:
1202  UNREACHABLE();
1203  break;
1204  }
1205 }
1206 
1207 
// B/BL: PC-relative unconditional branch. BL first writes the return
// address (the following instruction) to the link register, then falls
// through to the shared branch code.
void Simulator::VisitUnconditionalBranch(Instruction* instr) {
  switch (instr->Mask(UnconditionalBranchMask)) {
    case BL:
      set_lr(instr->following());
      // Fall through.
    case B:
      set_pc(instr->ImmPCOffsetTarget());
      break;
    default:
      UNREACHABLE();
  }
}
1220 
1221 
1222 void Simulator::VisitConditionalBranch(Instruction* instr) {
1223  ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
1224  if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
1225  set_pc(instr->ImmPCOffsetTarget());
1226  }
1227 }
1228 
1229 
// BR/BLR/RET: branch to the address held in Rn. The target is read before
// BLR updates the link register, so 'BLR lr' uses the pre-call value. BLR
// then falls through to the shared branch code.
void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
  Instruction* target = reg<Instruction*>(instr->Rn());
  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
    case BLR: {
      set_lr(instr->following());
      if (instr->Rn() == 31) {
        // BLR XZR is used as a guard for the constant pool. We should never hit
        // this, but if we do trap to allow debugging.
        Debug();
      }
      // Fall through.
    }
    case BR:
    case RET: set_pc(target); break;
    default: UNIMPLEMENTED();
  }
}
1247 
1248 
1249 void Simulator::VisitTestBranch(Instruction* instr) {
1250  unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
1251  instr->ImmTestBranchBit40();
1252  bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
1253  switch (instr->Mask(TestBranchMask)) {
1254  case TBZ: break;
1255  case TBNZ: take_branch = !take_branch; break;
1256  default: UNIMPLEMENTED();
1257  }
1258  if (take_branch) {
1259  set_pc(instr->ImmPCOffsetTarget());
1260  }
1261 }
1262 
1263 
1264 void Simulator::VisitCompareBranch(Instruction* instr) {
1265  unsigned rt = instr->Rt();
1266  bool take_branch = false;
1267  switch (instr->Mask(CompareBranchMask)) {
1268  case CBZ_w: take_branch = (wreg(rt) == 0); break;
1269  case CBZ_x: take_branch = (xreg(rt) == 0); break;
1270  case CBNZ_w: take_branch = (wreg(rt) != 0); break;
1271  case CBNZ_x: take_branch = (xreg(rt) != 0); break;
1272  default: UNIMPLEMENTED();
1273  }
1274  if (take_branch) {
1275  set_pc(instr->ImmPCOffsetTarget());
1276  }
1277 }
1278 
1279 
1280 void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
1281  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
1282  : kWRegSizeInBits;
1283  bool set_flags = instr->FlagsUpdate();
1284  int64_t new_val = 0;
1285  Instr operation = instr->Mask(AddSubOpMask);
1286 
1287  switch (operation) {
1288  case ADD:
1289  case ADDS: {
1290  new_val = AddWithCarry(reg_size,
1291  set_flags,
1292  reg(reg_size, instr->Rn(), instr->RnMode()),
1293  op2);
1294  break;
1295  }
1296  case SUB:
1297  case SUBS: {
1298  new_val = AddWithCarry(reg_size,
1299  set_flags,
1300  reg(reg_size, instr->Rn(), instr->RnMode()),
1301  ~op2,
1302  1);
1303  break;
1304  }
1305  default: UNREACHABLE();
1306  }
1307 
1308  set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
1309 }
1310 
1311 
// Add/subtract with a shifted-register second operand: shift Rm by the
// encoded amount, then defer to the common add/sub path.
void Simulator::VisitAddSubShifted(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  int64_t op2 = ShiftOperand(reg_size,
                             reg(reg_size, instr->Rm()),
                             static_cast<Shift>(instr->ShiftDP()),
                             instr->ImmDPShift());
  AddSubHelper(instr, op2);
}
1321 
1322 
1323 void Simulator::VisitAddSubImmediate(Instruction* instr) {
1324  int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
1325  AddSubHelper(instr, op2);
1326 }
1327 
1328 
// Add/subtract with an extended-register second operand: extend Rm per the
// encoded mode and shift, then defer to the common add/sub path.
void Simulator::VisitAddSubExtended(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  int64_t op2 = ExtendValue(reg_size,
                            reg(reg_size, instr->Rm()),
                            static_cast<Extend>(instr->ExtendMode()),
                            instr->ImmExtendShift());
  AddSubHelper(instr, op2);
}
1338 
1339 
1340 void Simulator::VisitAddSubWithCarry(Instruction* instr) {
1341  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
1342  : kWRegSizeInBits;
1343  int64_t op2 = reg(reg_size, instr->Rm());
1344  int64_t new_val;
1345 
1346  if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
1347  op2 = ~op2;
1348  }
1349 
1350  new_val = AddWithCarry(reg_size,
1351  instr->FlagsUpdate(),
1352  reg(reg_size, instr->Rn()),
1353  op2,
1354  nzcv().C());
1355 
1356  set_reg(reg_size, instr->Rd(), new_val);
1357 }
1358 
1359 
// Logical operation with a shifted-register operand. The NOT bit selects
// the complemented-operand forms (BIC/ORN/EON), applied here before the
// shared logical path.
void Simulator::VisitLogicalShifted(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  Shift shift_type = static_cast<Shift>(instr->ShiftDP());
  unsigned shift_amount = instr->ImmDPShift();
  int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
                             shift_amount);
  if (instr->Mask(NOT) == NOT) {
    op2 = ~op2;
  }
  LogicalHelper(instr, op2);
}
1372 
1373 
// Logical operation with a bitmask-immediate operand; the encoded pattern
// is expanded by ImmLogical().
void Simulator::VisitLogicalImmediate(Instruction* instr) {
  LogicalHelper(instr, instr->ImmLogical());
}
1377 
1378 
// Shared implementation for AND/ANDS/ORR/EOR with a pre-computed second
// operand. Only ANDS updates flags: N and Z from the result, C and V
// cleared.
void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  int64_t op1 = reg(reg_size, instr->Rn());
  int64_t result = 0;
  bool update_flags = false;

  // Switch on the logical operation, stripping out the NOT bit, as it has a
  // different meaning for logical immediate instructions.
  switch (instr->Mask(LogicalOpMask & ~NOT)) {
    case ANDS: update_flags = true;  // Fall through.
    case AND: result = op1 & op2; break;
    case ORR: result = op1 | op2; break;
    case EOR: result = op1 ^ op2; break;
    default:
      UNIMPLEMENTED();
  }

  if (update_flags) {
    nzcv().SetN(CalcNFlag(result, reg_size));
    nzcv().SetZ(CalcZFlag(result));
    nzcv().SetC(0);
    nzcv().SetV(0);
  }

  set_reg(reg_size, instr->Rd(), result, instr->RdMode());
}
1406 
1407 
// CCMP/CCMN with a register second operand: read Rm at the operating width
// and defer to the common conditional-compare path.
void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
}
1413 
1414 
// CCMP/CCMN with a small unsigned immediate second operand.
void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
  ConditionalCompareHelper(instr, instr->ImmCondCmp());
}
1418 
1419 
// Shared CCMP/CCMN implementation. If the condition passes, NZCV is set by
// comparing op1 with op2 (CCMP subtracts via op1 + ~op2 + 1; CCMN adds).
// If the condition fails, NZCV is loaded from the instruction's immediate
// nzcv field instead.
void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  int64_t op1 = reg(reg_size, instr->Rn());

  if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
    // If the condition passes, set the status flags to the result of comparing
    // the operands.
    if (instr->Mask(ConditionalCompareMask) == CCMP) {
      AddWithCarry(reg_size, true, op1, ~op2, 1);
    } else {
      ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
      AddWithCarry(reg_size, true, op1, op2, 0);
    }
  } else {
    // If the condition fails, set the status flags to the nzcv immediate.
    nzcv().SetFlags(instr->Nzcv());
  }
}
1439 
1440 
// Load/store with an unsigned immediate offset, scaled by the access size.
void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
  int offset = instr->ImmLSUnsigned() << instr->SizeLS();
  LoadStoreHelper(instr, offset, Offset);
}
1445 
1446 
// Load/store (LDUR/STUR family) with an unscaled signed 9-bit offset.
void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), Offset);
}
1450 
1451 
// Pre-indexed load/store: the offset is applied before the access and
// written back to the base register.
void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
}
1455 
1456 
// Post-indexed load/store: the access uses the unmodified base address and
// the offset is written back afterwards.
void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
  LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
}
1460 
1461 
// Load/store with a register offset: Rm is extended (UXTW/SXTW/UXTX/SXTX)
// and optionally scaled by the access size before being added to the base.
void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
  Extend ext = static_cast<Extend>(instr->ExtendMode());
  ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
  unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();

  int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext,
                               shift_amount);
  LoadStoreHelper(instr, offset, Offset);
}
1471 
1472 
// Common implementation for single-register loads and stores: compute the
// effective address, perform the access, and apply any write-back in an
// order that keeps the simulated stack walkable if the profiler interrupts
// mid-instruction (see comments below).
void Simulator::LoadStoreHelper(Instruction* instr,
                                int64_t offset,
                                AddrMode addrmode) {
  unsigned srcdst = instr->Rt();
  unsigned addr_reg = instr->Rn();
  uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
  int num_bytes = 1 << instr->SizeLS();
  uint8_t* stack = NULL;

  // Handle the writeback for stores before the store. On a CPU the writeback
  // and the store are atomic, but when running on the simulator it is possible
  // to be interrupted in between. The simulator is not thread safe and V8 does
  // not require it to be to run JavaScript therefore the profiler may sample
  // the "simulated" CPU in the middle of load/store with writeback. The code
  // below ensures that push operations are safe even when interrupted: the
  // stack pointer will be decremented before adding an element to the stack.
  if (instr->IsStore()) {
    LoadStoreWriteBack(addr_reg, offset, addrmode);

    // For store the address post writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());
  }

  LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
  switch (op) {
    // Unsigned loads/stores move 'num_bytes' without sign extension.
    case LDRB_w:
    case LDRH_w:
    case LDR_w:
    case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
    case STRB_w:
    case STRH_w:
    case STR_w:
    case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
    // Signed loads sign-extend the loaded value to the destination width.
    case LDRSB_w: {
      set_wreg(srcdst,
               ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB));
      break;
    }
    case LDRSB_x: {
      set_xreg(srcdst,
               ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB));
      break;
    }
    case LDRSH_w: {
      set_wreg(srcdst,
               ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH));
      break;
    }
    case LDRSH_x: {
      set_xreg(srcdst,
               ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH));
      break;
    }
    case LDRSW_x: {
      set_xreg(srcdst,
               ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
      break;
    }
    // FP loads/stores transfer the raw bit pattern via the S/D registers.
    case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
    case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
    case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
    case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
    default: UNIMPLEMENTED();
  }

  // Handle the writeback for loads after the load to ensure safe pop
  // operation even when interrupted in the middle of it. The stack pointer
  // is only updated after the load so pop(fp) will never break the invariant
  // sp <= fp expected while walking the stack in the sampler.
  if (instr->IsLoad()) {
    // For loads the address pre writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());

    LoadStoreWriteBack(addr_reg, offset, addrmode);
  }

  // Accesses below the stack pointer (but above the platform stack limit) are
  // not allowed in the ABI.
  CheckMemoryAccess(address, stack);
}
1555 
1556 
// LDP/STP with a plain immediate offset (no write-back).
void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}
1560 
1561 
// Pre-indexed LDP/STP: offset applied before the access, then written back.
void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
  LoadStorePairHelper(instr, PreIndex);
}
1565 
1566 
// Post-indexed LDP/STP: access at the base address, offset written back
// afterwards.
void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
  LoadStorePairHelper(instr, PostIndex);
}
1570 
1571 
// LDNP/STNP: the non-temporal cache hint has no effect in the simulator, so
// this behaves exactly like an offset-mode pair access.
void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}
1575 
1576 
// Common implementation for register-pair loads and stores (LDP/STP and
// friends): compute the address, transfer both registers, and order the
// write-back so the simulated stack stays walkable if the profiler samples
// mid-instruction (see comments below).
void Simulator::LoadStorePairHelper(Instruction* instr,
                                    AddrMode addrmode) {
  unsigned rt = instr->Rt();
  unsigned rt2 = instr->Rt2();
  unsigned addr_reg = instr->Rn();
  // The pair offset is scaled by the size of a single element.
  int offset = instr->ImmLSPair() << instr->SizeLSPair();
  uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
  uint8_t* stack = NULL;

  // Handle the writeback for stores before the store. On a CPU the writeback
  // and the store are atomic, but when running on the simulator it is possible
  // to be interrupted in between. The simulator is not thread safe and V8 does
  // not require it to be to run JavaScript therefore the profiler may sample
  // the "simulated" CPU in the middle of load/store with writeback. The code
  // below ensures that push operations are safe even when interrupted: the
  // stack pointer will be decremented before adding an element to the stack.
  if (instr->IsStore()) {
    LoadStoreWriteBack(addr_reg, offset, addrmode);

    // For store the address post writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());
  }

  LoadStorePairOp op =
    static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));

  // 'rt' and 'rt2' can only be aliased for stores.
  ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));

  switch (op) {
    case LDP_w: {
      set_wreg(rt, MemoryRead32(address));
      set_wreg(rt2, MemoryRead32(address + kWRegSize));
      break;
    }
    case LDP_s: {
      set_sreg(rt, MemoryReadFP32(address));
      set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
      break;
    }
    case LDP_x: {
      set_xreg(rt, MemoryRead64(address));
      set_xreg(rt2, MemoryRead64(address + kXRegSize));
      break;
    }
    case LDP_d: {
      set_dreg(rt, MemoryReadFP64(address));
      set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
      break;
    }
    case LDPSW_x: {
      // Load two words and sign-extend each into an X register.
      set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
      set_xreg(rt2, ExtendValue(kXRegSizeInBits,
               MemoryRead32(address + kWRegSize), SXTW));
      break;
    }
    case STP_w: {
      MemoryWrite32(address, wreg(rt));
      MemoryWrite32(address + kWRegSize, wreg(rt2));
      break;
    }
    case STP_s: {
      MemoryWriteFP32(address, sreg(rt));
      MemoryWriteFP32(address + kSRegSize, sreg(rt2));
      break;
    }
    case STP_x: {
      MemoryWrite64(address, xreg(rt));
      MemoryWrite64(address + kXRegSize, xreg(rt2));
      break;
    }
    case STP_d: {
      MemoryWriteFP64(address, dreg(rt));
      MemoryWriteFP64(address + kDRegSize, dreg(rt2));
      break;
    }
    default: UNREACHABLE();
  }

  // Handle the writeback for loads after the load to ensure safe pop
  // operation even when interrupted in the middle of it. The stack pointer
  // is only updated after the load so pop(fp) will never break the invariant
  // sp <= fp expected while walking the stack in the sampler.
  if (instr->IsLoad()) {
    // For loads the address pre writeback is used to check access below the
    // stack.
    stack = reinterpret_cast<uint8_t*>(sp());

    LoadStoreWriteBack(addr_reg, offset, addrmode);
  }

  // Accesses below the stack pointer (but above the platform stack limit) are
  // not allowed in the ABI.
  CheckMemoryAccess(address, stack);
}
1673 
1674 
1675 void Simulator::VisitLoadLiteral(Instruction* instr) {
1676  uint8_t* address = instr->LiteralAddress();
1677  unsigned rt = instr->Rt();
1678 
1679  switch (instr->Mask(LoadLiteralMask)) {
1680  case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
1681  case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
1682  case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
1683  case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
1684  default: UNREACHABLE();
1685  }
1686 }
1687 
1688 
// Compute the effective address for a load/store. Offset and pre-indexed
// modes add the offset before the access; post-indexed addressing leaves
// the base unchanged here (the offset is applied during write-back).
uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
                                     int64_t offset,
                                     AddrMode addrmode) {
  const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
  int64_t address = xreg(addr_reg, Reg31IsStackPointer);
  if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
    // When the base register is SP the stack pointer is required to be
    // quadword aligned prior to the address calculation and write-backs.
    // Misalignment will cause a stack alignment fault.
    FATAL("ALIGNMENT EXCEPTION");
  }

  if ((addrmode == Offset) || (addrmode == PreIndex)) {
    address += offset;
  }

  return reinterpret_cast<uint8_t*>(address);
}
1707 
1708 
1709 void Simulator::LoadStoreWriteBack(unsigned addr_reg,
1710  int64_t offset,
1711  AddrMode addrmode) {
1712  if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
1713  ASSERT(offset != 0);
1714  uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
1715  set_reg(addr_reg, address + offset, Reg31IsStackPointer);
1716  }
1717 }
1718 
1719 
// Abort if 'address' lies below the current stack pointer ('stack') but
// above the platform stack limit: the ABI forbids accessing that region.
void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
  if ((address >= stack_limit_) && (address < stack)) {
    fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
    fprintf(stream_, " sp is here: 0x%16p\n", stack);
    fprintf(stream_, " access was here: 0x%16p\n", address);
    fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
    fprintf(stream_, "\n");
    FATAL("ACCESS BELOW STACK POINTER");
  }
}
1730 
1731 
// Read 'num_bytes' (1..8) from 'address' into the low bytes of a zero-
// initialized uint64_t. memcpy makes unaligned addresses safe.
// NOTE(review): the zero-extension semantics assume a little-endian host —
// consistent with the rest of the simulator, but worth confirming.
uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
  ASSERT(address != NULL);
  ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
  uint64_t read = 0;
  memcpy(&read, address, num_bytes);
  return read;
}
1739 
1740 
// Read a single byte from 'address'.
uint8_t Simulator::MemoryRead8(uint8_t* address) {
  return MemoryRead(address, sizeof(uint8_t));
}
1744 
1745 
// Read a 16-bit half-word from 'address' (unaligned access allowed).
uint16_t Simulator::MemoryRead16(uint8_t* address) {
  return MemoryRead(address, sizeof(uint16_t));
}
1749 
1750 
// Read a 32-bit word from 'address' (unaligned access allowed).
uint32_t Simulator::MemoryRead32(uint8_t* address) {
  return MemoryRead(address, sizeof(uint32_t));
}
1754 
1755 
// Read 32 bits from 'address' and reinterpret the bit pattern as a float.
float Simulator::MemoryReadFP32(uint8_t* address) {
  return rawbits_to_float(MemoryRead32(address));
}
1759 
1760 
// Read a 64-bit double-word from 'address' (unaligned access allowed).
uint64_t Simulator::MemoryRead64(uint8_t* address) {
  return MemoryRead(address, sizeof(uint64_t));
}
1764 
1765 
// Read 64 bits from 'address' and reinterpret the bit pattern as a double.
double Simulator::MemoryReadFP64(uint8_t* address) {
  return rawbits_to_double(MemoryRead64(address));
}
1769 
1770 
// Write the low 'num_bytes' (1..8) of 'value' to 'address', logging the
// access first. memcpy makes unaligned destinations safe.
void Simulator::MemoryWrite(uint8_t* address,
                            uint64_t value,
                            unsigned num_bytes) {
  ASSERT(address != NULL);
  ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));

  LogWrite(address, value, num_bytes);
  memcpy(address, &value, num_bytes);
}
1780 
1781 
// Write a 32-bit word to 'address'.
void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
  MemoryWrite(address, value, sizeof(uint32_t));
}
1785 
1786 
// Write a float's raw bit pattern to 'address' as a 32-bit word.
void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
  MemoryWrite32(address, float_to_rawbits(value));
}
1790 
1791 
// Write a 64-bit double-word to 'address'.
void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
  MemoryWrite(address, value, sizeof(uint64_t));
}
1795 
1796 
// Write a double's raw bit pattern to 'address' as a 64-bit double-word.
void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
  MemoryWrite64(address, double_to_rawbits(value));
}
1800 
1801 
1802 void Simulator::VisitMoveWideImmediate(Instruction* instr) {
1803  MoveWideImmediateOp mov_op =
1804  static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
1805  int64_t new_xn_val = 0;
1806 
1807  bool is_64_bits = instr->SixtyFourBits() == 1;
1808  // Shift is limited for W operations.
1809  ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
1810 
1811  // Get the shifted immediate.
1812  int64_t shift = instr->ShiftMoveWide() * 16;
1813  int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
1814 
1815  // Compute the new value.
1816  switch (mov_op) {
1817  case MOVN_w:
1818  case MOVN_x: {
1819  new_xn_val = ~shifted_imm16;
1820  if (!is_64_bits) new_xn_val &= kWRegMask;
1821  break;
1822  }
1823  case MOVK_w:
1824  case MOVK_x: {
1825  unsigned reg_code = instr->Rd();
1826  int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
1827  : wreg(reg_code);
1828  new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
1829  break;
1830  }
1831  case MOVZ_w:
1832  case MOVZ_x: {
1833  new_xn_val = shifted_imm16;
1834  break;
1835  }
1836  default:
1837  UNREACHABLE();
1838  }
1839 
1840  // Update the destination register.
1841  set_xreg(instr->Rd(), new_xn_val);
1842 }
1843 
1844 
1845 void Simulator::VisitConditionalSelect(Instruction* instr) {
1846  uint64_t new_val = xreg(instr->Rn());
1847 
1848  if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
1849  new_val = xreg(instr->Rm());
1850  switch (instr->Mask(ConditionalSelectMask)) {
1851  case CSEL_w:
1852  case CSEL_x: break;
1853  case CSINC_w:
1854  case CSINC_x: new_val++; break;
1855  case CSINV_w:
1856  case CSINV_x: new_val = ~new_val; break;
1857  case CSNEG_w:
1858  case CSNEG_x: new_val = -new_val; break;
1859  default: UNIMPLEMENTED();
1860  }
1861  }
1862  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
1863  : kWRegSizeInBits;
1864  set_reg(reg_size, instr->Rd(), new_val);
1865 }
1866 
1867 
// Single-source data processing: bit reversal (RBIT), byte reversals
// (REV16/REV32/REV), count-leading-zeros (CLZ) and count-leading-sign-bits
// (CLS), each in W and X widths.
void Simulator::VisitDataProcessing1Source(Instruction* instr) {
  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();

  switch (instr->Mask(DataProcessing1SourceMask)) {
    case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
    case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
    case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
    case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
    case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
    case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
    case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
    case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
                break;
    case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
                break;
    case CLS_w: {
      set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
      break;
    }
    case CLS_x: {
      set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
      break;
    }
    default: UNIMPLEMENTED();
  }
}
1895 
1896 
1897 uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
1898  ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
1899  uint64_t result = 0;
1900  for (unsigned i = 0; i < num_bits; i++) {
1901  result = (result << 1) | (value & 1);
1902  value >>= 1;
1903  }
1904  return result;
1905 }
1906 
1907 
// Permute the bytes of 'value' according to 'mode': swap within each
// half-word (Reverse16), within each word (Reverse32), or the whole
// double-word (Reverse64), implementing REV16/REV32/REV.
uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
  // Split the 64-bit value into an 8-bit array, where b[0] is the least
  // significant byte, and b[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = 0xff00000000000000UL;
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (value & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[Reverse16] is used by REV16_x, REV16_w
  //  permute_table[Reverse32] is used by REV32_x, REV_w
  //  permute_table[Reverse64] is used by REV_x
  ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
                                               {4, 5, 6, 7, 0, 1, 2, 3},
                                               {0, 1, 2, 3, 4, 5, 6, 7} };
  // Reassemble, most-significant byte first, picking source bytes through
  // the permutation table.
  uint64_t result = 0;
  for (int i = 0; i < 8; i++) {
    result <<= 8;
    result |= bytes[permute_table[mode][i]];
  }
  return result;
}
1933 
1934 
// Simulates the two-source data-processing group: signed/unsigned divide
// (SDIV/UDIV) and the register-controlled shifts (LSLV/LSRV/ASRV/RORV).
void Simulator::VisitDataProcessing2Source(Instruction* instr) {
  // NO_SHIFT marks the divide cases; the shift cases overwrite it and are
  // evaluated after the switch.
  Shift shift_op = NO_SHIFT;
  int64_t result = 0;
  switch (instr->Mask(DataProcessing2SourceMask)) {
    case SDIV_w: {
      int32_t rn = wreg(instr->Rn());
      int32_t rm = wreg(instr->Rm());
      // INT_MIN / -1 overflows; the architecture defines the result as
      // INT_MIN, and evaluating it in C++ would be undefined behaviour.
      if ((rn == kWMinInt) && (rm == -1)) {
        result = kWMinInt;
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case SDIV_x: {
      int64_t rn = xreg(instr->Rn());
      int64_t rm = xreg(instr->Rm());
      // As above: INT64_MIN / -1 is architecturally INT64_MIN.
      if ((rn == kXMinInt) && (rm == -1)) {
        result = kXMinInt;
      } else if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_w: {
      uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
      uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case UDIV_x: {
      uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
      uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
      if (rm == 0) {
        // Division by zero can be trapped, but not on A-class processors.
        result = 0;
      } else {
        result = rn / rm;
      }
      break;
    }
    case LSLV_w:
    case LSLV_x: shift_op = LSL; break;
    case LSRV_w:
    case LSRV_x: shift_op = LSR; break;
    case ASRV_w:
    case ASRV_x: shift_op = ASR; break;
    case RORV_w:
    case RORV_x: shift_op = ROR; break;
    default: UNIMPLEMENTED();
  }

  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  if (shift_op != NO_SHIFT) {
    // Shift distance encoded in the least-significant five/six bits of the
    // register.
    int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
    unsigned shift = wreg(instr->Rm()) & mask;
    result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
                          shift);
  }
  set_reg(reg_size, instr->Rd(), result);
}
2010 
2011 
2012 // The algorithm used is described in section 8.2 of
2013 // Hacker's Delight, by Henry S. Warren, Jr.
2014 // It assumes that a right shift on a signed integer is an arithmetic shift.
// Return the high 64 bits of the 128-bit signed product u * v.
// The algorithm used is described in section 8.2 of
// Hacker's Delight, by Henry S. Warren, Jr.
// It assumes that a right shift on a signed integer is an arithmetic shift.
static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
  // Split each operand into an unsigned low half and a signed high half.
  uint64_t u_lo = u & 0xffffffffL;
  int64_t u_hi = u >> 32;
  uint64_t v_lo = v & 0xffffffffL;
  int64_t v_hi = v >> 32;

  // Accumulate the partial products, carrying the top half of each
  // intermediate sum into the next column.
  uint64_t lo_product = u_lo * v_lo;
  int64_t t = u_hi * v_lo + (lo_product >> 32);
  int64_t mid = (t & 0xffffffffL) + u_lo * v_hi;

  return u_hi * v_hi + (t >> 32) + (mid >> 32);
}
2032 
2033 
// Simulates the three-source data-processing group: multiply-add/subtract
// (MADD/MSUB), the widening multiplies (SMADDL/SMSUBL/UMADDL/UMSUBL) and
// the high-half multiply SMULH.
void Simulator::VisitDataProcessing3Source(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;

  int64_t result = 0;
  // Extract and sign- or zero-extend 32-bit arguments for widening operations.
  uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
  uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
  int64_t rn_s32 = reg<int32_t>(instr->Rn());
  int64_t rm_s32 = reg<int32_t>(instr->Rm());
  switch (instr->Mask(DataProcessing3SourceMask)) {
    case MADD_w:
    case MADD_x:
      // The W variant computes in 64 bits too; set_reg truncates below.
      result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
      break;
    case MSUB_w:
    case MSUB_x:
      result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
      break;
    case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
    case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
    case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
    case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
    case SMULH_x:
      // SMULH has no accumulator: Ra must be the zero register.
      ASSERT(instr->Ra() == kZeroRegCode);
      result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
      break;
    default: UNIMPLEMENTED();
  }
  set_reg(reg_size, instr->Rd(), result);
}
2065 
2066 
// Simulates BFM/SBFM/UBFM: rotate a bitfield out of Rn, then merge it into
// either the destination register (BFM) or zero (SBFM/UBFM), optionally
// sign-extending the result (SBFM).
void Simulator::VisitBitfield(Instruction* instr) {
  unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
                                             : kWRegSizeInBits;
  int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
  int64_t R = instr->ImmR();  // Rotation applied to the source.
  int64_t S = instr->ImmS();  // Bit index of the field's top bit.
  int64_t diff = S - R;
  int64_t mask;
  if (diff >= 0) {
    // The field does not wrap: build a mask of (diff + 1) low-order bits.
    mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
                               : reg_mask;
  } else {
    // The field wraps around bit 0: rotate the (S + 1)-bit mask right by R.
    // R != 0 is guaranteed here (diff < 0 implies R > S >= 0), so the
    // (reg_size - R) shift below never reaches reg_size.
    mask = ((1L << (S + 1)) - 1);
    mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
    diff += reg_size;
  }

  // inzero indicates if the extracted bitfield is inserted into the
  // destination register value or in zero.
  // If extend is true, extend the sign of the extracted bitfield.
  bool inzero = false;
  bool extend = false;
  switch (instr->Mask(BitfieldMask)) {
    case BFM_x:
    case BFM_w:
      break;
    case SBFM_x:
    case SBFM_w:
      inzero = true;
      extend = true;
      break;
    case UBFM_x:
    case UBFM_w:
      inzero = true;
      break;
    default:
      UNIMPLEMENTED();
  }

  int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
  int64_t src = reg(reg_size, instr->Rn());
  // Rotate source bitfield into place.
  // NOTE(review): when R == 0 the (reg_size - R) shift equals reg_size,
  // which is undefined in C++; the (src >> 0) term already carries the full
  // value on common compilers, but this deserves an explicit guard — verify.
  int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
  // Determine the sign extension: replicate bit S across all bits above the
  // (rotated) field when extend is requested and the field's top bit is set.
  int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1);
  int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;

  // Merge sign extension, dest/zero and bitfield.
  result = signbits | (result & mask) | (dst & ~mask);

  set_reg(reg_size, instr->Rd(), result);
}
2119 
2120 
2121 void Simulator::VisitExtract(Instruction* instr) {
2122  unsigned lsb = instr->ImmS();
2123  unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
2124  : kWRegSizeInBits;
2125  set_reg(reg_size,
2126  instr->Rd(),
2127  (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
2128  (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
2129 }
2130 
2131 
2132 void Simulator::VisitFPImmediate(Instruction* instr) {
2133  AssertSupportedFPCR();
2134 
2135  unsigned dest = instr->Rd();
2136  switch (instr->Mask(FPImmediateMask)) {
2137  case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
2138  case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
2139  default: UNREACHABLE();
2140  }
2141 }
2142 
2143 
// Simulates the FP <-> integer conversion group: FCVT* (FP to integer with
// the rounding mode encoded in the instruction), FMOV (raw bit moves between
// FP and integer registers) and SCVTF/UCVTF (integer to FP, using the FPCR
// rounding mode).
void Simulator::VisitFPIntegerConvert(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();

  // Only the SCVTF/UCVTF cases consult the FPCR rounding mode; each FCVT*
  // case passes its own architecturally-fixed rounding mode below.
  FPRounding round = fpcr().RMode();

  switch (instr->Mask(FPIntegerConvertMask)) {
    // FCVTA*: round to nearest, ties away from zero.
    case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
    case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
    case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
    case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
    case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
    case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
    case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
    case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
    // FCVTM*: round towards minus infinity.
    case FCVTMS_ws:
      set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_xs:
      set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_wd:
      set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMS_xd:
      set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_ws:
      set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_xs:
      set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_wd:
      set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
      break;
    case FCVTMU_xd:
      set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
      break;
    // FCVTN*: round to nearest, ties to even.
    case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
    case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
    case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
    case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
    case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
    case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
    case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
    case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
    // FCVTZ*: round towards zero (the common C-style truncation).
    case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
    case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
    case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
    case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
    case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
    case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
    case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
    case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
    // FMOV: transfer the raw bit pattern, with no numeric conversion.
    case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
    case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
    case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
    case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;

    // A 32-bit input can be handled in the same way as a 64-bit input, since
    // the sign- or zero-extension will not affect the conversion.
    case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
    case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
    case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
    case UCVTF_dw: {
      // Read as uint32_t so the value is zero-extended, not sign-extended.
      set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
      break;
    }
    case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
    case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
    case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
    case UCVTF_sw: {
      set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
      break;
    }

    default: UNREACHABLE();
  }
}
2226 
2227 
// Simulates SCVTF/UCVTF in their fixed-point form: convert an integer
// register to floating point, scaling the result by 2^-fbits.
void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned dst = instr->Rd();
  unsigned src = instr->Rn();
  // The scale field encodes (64 - fbits).
  int fbits = 64 - instr->FPScale();

  FPRounding round = fpcr().RMode();

  switch (instr->Mask(FPFixedPointConvertMask)) {
    // A 32-bit input can be handled in the same way as a 64-bit input, since
    // the sign- or zero-extension will not affect the conversion.
    case SCVTF_dx_fixed:
      set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
      break;
    case SCVTF_dw_fixed:
      set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
      break;
    case UCVTF_dx_fixed:
      set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
      break;
    case UCVTF_dw_fixed: {
      // Read as uint32_t so the value is zero-extended, not sign-extended.
      set_dreg(dst,
               UFixedToDouble(reg<uint32_t>(src), fbits, round));
      break;
    }
    case SCVTF_sx_fixed:
      set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
      break;
    case SCVTF_sw_fixed:
      set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
      break;
    case UCVTF_sx_fixed:
      set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
      break;
    case UCVTF_sw_fixed: {
      set_sreg(dst,
               UFixedToFloat(reg<uint32_t>(src), fbits, round));
      break;
    }
    default: UNREACHABLE();
  }
}
2271 
2272 
2273 int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
2274  value = FPRoundInt(value, rmode);
2275  if (value >= kWMaxInt) {
2276  return kWMaxInt;
2277  } else if (value < kWMinInt) {
2278  return kWMinInt;
2279  }
2280  return std::isnan(value) ? 0 : static_cast<int32_t>(value);
2281 }
2282 
2283 
2284 int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
2285  value = FPRoundInt(value, rmode);
2286  if (value >= kXMaxInt) {
2287  return kXMaxInt;
2288  } else if (value < kXMinInt) {
2289  return kXMinInt;
2290  }
2291  return std::isnan(value) ? 0 : static_cast<int64_t>(value);
2292 }
2293 
2294 
2295 uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
2296  value = FPRoundInt(value, rmode);
2297  if (value >= kWMaxUInt) {
2298  return kWMaxUInt;
2299  } else if (value < 0.0) {
2300  return 0;
2301  }
2302  return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
2303 }
2304 
2305 
2306 uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
2307  value = FPRoundInt(value, rmode);
2308  if (value >= kXMaxUInt) {
2309  return kXMaxUInt;
2310  } else if (value < 0.0) {
2311  return 0;
2312  }
2313  return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
2314 }
2315 
2316 
2317 void Simulator::VisitFPCompare(Instruction* instr) {
2318  AssertSupportedFPCR();
2319 
2320  unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
2321  : kSRegSizeInBits;
2322  double fn_val = fpreg(reg_size, instr->Rn());
2323 
2324  switch (instr->Mask(FPCompareMask)) {
2325  case FCMP_s:
2326  case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
2327  case FCMP_s_zero:
2328  case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
2329  default: UNIMPLEMENTED();
2330  }
2331 }
2332 
2333 
2334 void Simulator::VisitFPConditionalCompare(Instruction* instr) {
2335  AssertSupportedFPCR();
2336 
2337  switch (instr->Mask(FPConditionalCompareMask)) {
2338  case FCCMP_s:
2339  case FCCMP_d: {
2340  if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2341  // If the condition passes, set the status flags to the result of
2342  // comparing the operands.
2343  unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
2344  : kSRegSizeInBits;
2345  FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
2346  } else {
2347  // If the condition fails, set the status flags to the nzcv immediate.
2348  nzcv().SetFlags(instr->Nzcv());
2349  }
2350  break;
2351  }
2352  default: UNIMPLEMENTED();
2353  }
2354 }
2355 
2356 
2357 void Simulator::VisitFPConditionalSelect(Instruction* instr) {
2358  AssertSupportedFPCR();
2359 
2360  Instr selected;
2361  if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2362  selected = instr->Rn();
2363  } else {
2364  selected = instr->Rm();
2365  }
2366 
2367  switch (instr->Mask(FPConditionalSelectMask)) {
2368  case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
2369  case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
2370  default: UNIMPLEMENTED();
2371  }
2372 }
2373 
2374 
// Simulates the single-source FP group: moves, absolute value, negation,
// square root, round-to-integral (FRINT*) and precision conversion (FCVT).
void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned fd = instr->Rd();
  unsigned fn = instr->Rn();

  switch (instr->Mask(FPDataProcessing1SourceMask)) {
    case FMOV_s: set_sreg(fd, sreg(fn)); break;
    case FMOV_d: set_dreg(fd, dreg(fn)); break;
    case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
    case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
    case FNEG_s: set_sreg(fd, -sreg(fn)); break;
    case FNEG_d: set_dreg(fd, -dreg(fn)); break;
    case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
    case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
    // FRINT*: round to an integral value in FP format; the rounding mode is
    // part of the instruction, not taken from the FPCR.
    case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
    case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
    case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
    case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
    case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
    case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
    // FCVT: single <-> double precision conversion.
    case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
    case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
    default: UNIMPLEMENTED();
  }
}
2401 
2402 
2403 // Assemble the specified IEEE-754 components into the target type and apply
2404 // appropriate rounding.
2405 // sign: 0 = positive, 1 = negative
2406 // exponent: Unbiased IEEE-754 exponent.
2407 // mantissa: The mantissa of the input. The top bit (which is not encoded for
2408 // normal IEEE-754 values) must not be omitted. This bit has the
2409 // value 'pow(2, exponent)'.
2410 //
2411 // The input value is assumed to be a normalized value. That is, the input may
2412 // not be infinity or NaN. If the source value is subnormal, it must be
2413 // normalized before calling this function such that the highest set bit in the
2414 // mantissa has the value 'pow(2, exponent)'.
2415 //
2416 // Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
2417 // calling a templated FPRound.
template <class T, int ebits, int mbits>
static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
                 FPRounding round_mode) {
  ASSERT((sign == 0) || (sign == 1));

  // Only the FPTieEven rounding mode is implemented.
  ASSERT(round_mode == FPTieEven);
  USE(round_mode);

  // Rounding can promote subnormals to normals, and normals to infinities. For
  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
  // encodable as a float, but rounding based on the low-order mantissa bits
  // could make it overflow. With ties-to-even rounding, this value would become
  // an infinity.

  // ---- Rounding Method ----
  //
  // The exponent is irrelevant in the rounding operation, so we treat the
  // lowest-order bit that will fit into the result ('onebit') as having
  // the value '1'. Similarly, the highest-order bit that won't fit into
  // the result ('halfbit') has the value '0.5'. The 'point' sits between
  // 'onebit' and 'halfbit':
  //
  //             These bits fit into the result.
  //              |---------------------|
  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
  //                                    ||
  //                                    |halfbit
  //                                    onebit
  //
  // For subnormal outputs, the range of representable bits is smaller and
  // the position of onebit and halfbit depends on the exponent of the
  // input, but the method is otherwise similar.
  //
  //   onebit(frac)
  //     |
  //     | halfbit(frac)          halfbit(adjusted)
  //     | /                      /
  //     | |                      |
  //  0b00.0 (exact)      -> 0b00.0 (exact)      -> 0b00
  //  0b00.0...           -> 0b00.0...           -> 0b00
  //  0b00.1 (exact)      -> 0b00.0111..111      -> 0b00
  //  0b00.1...           -> 0b00.1...           -> 0b01
  //  0b01.0 (exact)      -> 0b01.0 (exact)      -> 0b01
  //  0b01.0...           -> 0b01.0...           -> 0b01
  //  0b01.1 (exact)      -> 0b01.1 (exact)      -> 0b10
  //  0b01.1...           -> 0b01.1...           -> 0b10
  //  0b10.0 (exact)      -> 0b10.0 (exact)      -> 0b10
  //  0b10.0...           -> 0b10.0...           -> 0b10
  //  0b10.1 (exact)      -> 0b10.0111..111      -> 0b10
  //  0b10.1...           -> 0b10.1...           -> 0b11
  //  0b11.0 (exact)      -> 0b11.0 (exact)      -> 0b11
  //  ...
  //
  // adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
  // mantissa = (mantissa >> shift) + halfbit(adjusted);

  // Bit offsets of the three IEEE-754 fields within the result type T.
  static const int mantissa_offset = 0;
  static const int exponent_offset = mantissa_offset + mbits;
  static const int sign_offset = exponent_offset + ebits;
  STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));

  // Bail out early for zero inputs.
  if (mantissa == 0) {
    return sign << sign_offset;
  }

  // If all bits in the exponent are set, the value is infinite or NaN.
  // This is true for all binary IEEE-754 formats.
  static const int infinite_exponent = (1 << ebits) - 1;
  static const int max_normal_exponent = infinite_exponent - 1;

  // Apply the exponent bias to encode it for the result. Doing this early makes
  // it easy to detect values that will be infinite or subnormal.
  exponent += max_normal_exponent >> 1;

  if (exponent > max_normal_exponent) {
    // Overflow: The input is too large for the result type to represent. The
    // FPTieEven rounding mode handles overflows using infinities.
    exponent = infinite_exponent;
    mantissa = 0;
    return (sign << sign_offset) |
           (exponent << exponent_offset) |
           (mantissa << mantissa_offset);
  }

  // Calculate the shift required to move the top mantissa bit to the proper
  // place in the destination type.
  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
  int shift = highest_significant_bit - mbits;

  if (exponent <= 0) {
    // The output will be subnormal (before rounding).

    // For subnormal outputs, the shift must be adjusted by the exponent. The +1
    // is necessary because the exponent of a subnormal value (encoded as 0) is
    // the same as the exponent of the smallest normal value (encoded as 1).
    shift += -exponent + 1;

    // Handle inputs that would produce a zero output.
    //
    // Shifts higher than highest_significant_bit+1 will always produce a zero
    // result. A shift of exactly highest_significant_bit+1 might produce a
    // non-zero result after rounding.
    if (shift > (highest_significant_bit + 1)) {
      // The result will always be +/-0.0.
      return sign << sign_offset;
    }

    // Properly encode the exponent for a subnormal output.
    exponent = 0;
  } else {
    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
    // normal values.
    mantissa &= ~(1UL << highest_significant_bit);
  }

  if (shift > 0) {
    // We have to shift the mantissa to the right. Some precision is lost, so we
    // need to apply rounding.
    uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
    uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
    uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
    T halfbit_adjusted = (adjusted >> (shift-1)) & 1;

    T result = (sign << sign_offset) |
               (exponent << exponent_offset) |
               ((mantissa >> shift) << mantissa_offset);

    // A very large mantissa can overflow during rounding. If this happens, the
    // exponent should be incremented and the mantissa set to 1.0 (encoded as
    // 0). Applying halfbit_adjusted after assembling the float has the nice
    // side-effect that this case is handled for free.
    //
    // This also handles cases where a very large finite value overflows to
    // infinity, or where a very large subnormal value overflows to become
    // normal.
    return result + halfbit_adjusted;
  } else {
    // We have to shift the mantissa to the left (or not at all). The input
    // mantissa is exactly representable in the output mantissa, so apply no
    // rounding correction.
    return (sign << sign_offset) |
           (exponent << exponent_offset) |
           ((mantissa << -shift) << mantissa_offset);
  }
}
2568 
2569 
2570 // See FPRound for a description of this function.
2571 static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
2572  uint64_t mantissa, FPRounding round_mode) {
2573  int64_t bits =
2574  FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
2575  exponent,
2576  mantissa,
2577  round_mode);
2578  return rawbits_to_double(bits);
2579 }
2580 
2581 
2582 // See FPRound for a description of this function.
2583 static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
2584  uint64_t mantissa, FPRounding round_mode) {
2585  int32_t bits =
2586  FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
2587  exponent,
2588  mantissa,
2589  round_mode);
2590  return rawbits_to_float(bits);
2591 }
2592 
2593 
2594 double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
2595  if (src >= 0) {
2596  return UFixedToDouble(src, fbits, round);
2597  } else {
2598  // This works for all negative values, including INT64_MIN.
2599  return -UFixedToDouble(-src, fbits, round);
2600  }
2601 }
2602 
2603 
2604 double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
2605  // An input of 0 is a special case because the result is effectively
2606  // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
2607  if (src == 0) {
2608  return 0.0;
2609  }
2610 
2611  // Calculate the exponent. The highest significant bit will have the value
2612  // 2^exponent.
2613  const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
2614  const int64_t exponent = highest_significant_bit - fbits;
2615 
2616  return FPRoundToDouble(0, exponent, src, round);
2617 }
2618 
2619 
2620 float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
2621  if (src >= 0) {
2622  return UFixedToFloat(src, fbits, round);
2623  } else {
2624  // This works for all negative values, including INT64_MIN.
2625  return -UFixedToFloat(-src, fbits, round);
2626  }
2627 }
2628 
2629 
2630 float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
2631  // An input of 0 is a special case because the result is effectively
2632  // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
2633  if (src == 0) {
2634  return 0.0f;
2635  }
2636 
2637  // Calculate the exponent. The highest significant bit will have the value
2638  // 2^exponent.
2639  const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
2640  const int32_t exponent = highest_significant_bit - fbits;
2641 
2642  return FPRoundToFloat(0, exponent, src, round);
2643 }
2644 
2645 
// Round value to an integral double according to round_mode. Zero and
// infinities pass through unchanged; NaNs are quietened via FPProcessNaN.
double Simulator::FPRoundInt(double value, FPRounding round_mode) {
  if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
      (value == kFP64NegativeInfinity)) {
    return value;
  } else if (std::isnan(value)) {
    return FPProcessNaN(value);
  }

  // Start from floor(value); 'error' is the discarded fraction in [0, 1).
  double int_result = floor(value);
  double error = value - int_result;
  switch (round_mode) {
    case FPTieAway: {
      // If the error is greater than 0.5, or is equal to 0.5 and the integer
      // result is positive, round up.
      if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
        int_result++;
      }
      break;
    }
    case FPTieEven: {
      // If the error is greater than 0.5, or is equal to 0.5 and the integer
      // result is odd, round up.
      // (fmod(int_result, 2) != 0 is the oddness test; it is exact because
      // int_result is integral here.)
      if ((error > 0.5) ||
          ((error == 0.5) && (fmod(int_result, 2) != 0))) {
        int_result++;
      }
      break;
    }
    case FPZero: {
      // If value > 0 then we take floor(value)
      // otherwise, ceil(value)
      if (value < 0) {
        int_result = ceil(value);
      }
      break;
    }
    case FPNegativeInfinity: {
      // We always use floor(value).
      break;
    }
    default: UNIMPLEMENTED();
  }
  return int_result;
}
2690 
2691 
// Widen a single-precision value to double precision, with architectural
// NaN propagation.
double Simulator::FPToDouble(float value) {
  switch (std::fpclassify(value)) {
    case FP_NAN: {
      // With FPCR.DN set, every NaN input produces the default NaN.
      if (fpcr().DN()) return kFP64DefaultNaN;

      // Convert NaNs as the processor would:
      //  - The sign is propagated.
      //  - The payload (mantissa) is transferred entirely, except that the top
      //    bit is forced to '1', making the result a quiet NaN. The unused
      //    (low-order) payload bits are set to 0.
      uint32_t raw = float_to_rawbits(value);

      uint64_t sign = raw >> 31;
      uint64_t exponent = (1 << 11) - 1;
      // Bits 21:0 of the float payload land in bits 50:29 of the double;
      // the float's top mantissa bit (22) maps to double bit 51, which is
      // forced to 1 below anyway.
      uint64_t payload = unsigned_bitextract_64(21, 0, raw);
      payload <<= (52 - 23);  // The unused low-order bits should be 0.
      payload |= (1L << 51);  // Force a quiet NaN.

      return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
    }

    case FP_ZERO:
    case FP_NORMAL:
    case FP_SUBNORMAL:
    case FP_INFINITE: {
      // All other inputs are preserved in a standard cast, because every value
      // representable using an IEEE-754 float is also representable using an
      // IEEE-754 double.
      return static_cast<double>(value);
    }
  }

  UNREACHABLE();
  return static_cast<double>(value);
}
2727 
2728 
// Narrow a double-precision value to single precision, with architectural
// NaN propagation and FPTieEven rounding of in-range values.
float Simulator::FPToFloat(double value, FPRounding round_mode) {
  // Only the FPTieEven rounding mode is implemented.
  ASSERT(round_mode == FPTieEven);
  USE(round_mode);

  switch (std::fpclassify(value)) {
    case FP_NAN: {
      // With FPCR.DN set, every NaN input produces the default NaN.
      if (fpcr().DN()) return kFP32DefaultNaN;

      // Convert NaNs as the processor would:
      //  - The sign is propagated.
      //  - The payload (mantissa) is transferred as much as possible, except
      //    that the top bit is forced to '1', making the result a quiet NaN.
      uint64_t raw = double_to_rawbits(value);

      uint32_t sign = raw >> 63;
      uint32_t exponent = (1 << 8) - 1;
      // Keep the top 22 payload bits (double bits 50:29); the rest are lost
      // in the narrower float mantissa.
      uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
      payload |= (1 << 22);   // Force a quiet NaN.

      return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
    }

    case FP_ZERO:
    case FP_INFINITE: {
      // In a C++ cast, any value representable in the target type will be
      // unchanged. This is always the case for +/-0.0 and infinities.
      return static_cast<float>(value);
    }

    case FP_NORMAL:
    case FP_SUBNORMAL: {
      // Convert double-to-float as the processor would, assuming that FPCR.FZ
      // (flush-to-zero) is not set.
      uint64_t raw = double_to_rawbits(value);
      // Extract the IEEE-754 double components.
      uint32_t sign = raw >> 63;
      // Extract the exponent and remove the IEEE-754 encoding bias.
      int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
      // Extract the mantissa and add the implicit '1' bit.
      uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
      if (std::fpclassify(value) == FP_NORMAL) {
        mantissa |= (1UL << 52);
      }
      return FPRoundToFloat(sign, exponent, mantissa, round_mode);
    }
  }

  UNREACHABLE();
  return value;
}
2780 
2781 
// Simulates the two-source FP group: arithmetic (FADD/FSUB/FMUL/FDIV) and
// min/max, including the NaN-tolerant FMAXNM/FMINNM variants.
void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
  AssertSupportedFPCR();

  unsigned fd = instr->Rd();
  unsigned fn = instr->Rn();
  unsigned fm = instr->Rm();

  // Fmaxnm and Fminnm have special NaN handling.
  // They must run before the generic FPProcessNaNs() below, which would
  // otherwise propagate a NaN operand straight to the destination.
  switch (instr->Mask(FPDataProcessing2SourceMask)) {
    case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
    case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
    case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
    case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
    default:
      break;    // Fall through.
  }

  // Standard NaN propagation: if either operand is a NaN, the result has
  // already been written and there is nothing more to do.
  if (FPProcessNaNs(instr)) return;

  switch (instr->Mask(FPDataProcessing2SourceMask)) {
    case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
    case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
    case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
    case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
    case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
    case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
    case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
    case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
    case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
    case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
    case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
    case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
    case FMAXNM_s:
    case FMAXNM_d:
    case FMINNM_s:
    case FMINNM_d:
      // These were handled before the standard FPProcessNaNs() stage.
      UNREACHABLE();
    default: UNIMPLEMENTED();
  }
}
2823 
2824 
// Simulates the three-source (fused multiply-accumulate) FP instructions:
// FMADD, FMSUB, FNMADD and FNMSUB, in single and double precision. Every
// variant is reduced to FPMulAdd(acc, n, m) == acc + (n * m) by negating
// the accumulator and/or the first multiplicand as required.
void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
  AssertSupportedFPCR();

  // Register codes: destination, the two multiplicands (fn, fm) and the
  // accumulator (fa).
  unsigned fd = instr->Rd();
  unsigned fn = instr->Rn();
  unsigned fm = instr->Rm();
  unsigned fa = instr->Ra();

  switch (instr->Mask(FPDataProcessing3SourceMask)) {
    // fd = fa +/- (fn * fm)
    case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
    case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
    case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
    case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
    // Negated variants of the above: -(fa +/- (fn * fm)), expressed by
    // negating both fa and (where needed) fn.
    case FNMADD_s:
      set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
      break;
    case FNMSUB_s:
      set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
      break;
    case FNMADD_d:
      set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
      break;
    case FNMSUB_d:
      set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
      break;
    default: UNIMPLEMENTED();
  }
}
2855 
2856 
2857 template <typename T>
2858 T Simulator::FPAdd(T op1, T op2) {
2859  // NaNs should be handled elsewhere.
2860  ASSERT(!std::isnan(op1) && !std::isnan(op2));
2861 
2862  if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
2863  // inf + -inf returns the default NaN.
2864  return FPDefaultNaN<T>();
2865  } else {
2866  // Other cases should be handled by standard arithmetic.
2867  return op1 + op2;
2868  }
2869 }
2870 
2871 
2872 template <typename T>
2873 T Simulator::FPDiv(T op1, T op2) {
2874  // NaNs should be handled elsewhere.
2875  ASSERT(!std::isnan(op1) && !std::isnan(op2));
2876 
2877  if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
2878  // inf / inf and 0.0 / 0.0 return the default NaN.
2879  return FPDefaultNaN<T>();
2880  } else {
2881  // Other cases should be handled by standard arithmetic.
2882  return op1 / op2;
2883  }
2884 }
2885 
2886 
2887 template <typename T>
2888 T Simulator::FPMax(T a, T b) {
2889  // NaNs should be handled elsewhere.
2890  ASSERT(!std::isnan(a) && !std::isnan(b));
2891 
2892  if ((a == 0.0) && (b == 0.0) &&
2893  (copysign(1.0, a) != copysign(1.0, b))) {
2894  // a and b are zero, and the sign differs: return +0.0.
2895  return 0.0;
2896  } else {
2897  return (a > b) ? a : b;
2898  }
2899 }
2900 
2901 
2902 template <typename T>
2903 T Simulator::FPMaxNM(T a, T b) {
2904  if (IsQuietNaN(a) && !IsQuietNaN(b)) {
2905  a = kFP64NegativeInfinity;
2906  } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
2907  b = kFP64NegativeInfinity;
2908  }
2909 
2910  T result = FPProcessNaNs(a, b);
2911  return std::isnan(result) ? result : FPMax(a, b);
2912 }
2913 
2914 template <typename T>
2915 T Simulator::FPMin(T a, T b) {
2916  // NaNs should be handled elsewhere.
2917  ASSERT(!isnan(a) && !isnan(b));
2918 
2919  if ((a == 0.0) && (b == 0.0) &&
2920  (copysign(1.0, a) != copysign(1.0, b))) {
2921  // a and b are zero, and the sign differs: return -0.0.
2922  return -0.0;
2923  } else {
2924  return (a < b) ? a : b;
2925  }
2926 }
2927 
2928 
2929 template <typename T>
2930 T Simulator::FPMinNM(T a, T b) {
2931  if (IsQuietNaN(a) && !IsQuietNaN(b)) {
2932  a = kFP64PositiveInfinity;
2933  } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
2934  b = kFP64PositiveInfinity;
2935  }
2936 
2937  T result = FPProcessNaNs(a, b);
2938  return std::isnan(result) ? result : FPMin(a, b);
2939 }
2940 
2941 
2942 template <typename T>
2943 T Simulator::FPMul(T op1, T op2) {
2944  // NaNs should be handled elsewhere.
2945  ASSERT(!std::isnan(op1) && !std::isnan(op2));
2946 
2947  if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
2948  // inf * 0.0 returns the default NaN.
2949  return FPDefaultNaN<T>();
2950  } else {
2951  // Other cases should be handled by standard arithmetic.
2952  return op1 * op2;
2953  }
2954 }
2955 
2956 
// Simulates a fused multiply-add: returns a + (op1 * op2) with ARM NaN
// semantics, plus workarounds for host fma() implementations that produce
// the wrong sign on zero results.
template<typename T>
T Simulator::FPMulAdd(T a, T op1, T op2) {
  // Standard three-operand NaN propagation; non-NaN (0.0) means no NaN seen.
  T result = FPProcessNaNs3(a, op1, op2);

  // Sign of the accumulator and of the (unrounded) product, used both for
  // the invalid-operation check and the signed-zero workarounds below.
  T sign_a = copysign(1.0, a);
  T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
  bool isinf_prod = std::isinf(op1) || std::isinf(op2);
  // The operation itself is invalid (and must generate a NaN) in these cases.
  bool operation_generates_nan =
      (std::isinf(op1) && (op2 == 0.0)) ||                      // inf * 0.0
      (std::isinf(op2) && (op1 == 0.0)) ||                      // 0.0 * inf
      (std::isinf(a) && isinf_prod && (sign_a != sign_prod));   // inf - inf

  if (std::isnan(result)) {
    // Generated NaNs override quiet NaNs propagated from a.
    if (operation_generates_nan && IsQuietNaN(a)) {
      return FPDefaultNaN<T>();
    } else {
      return result;
    }
  }

  // If the operation would produce a NaN, return the default NaN.
  if (operation_generates_nan) {
    return FPDefaultNaN<T>();
  }

  // Work around broken fma implementations for exact zero results: The sign of
  // exact 0.0 results is positive unless both a and op1 * op2 are negative.
  if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
    return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
  }

  // Delegate the actual fused computation to the host.
  result = FusedMultiplyAdd(op1, op2, a);
  ASSERT(!std::isnan(result));

  // Work around broken fma implementations for rounded zero results: If a is
  // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
  if ((a == 0.0) && (result == 0.0)) {
    return copysign(0.0, sign_prod);
  }

  return result;
}
3000 
3001 
3002 template <typename T>
3003 T Simulator::FPSqrt(T op) {
3004  if (std::isnan(op)) {
3005  return FPProcessNaN(op);
3006  } else if (op < 0.0) {
3007  return FPDefaultNaN<T>();
3008  } else {
3009  return std::sqrt(op);
3010  }
3011 }
3012 
3013 
3014 template <typename T>
3015 T Simulator::FPSub(T op1, T op2) {
3016  // NaNs should be handled elsewhere.
3017  ASSERT(!std::isnan(op1) && !std::isnan(op2));
3018 
3019  if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
3020  // inf - inf returns the default NaN.
3021  return FPDefaultNaN<T>();
3022  } else {
3023  // Other cases should be handled by standard arithmetic.
3024  return op1 - op2;
3025  }
3026 }
3027 
3028 
// Converts a NaN input into the NaN the instruction should return: the
// default NaN if FPCR.DN (default-NaN mode) is set, otherwise the quiet
// form of the input NaN.
template <typename T>
T Simulator::FPProcessNaN(T op) {
  ASSERT(std::isnan(op));
  return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
}
3034 
3035 
3036 template <typename T>
3037 T Simulator::FPProcessNaNs(T op1, T op2) {
3038  if (IsSignallingNaN(op1)) {
3039  return FPProcessNaN(op1);
3040  } else if (IsSignallingNaN(op2)) {
3041  return FPProcessNaN(op2);
3042  } else if (std::isnan(op1)) {
3043  ASSERT(IsQuietNaN(op1));
3044  return FPProcessNaN(op1);
3045  } else if (std::isnan(op2)) {
3046  ASSERT(IsQuietNaN(op2));
3047  return FPProcessNaN(op2);
3048  } else {
3049  return 0.0;
3050  }
3051 }
3052 
3053 
3054 template <typename T>
3055 T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
3056  if (IsSignallingNaN(op1)) {
3057  return FPProcessNaN(op1);
3058  } else if (IsSignallingNaN(op2)) {
3059  return FPProcessNaN(op2);
3060  } else if (IsSignallingNaN(op3)) {
3061  return FPProcessNaN(op3);
3062  } else if (std::isnan(op1)) {
3063  ASSERT(IsQuietNaN(op1));
3064  return FPProcessNaN(op1);
3065  } else if (std::isnan(op2)) {
3066  ASSERT(IsQuietNaN(op2));
3067  return FPProcessNaN(op2);
3068  } else if (std::isnan(op3)) {
3069  ASSERT(IsQuietNaN(op3));
3070  return FPProcessNaN(op3);
3071  } else {
3072  return 0.0;
3073  }
3074 }
3075 
3076 
// Shared NaN-propagation step for two-source FP instructions. If either
// source register holds a NaN, the processed NaN is written to the
// destination register and true is returned, telling the caller that the
// instruction is already complete. Returns false when no NaN was seen.
bool Simulator::FPProcessNaNs(Instruction* instr) {
  unsigned fd = instr->Rd();
  unsigned fn = instr->Rn();
  unsigned fm = instr->Rm();
  bool done = false;

  // The FP64 bit in the instruction selects the double-precision form.
  if (instr->Mask(FP64) == FP64) {
    double result = FPProcessNaNs(dreg(fn), dreg(fm));
    if (std::isnan(result)) {
      set_dreg(fd, result);
      done = true;
    }
  } else {
    float result = FPProcessNaNs(sreg(fn), sreg(fm));
    if (std::isnan(result)) {
      set_sreg(fd, result);
      done = true;
    }
  }

  return done;
}
3099 
3100 
// Simulates system instructions: MRS/MSR accesses to the NZCV and FPCR
// system registers, the HINT group (only NOP is supported), and memory
// barriers.
void Simulator::VisitSystem(Instruction* instr) {
  // Some system instructions hijack their Op and Cp fields to represent a
  // range of immediates instead of indicating a different instruction. This
  // makes the decoding tricky.
  if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
    switch (instr->Mask(SystemSysRegMask)) {
      case MRS: {
        // Read a system register into Xt.
        switch (instr->ImmSystemRegister()) {
          case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
          case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
          default: UNIMPLEMENTED();
        }
        break;
      }
      case MSR: {
        // Write Xt into a system register.
        switch (instr->ImmSystemRegister()) {
          case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
          case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
          default: UNIMPLEMENTED();
        }
        break;
      }
    }
  } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
    // Only the NOP hint is simulated; other hints are rejected.
    ASSERT(instr->Mask(SystemHintMask) == HINT);
    switch (instr->ImmHint()) {
      case NOP: break;
      default: UNIMPLEMENTED();
    }
  } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
    // Model all memory barriers with a full host barrier: conservative but
    // correct.
    __sync_synchronize();
  } else {
    UNIMPLEMENTED();
  }
}
3136 
3137 
// Parses a debugger operand. 'desc' may name a register (a leading 'w'
// selects the 32-bit view), or be a "0x"-prefixed hexadecimal literal, or
// a plain decimal literal. On success the parsed value is stored in
// *value and true is returned; on failure returns false.
bool Simulator::GetValue(const char* desc, int64_t* value) {
  int regnum = CodeFromName(desc);
  if (regnum >= 0) {
    unsigned code = regnum;
    if (code == kZeroRegCode) {
      // Catch the zero register and return 0.
      *value = 0;
      return true;
    } else if (code == kSPRegInternalCode) {
      // Translate the stack pointer code to 31, for Reg31IsStackPointer.
      code = 31;
    }
    if (desc[0] == 'w') {
      *value = wreg(code, Reg31IsStackPointer);
    } else {
      *value = xreg(code, Reg31IsStackPointer);
    }
    return true;
  } else if (strncmp(desc, "0x", 2) == 0) {
    // Hexadecimal literal (without the "0x" prefix for scanning).
    return SScanF(desc + 2, "%" SCNx64,
                  reinterpret_cast<uint64_t*>(value)) == 1;
  } else {
    // Decimal literal.
    return SScanF(desc, "%" SCNu64,
                  reinterpret_cast<uint64_t*>(value)) == 1;
  }
}
3164 
3165 
// Prints the value of the register named by 'desc' for the debugger shell.
// The stack pointer ("csp"/"wcsp") is handled specially; otherwise the
// leading character of the name selects the view: 'v' (raw FP bits plus
// both FP views), 'd' (double), 's' (single), 'w' (32-bit integer), and
// anything else is treated as a 64-bit X register. Returns false if the
// name is not a recognized register.
bool Simulator::PrintValue(const char* desc) {
  if (strcmp(desc, "csp") == 0) {
    ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
    PrintF("%s csp:%s 0x%016" PRIx64 "%s\n",
        clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
    return true;
  } else if (strcmp(desc, "wcsp") == 0) {
    ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
    PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n",
        clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
    return true;
  }

  int i = CodeFromName(desc);
  // The bounds check below relies on the register files being the same size.
  STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
  if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;

  if (desc[0] == 'v') {
    // Raw bit pattern plus the double and single interpretations.
    PrintF("%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
        clr_fpreg_name, VRegNameForCode(i),
        clr_fpreg_value, double_to_rawbits(dreg(i)),
        clr_normal,
        clr_fpreg_name, DRegNameForCode(i),
        clr_fpreg_value, dreg(i),
        clr_fpreg_name, SRegNameForCode(i),
        clr_fpreg_value, sreg(i),
        clr_normal);
    return true;
  } else if (desc[0] == 'd') {
    PrintF("%s %s:%s %g%s\n",
        clr_fpreg_name, DRegNameForCode(i),
        clr_fpreg_value, dreg(i),
        clr_normal);
    return true;
  } else if (desc[0] == 's') {
    PrintF("%s %s:%s %g%s\n",
        clr_fpreg_name, SRegNameForCode(i),
        clr_fpreg_value, sreg(i),
        clr_normal);
    return true;
  } else if (desc[0] == 'w') {
    PrintF("%s %s:%s 0x%08" PRIx32 "%s\n",
        clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
    return true;
  } else {
    // X register names have a wide variety of starting characters, but anything
    // else will be an X register.
    PrintF("%s %s:%s 0x%016" PRIx64 "%s\n",
        clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
    return true;
  }
}
3218 
3219 
3220 void Simulator::Debug() {
3221 #define COMMAND_SIZE 63
3222 #define ARG_SIZE 255
3223 
3224 #define STR(a) #a
3225 #define XSTR(a) STR(a)
3226 
3227  char cmd[COMMAND_SIZE + 1];
3228  char arg1[ARG_SIZE + 1];
3229  char arg2[ARG_SIZE + 1];
3230  char* argv[3] = { cmd, arg1, arg2 };
3231 
3232  // Make sure to have a proper terminating character if reaching the limit.
3233  cmd[COMMAND_SIZE] = 0;
3234  arg1[ARG_SIZE] = 0;
3235  arg2[ARG_SIZE] = 0;
3236 
3237  bool done = false;
3238  bool cleared_log_disasm_bit = false;
3239 
3240  while (!done) {
3241  // Disassemble the next instruction to execute before doing anything else.
3242  PrintInstructionsAt(pc_, 1);
3243  // Read the command line.
3244  char* line = ReadLine("sim> ");
3245  if (line == NULL) {
3246  break;
3247  } else {
3248  // Repeat last command by default.
3249  char* last_input = last_debugger_input();
3250  if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
3251  DeleteArray(line);
3252  line = last_input;
3253  } else {
3254  // Update the latest command ran
3255  set_last_debugger_input(line);
3256  }
3257 
3258  // Use sscanf to parse the individual parts of the command line. At the
3259  // moment no command expects more than two parameters.
3260  int argc = SScanF(line,
3261  "%" XSTR(COMMAND_SIZE) "s "
3262  "%" XSTR(ARG_SIZE) "s "
3263  "%" XSTR(ARG_SIZE) "s",
3264  cmd, arg1, arg2);
3265 
3266  // stepi / si ------------------------------------------------------------
3267  if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
3268  // We are about to execute instructions, after which by default we
3269  // should increment the pc_. If it was set when reaching this debug
3270  // instruction, it has not been cleared because this instruction has not
3271  // completed yet. So clear it manually.
3272  pc_modified_ = false;
3273 
3274  if (argc == 1) {
3275  ExecuteInstruction();
3276  } else {
3277  int64_t number_of_instructions_to_execute = 1;
3278  GetValue(arg1, &number_of_instructions_to_execute);
3279 
3280  set_log_parameters(log_parameters() | LOG_DISASM);
3281  while (number_of_instructions_to_execute-- > 0) {
3282  ExecuteInstruction();
3283  }
3284  set_log_parameters(log_parameters() & ~LOG_DISASM);
3285  PrintF("\n");
3286  }
3287 
3288  // If it was necessary, the pc has already been updated or incremented
3289  // when executing the instruction. So we do not want it to be updated
3290  // again. It will be cleared when exiting.
3291  pc_modified_ = true;
3292 
3293  // next / n --------------------------------------------------------------
3294  } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
3295  // Tell the simulator to break after the next executed BL.
3296  break_on_next_ = true;
3297  // Continue.
3298  done = true;
3299 
3300  // continue / cont / c ---------------------------------------------------
3301  } else if ((strcmp(cmd, "continue") == 0) ||
3302  (strcmp(cmd, "cont") == 0) ||
3303  (strcmp(cmd, "c") == 0)) {
3304  // Leave the debugger shell.
3305  done = true;
3306 
3307  // disassemble / disasm / di ---------------------------------------------
3308  } else if (strcmp(cmd, "disassemble") == 0 ||
3309  strcmp(cmd, "disasm") == 0 ||
3310  strcmp(cmd, "di") == 0) {
3311  int64_t n_of_instrs_to_disasm = 10; // default value.
3312  int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
3313  if (argc >= 2) { // disasm <n of instrs>
3314  GetValue(arg1, &n_of_instrs_to_disasm);
3315  }
3316  if (argc >= 3) { // disasm <n of instrs> <address>
3317  GetValue(arg2, &address);
3318  }
3319 
3320  // Disassemble.
3321  PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
3322  n_of_instrs_to_disasm);
3323  PrintF("\n");
3324 
3325  // print / p -------------------------------------------------------------
3326  } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
3327  if (argc == 2) {
3328  if (strcmp(arg1, "all") == 0) {
3329  PrintRegisters(true);
3330  PrintFPRegisters(true);
3331  } else {
3332  if (!PrintValue(arg1)) {
3333  PrintF("%s unrecognized\n", arg1);
3334  }
3335  }
3336  } else {
3337  PrintF(
3338  "print <register>\n"
3339  " Print the content of a register. (alias 'p')\n"
3340  " 'print all' will print all registers.\n"
3341  " Use 'printobject' to get more details about the value.\n");
3342  }
3343 
3344  // printobject / po ------------------------------------------------------
3345  } else if ((strcmp(cmd, "printobject") == 0) ||
3346  (strcmp(cmd, "po") == 0)) {
3347  if (argc == 2) {
3348  int64_t value;
3349  if (GetValue(arg1, &value)) {
3350  Object* obj = reinterpret_cast<Object*>(value);
3351  PrintF("%s: \n", arg1);
3352 #ifdef DEBUG
3353  obj->PrintLn();
3354 #else
3355  obj->ShortPrint();
3356  PrintF("\n");
3357 #endif
3358  } else {
3359  PrintF("%s unrecognized\n", arg1);
3360  }
3361  } else {
3362  PrintF("printobject <value>\n"
3363  "printobject <register>\n"
3364  " Print details about the value. (alias 'po')\n");
3365  }
3366 
3367  // stack / mem ----------------------------------------------------------
3368  } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
3369  int64_t* cur = NULL;
3370  int64_t* end = NULL;
3371  int next_arg = 1;
3372 
3373  if (strcmp(cmd, "stack") == 0) {
3374  cur = reinterpret_cast<int64_t*>(jssp());
3375 
3376  } else { // "mem"
3377  int64_t value;
3378  if (!GetValue(arg1, &value)) {
3379  PrintF("%s unrecognized\n", arg1);
3380  continue;
3381  }
3382  cur = reinterpret_cast<int64_t*>(value);
3383  next_arg++;
3384  }
3385 
3386  int64_t words = 0;
3387  if (argc == next_arg) {
3388  words = 10;
3389  } else if (argc == next_arg + 1) {
3390  if (!GetValue(argv[next_arg], &words)) {
3391  PrintF("%s unrecognized\n", argv[next_arg]);
3392  PrintF("Printing 10 double words by default");
3393  words = 10;
3394  }
3395  } else {
3396  UNREACHABLE();
3397  }
3398  end = cur + words;
3399 
3400  while (cur < end) {
3401  PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
3402  reinterpret_cast<uint64_t>(cur), *cur, *cur);
3403  HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
3404  int64_t value = *cur;
3405  Heap* current_heap = v8::internal::Isolate::Current()->heap();
3406  if (((value & 1) == 0) || current_heap->Contains(obj)) {
3407  PrintF(" (");
3408  if ((value & kSmiTagMask) == 0) {
3410  int32_t untagged = (value >> kSmiShift) & 0xffffffff;
3411  PrintF("smi %" PRId32, untagged);
3412  } else {
3413  obj->ShortPrint();
3414  }
3415  PrintF(")");
3416  }
3417  PrintF("\n");
3418  cur++;
3419  }
3420 
3421  // trace / t -------------------------------------------------------------
3422  } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
3423  if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
3424  (LOG_DISASM | LOG_REGS)) {
3425  PrintF("Enabling disassembly and registers tracing\n");
3426  set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
3427  } else {
3428  PrintF("Disabling disassembly and registers tracing\n");
3429  set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
3430  }
3431 
3432  // break / b -------------------------------------------------------------
3433  } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
3434  if (argc == 2) {
3435  int64_t value;
3436  if (GetValue(arg1, &value)) {
3437  SetBreakpoint(reinterpret_cast<Instruction*>(value));
3438  } else {
3439  PrintF("%s unrecognized\n", arg1);
3440  }
3441  } else {
3442  ListBreakpoints();
3443  PrintF("Use `break <address>` to set or disable a breakpoint\n");
3444  }
3445 
3446  // gdb -------------------------------------------------------------------
3447  } else if (strcmp(cmd, "gdb") == 0) {
3448  PrintF("Relinquishing control to gdb.\n");
3449  OS::DebugBreak();
3450  PrintF("Regaining control from gdb.\n");
3451 
3452  // sysregs ---------------------------------------------------------------
3453  } else if (strcmp(cmd, "sysregs") == 0) {
3454  PrintSystemRegisters();
3455 
3456  // help / h --------------------------------------------------------------
3457  } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
3458  PrintF(
3459  "stepi / si\n"
3460  " stepi <n>\n"
3461  " Step <n> instructions.\n"
3462  "next / n\n"
3463  " Continue execution until a BL instruction is reached.\n"
3464  " At this point a breakpoint is set just after this BL.\n"
3465  " Then execution is resumed. It will probably later hit the\n"
3466  " breakpoint just set.\n"
3467  "continue / cont / c\n"
3468  " Continue execution from here.\n"
3469  "disassemble / disasm / di\n"
3470  " disassemble <n> <address>\n"
3471  " Disassemble <n> instructions from current <address>.\n"
3472  " By default <n> is 20 and <address> is the current pc.\n"
3473  "print / p\n"
3474  " print <register>\n"
3475  " Print the content of a register.\n"
3476  " 'print all' will print all registers.\n"
3477  " Use 'printobject' to get more details about the value.\n"
3478  "printobject / po\n"
3479  " printobject <value>\n"
3480  " printobject <register>\n"
3481  " Print details about the value.\n"
3482  "stack\n"
3483  " stack [<words>]\n"
3484  " Dump stack content, default dump 10 words\n"
3485  "mem\n"
3486  " mem <address> [<words>]\n"
3487  " Dump memory content, default dump 10 words\n"
3488  "trace / t\n"
3489  " Toggle disassembly and register tracing\n"
3490  "break / b\n"
3491  " break : list all breakpoints\n"
3492  " break <address> : set / enable / disable a breakpoint.\n"
3493  "gdb\n"
3494  " Enter gdb.\n"
3495  "sysregs\n"
3496  " Print all system registers (including NZCV).\n");
3497  } else {
3498  PrintF("Unknown command: %s\n", cmd);
3499  PrintF("Use 'help' for more information.\n");
3500  }
3501  }
3502  if (cleared_log_disasm_bit == true) {
3503  set_log_parameters(log_parameters_ | LOG_DISASM);
3504  }
3505  }
3506 }
3507 
3508 
// Simulates HLT-based pseudo-instructions that the assembler embeds in the
// instruction stream: debug/trace markers, redirected (runtime) calls,
// simulated printf, and the unreachable marker. The extra arguments for
// these markers are encoded inline after the HLT and are skipped here by
// adjusting pc_.
void Simulator::VisitException(Instruction* instr) {
  switch (instr->Mask(ExceptionMask)) {
    case HLT: {
      if (instr->ImmException() == kImmExceptionIsDebug) {
        // Read the arguments encoded inline in the instruction stream.
        uint32_t code;
        uint32_t parameters;

        memcpy(&code,
               pc_->InstructionAtOffset(kDebugCodeOffset),
               sizeof(code));
        memcpy(&parameters,
               pc_->InstructionAtOffset(kDebugParamsOffset),
               sizeof(parameters));
        // The message is a NUL-terminated string stored inline at a fixed
        // offset from the HLT.
        char const *message =
            reinterpret_cast<char const*>(
                pc_->InstructionAtOffset(kDebugMessageOffset));

        // Always print something when we hit a debug point that breaks.
        // We are going to break, so printing something is not an issue in
        // terms of speed.
        if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
          if (message != NULL) {
            PrintF("%sDebugger hit %d: %s%s%s\n",
                   clr_debug_number,
                   code,
                   clr_debug_message,
                   message,
                   clr_normal);
          } else {
            PrintF("%sDebugger hit %d.%s\n",
                   clr_debug_number,
                   code,
                   clr_normal);
          }
        }

        // Other options.
        // The parameters word also encodes a tracing directive.
        switch (parameters & kDebuggerTracingDirectivesMask) {
          case TRACE_ENABLE:
            // Turn the requested logging on, and print current state once.
            set_log_parameters(log_parameters() | parameters);
            if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
            if (parameters & LOG_REGS) { PrintRegisters(); }
            if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
            break;
          case TRACE_DISABLE:
            set_log_parameters(log_parameters() & ~parameters);
            break;
          case TRACE_OVERRIDE:
            set_log_parameters(parameters);
            break;
          default:
            // One-shot print of the requested state.
            // We don't support a one-shot LOG_DISASM.
            ASSERT((parameters & LOG_DISASM) == 0);
            // Don't print information that is already being traced.
            parameters &= ~log_parameters();
            // Print the requested information.
            if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
            if (parameters & LOG_REGS) PrintRegisters(true);
            if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
        }

        // The stop parameters are inlined in the code. Skip them:
        //  - Skip to the end of the message string.
        size_t size = kDebugMessageOffset + strlen(message) + 1;
        pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
        //  - Verify that the unreachable marker is present.
        ASSERT(pc_->Mask(ExceptionMask) == HLT);
        ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
        //  - Skip past the unreachable marker.
        set_pc(pc_->following());

        // Check if the debugger should break.
        if (parameters & BREAK) Debug();

      } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
        // A call into the host runtime, redirected through this marker.
        DoRuntimeCall(instr);
      } else if (instr->ImmException() == kImmExceptionIsPrintf) {
        // Read the argument encoded inline in the instruction stream.
        uint32_t type;
        memcpy(&type,
               pc_->InstructionAtOffset(kPrintfTypeOffset),
               sizeof(type));

        // x0 holds the format string, per the simulated printf convention.
        const char* format = reg<const char*>(0);

        // Pass all of the relevant PCS registers onto printf. It doesn't
        // matter if we pass too many as the extra ones won't be read.
        int result;
        fputs(clr_printf, stream_);
        if (type == CPURegister::kRegister) {
          result = fprintf(stream_, format,
                           xreg(1), xreg(2), xreg(3), xreg(4),
                           xreg(5), xreg(6), xreg(7));
        } else if (type == CPURegister::kFPRegister) {
          result = fprintf(stream_, format,
                           dreg(0), dreg(1), dreg(2), dreg(3),
                           dreg(4), dreg(5), dreg(6), dreg(7));
        } else {
          // No register arguments: print the format string verbatim.
          result = fprintf(stream_, "%s", format);
        }
        fputs(clr_normal, stream_);

#ifdef DEBUG
        // Flush out any assumption that registers survive a printf call.
        CorruptAllCallerSavedCPURegisters();
#endif

        // printf returns its character count in x0, like the real function.
        set_xreg(0, result);

        // The printf parameters are inlined in the code, so skip them.
        set_pc(pc_->InstructionAtOffset(kPrintfLength));

        // Set LR as if we'd just called a native printf function.
        set_lr(pc());

      } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
        fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
                reinterpret_cast<void*>(pc_));
        abort();

      } else {
        OS::DebugBreak();
      }
      break;
    }

    default:
      UNIMPLEMENTED();
  }
}
3640 
3641 #endif // USE_SIMULATOR
3642 
3643 } } // namespace v8::internal
3644 
3645 #endif // V8_TARGET_ARCH_ARM64
byte * Address
Definition: globals.h:186
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
int fpclassify(double x)
const intptr_t kSmiTagMask
Definition: v8.h:5480
#define CHECK_EQ(expected, value)
Definition: checks.h:252
bool IsQuietNaN(T num)
Definition: utils-arm64.h:105
const RegList kCallerSaved
Definition: frames-arm.h:75
const unsigned kDebugMessageOffset
const Instr kImmExceptionIsRedirectedCall
void PrintF(const char *format,...)
Definition: v8utils.cc:40
const unsigned kWRegSize
#define FATAL(msg)
Definition: checks.h:48
const LowDwVfpRegister d0
const unsigned kDRegSizeInBits
const int KB
Definition: globals.h:245
const int64_t kWRegMask
const unsigned kZeroRegCode
int CountLeadingZeros(uint64_t value, int width)
const int kSmiValueSize
Definition: v8.h:5540
const unsigned kXRegSizeInBits
#define kCallerSavedFP
const int64_t kXMaxInt
int CountLeadingSignBits(int64_t value, int width)
kSerializedDataOffset Object
Definition: objects-inl.h:5016
TypeImpl< ZoneTypeConfig > Type
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage message
Definition: flags.cc:665
int int32_t
Definition: unicode.cc:47
#define ASSERT(condition)
Definition: checks.h:329
const int64_t kWordMask
const uint64_t kWMaxUInt
unsigned short uint16_t
Definition: unicode.cc:46
const unsigned kDebuggerTracingDirectivesMask
#define CHECK(condition)
Definition: checks.h:75
char * ReadLine(const char *prompt)
Definition: v8utils.cc:70
const int kNumberOfCalleeSavedRegisters
const Instr kImmExceptionIsPrintf
bool is_intn(int64_t x, unsigned n)
Definition: utils.h:1102
int isnan(double x)
uint8_t byte
Definition: globals.h:185
const unsigned kWRegSizeInBits
uint64_t ObjectPair
Definition: runtime.cc:9288
const Register sp
const uint32_t kSlotsZapValue
Definition: v8globals.h:86
#define UNREACHABLE()
Definition: checks.h:52
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static Instr ImmException(int imm16)
bool IsSignallingNaN(double num)
Definition: utils-arm64.h:86
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
Definition: flags.cc:211
const uint64_t kXMaxUInt
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
#define OFFSET_OF(type, field)
Definition: globals.h:325
double FusedMultiplyAdd(double op1, double op2, double a)
Definition: utils-arm64.h:124
const unsigned kPrintfLength
const int kPointerSize
Definition: globals.h:268
#define PRINTF_CHECKING
Definition: v8utils.h:52
const unsigned kInstructionSize
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
Definition: flags.cc:211
const unsigned kDebugParamsOffset
const unsigned kByteSize
#define kCalleeSavedFP
const unsigned kSRegSizeInBits
const RegList kCalleeSaved
Definition: frames-arm.h:63
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static void VPrint(const char *format, va_list args)
const Register pc
const unsigned kSPRegInternalCode
T RoundUp(T x, intptr_t m)
Definition: utils.h:144
const unsigned kNumberOfFPRegisters
const int kSmiShift
const unsigned kSRegSize
static void DebugBreak()
int isinf(double x)
#define T(name, string, precedence)
Definition: token.cc:48
const unsigned kXRegSize
const Register lr
const unsigned kNumberOfRegisters
const unsigned kRegCodeMask
const Instr kImmExceptionIsUnreachable
const unsigned kDebugCodeOffset
#define UNIMPLEMENTED()
Definition: checks.h:50
const unsigned kDRegSize
const int64_t kByteMask
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
void USE(T)
Definition: globals.h:341
StringCharacterStream *const stream_
const unsigned kPrintfTypeOffset
static int ActivationFrameAlignment()
const int kNumberOfCalleeSavedFPRegisters
const int32_t kWMinInt
HeapObject * obj
double ToQuietNaN(double num)
Definition: utils-arm64.h:111
void DeleteArray(T *array)
Definition: allocation.h:91
const int64_t kHalfWordMask
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
Definition: flags.cc:505
const Instr kImmExceptionIsDebug
const int64_t kXMinInt
const int32_t kWMaxInt
const int64_t kXRegMask
uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x)
Definition: utils.h:1088
bool is_uintn(int64_t x, unsigned n)
Definition: utils.h:1108