33 #if V8_TARGET_ARCH_ARM64
44 #if defined(USE_SIMULATOR)
51 #define SScanF sscanf // NOLINT
57 #define COLOUR(colour_code) "\033[" colour_code "m"
58 #define BOLD(colour_code) "1;" colour_code
// ANSI escape sequences used to colourize simulator trace output.
// Each string is the enable-colour prefix when --log-colour is set,
// or the empty string (no colour codes emitted) otherwise.
typedef char const *
const TEXT_COLOUR;
68 TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(
NORMAL) :
"";
69 TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) :
"";
70 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) :
"";
71 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) :
"";
72 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) :
"";
73 TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) :
"";
74 TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) :
"";
75 TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) :
"";
76 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) :
"";
77 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) :
"";
78 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) :
"";
79 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) :
"";
86 va_start(arguments, format);
// Sentinel "return address": simulated execution runs until pc_ reaches
// this value (NULL), which terminates Simulator::Run().
93 const Instruction* Simulator::kEndOfSimAddress =
NULL;
// Overwrite the bit-field [msb:lsb] of value_ with the corresponding bits
// of `bits`; all other bits of value_ are preserved.
// The ASSERT rejects writes into fields declared write-ignored by
// write_ignore_mask_.
96 void SimSystemRegister::SetBits(
int msb,
int lsb, uint32_t bits) {
97 int width = msb - lsb + 1;
101 uint32_t mask = ((1 << width) - 1) << lsb;
102 ASSERT((mask & write_ignore_mask_) == 0);
104 value_ = (value_ & ~mask) | (bits & mask);
// Build the reset-state system register for `id`: raw value 0 plus the
// write-ignore mask appropriate for NZCV or FPCR.
// NOTE(review): the switch/UNREACHABLE scaffolding around these returns is
// not visible in this excerpt; the final return is presumably the
// fall-through for an unknown id.
108 SimSystemRegister SimSystemRegister::DefaultValueFor(
SystemRegister id) {
111 return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
113 return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
116 return SimSystemRegister();
// One-time per-isolate simulator setup. Idempotent: returns immediately if
// already initialized. Installs RedirectExternalReference so calls to
// native (host) functions are routed through the simulator.
121 void Simulator::Initialize(Isolate* isolate) {
122 if (isolate->simulator_initialized())
return;
123 isolate->set_simulator_initialized(
true);
124 ExternalReference::set_redirector(isolate, &RedirectExternalReference);
// Return the per-thread Simulator for `isolate`, lazily creating it.
// When any tracing/stats/debug flag is set, a full dispatching-decoder
// simulator is built; otherwise a lighter Decoder<Simulator> is used.
// NOTE(review): the if/else lines separating the two construction paths
// are missing from this excerpt — the second `sim = ...` presumably sits
// in an else branch; confirm against the full file.
129 Simulator* Simulator::current(Isolate* isolate) {
130 Isolate::PerIsolateThreadData* isolate_data =
131 isolate->FindOrAllocatePerThreadDataForThisThread();
134 Simulator* sim = isolate_data->simulator();
136 if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
137 sim =
new Simulator(
new Decoder<DispatchingDecoderVisitor>(), isolate);
139 sim =
new Decoder<Simulator>();
140 sim->isolate_ = isolate;
142 isolate_data->set_simulator(sim);
// Marshal `args` per the AAPCS64 calling convention and run the simulated
// code at `entry` until it returns (lr is set to kEndOfSimAddress).
// The first 8 X-register arguments go in x0-x7, the first 8 D-register
// arguments in d0-d7; any remainder is copied onto the simulated stack.
// The original stack pointer is restored before returning to the caller.
148 void Simulator::CallVoid(
byte* entry, CallArgument* args) {
152 std::vector<int64_t> stack_args(0);
// Distribute arguments to registers, overflowing to stack_args.
153 for (
int i = 0; !args[i].IsEnd(); i++) {
154 CallArgument arg = args[i];
155 if (arg.IsX() && (index_x < 8)) {
156 set_xreg(index_x++, arg.bits());
157 }
else if (arg.IsD() && (index_d < 8)) {
158 set_dreg_bits(index_d++, arg.bits());
160 ASSERT(arg.IsD() || arg.IsX());
161 stack_args.push_back(arg.bits());
// Reserve space below the current sp for the stacked arguments.
166 uintptr_t original_stack =
sp();
167 uintptr_t entry_stack = original_stack -
168 stack_args.size() *
sizeof(stack_args[0]);
// Copy the overflow arguments into the reserved stack area.
172 char * stack =
reinterpret_cast<char*
>(entry_stack);
173 std::vector<int64_t>::const_iterator it;
174 for (it = stack_args.begin(); it != stack_args.end(); it++) {
175 memcpy(stack, &(*it),
sizeof(*it));
176 stack +=
sizeof(*it);
179 ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
// Returning to kEndOfSimAddress stops the simulation loop.
184 set_lr(kEndOfSimAddress);
185 CheckPCSComplianceAndRun();
187 set_sp(original_stack);
// Run `entry` via CallVoid; the int64 result is presumably read from
// xreg(0) on a line not shown in this excerpt — confirm in the full file.
191 int64_t Simulator::CallInt64(
byte* entry, CallArgument* args) {
192 CallVoid(entry, args);
// Run `entry` via CallVoid; the double result is presumably read from
// dreg(0) on a line not shown in this excerpt — confirm in the full file.
197 double Simulator::CallDouble(
byte* entry, CallArgument* args) {
198 CallVoid(entry, args);
203 int64_t Simulator::CallJS(
byte* entry,
204 byte* function_entry,
209 CallArgument args[] = {
210 CallArgument(function_entry),
217 return CallInt64(entry, args);
220 int64_t Simulator::CallRegExp(
byte* entry,
222 int64_t start_offset,
223 const byte* input_start,
224 const byte* input_end,
229 void* return_address,
231 CallArgument args[] = {
233 CallArgument(start_offset),
234 CallArgument(input_start),
235 CallArgument(input_end),
236 CallArgument(output),
237 CallArgument(output_size),
238 CallArgument(stack_base),
239 CallArgument(direct_call),
240 CallArgument(return_address),
241 CallArgument(isolate),
244 return CallInt64(entry, args);
// Run the simulated code while checking AAPCS64 compliance: callee-saved
// X and D registers are snapshotted before the run and verified unchanged
// afterwards (CHECK_EQ / ASSERT). The result registers (x0, x1, d0) are
// removed from the caller-saved lists before corrupting them, so return
// values survive the corruption pass.
248 void Simulator::CheckPCSComplianceAndRun() {
262 saved_registers[i] = xreg(register_list.PopLowestIndex().code());
265 saved_fpregisters[i] =
266 dreg_bits(fpregister_list.PopLowestIndex().code());
268 int64_t original_stack =
sp();
278 CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
281 ASSERT(saved_fpregisters[i] ==
282 dreg_bits(fpregister_list.PopLowestIndex().code()));
290 register_list.Remove(x0);
291 register_list.Remove(x1);
296 fpregister_list.Remove(
d0);
// FIX: "®ister_list" was HTML-entity mojibake of "&register_list"
// ("&reg" rendered as the registered-trademark sign).
298 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
299 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
// Fill every register in `list` with `value` OR-ed with the register's
// code, so a stray read of a corrupted register is both recognizable and
// traceable to the specific register. Consumes (empties) `list`.
// The first loop handles X registers, the second D registers; the
// branch selecting between them is not visible in this excerpt.
307 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
309 while (!list->IsEmpty()) {
310 unsigned code = list->PopLowestIndex().code();
311 set_xreg(code, value | code);
315 while (!list->IsEmpty()) {
316 unsigned code = list->PopLowestIndex().code();
317 set_dreg_bits(code, value | code);
// Overwrite every caller-saved X and D register with its corruption
// marker value. Used to verify simulated code does not rely on
// caller-saved registers surviving a call.
323 void Simulator::CorruptAllCallerSavedCPURegisters() {
// FIX: "®ister_list" was HTML-entity mojibake of "&register_list"
// ("&reg" rendered as the registered-trademark sign).
328 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
329 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
// Push `address` onto the simulated stack, keeping csp 16-byte aligned by
// reserving an extra (padding) slot above the value slot.
// NOTE(review): the computation of new_sp and the stores/return are on
// lines not shown in this excerpt.
335 uintptr_t Simulator::PushAddress(uintptr_t address) {
338 uintptr_t* alignment_slot =
339 reinterpret_cast<uintptr_t*
>(new_sp +
kXRegSize);
341 uintptr_t* stack_slot =
reinterpret_cast<uintptr_t*
>(new_sp);
// Pop and return the address at the top of the simulated stack
// (the inverse of PushAddress; the sp adjustment is on lines not shown).
348 uintptr_t Simulator::PopAddress() {
349 intptr_t current_sp =
sp();
350 uintptr_t* stack_slot =
reinterpret_cast<uintptr_t*
>(current_sp);
351 uintptr_t address = *stack_slot;
// Report a stack limit 1024 bytes above the real protection boundary,
// leaving headroom for C++ code called from generated code to run without
// tripping the guard.
359 uintptr_t Simulator::StackLimit()
const {
362 return reinterpret_cast<uintptr_t
>(stack_limit_) + 1024;
366 Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
367 Isolate* isolate, FILE* stream)
369 last_debugger_input_(
NULL),
373 decoder_->AppendVisitor(
this);
377 if (FLAG_trace_sim) {
378 decoder_->InsertVisitorBefore(print_disasm_,
this);
382 if (FLAG_log_instruction_stats) {
383 instrument_ =
new Instrument(FLAG_log_instruction_file,
384 FLAG_log_instruction_period);
385 decoder_->AppendVisitor(instrument_);
390 Simulator::Simulator()
392 last_debugger_input_(
NULL),
396 CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
// Allocate and set up the simulated stack (with protection zones at both
// ends), align the initial sp to 16 bytes per the ABI, and build the
// disassembler used for tracing.
400 void Simulator::Init(FILE* stream) {
404 stack_size_ = (FLAG_sim_stack_size *
KB) + (2 * stack_protection_size_);
405 stack_ =
new byte[stack_size_];
406 stack_limit_ = stack_ + stack_protection_size_;
407 byte* tos = stack_ + stack_size_ - stack_protection_size_;
// Mask the low 4 bits: csp must stay 16-byte aligned.
409 set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
412 print_disasm_ =
new PrintDisassembler(
stream_);
416 disassembler_decoder_ =
new Decoder<DispatchingDecoderVisitor>();
417 disassembler_decoder_->AppendVisitor(print_disasm_);
// Return the simulator to its power-on state: default system registers,
// poison values in all general (0xbadbeef) and FP (signalling-NaN bit
// pattern) registers so reads of uninitialized registers are conspicuous,
// lr aimed at the end-of-simulation sentinel, and breakpoint state cleared.
421 void Simulator::ResetState() {
423 nzcv_ = SimSystemRegister::DefaultValueFor(
NZCV);
424 fpcr_ = SimSystemRegister::DefaultValueFor(
FPCR);
429 set_xreg(i, 0xbadbeef);
433 set_dreg_bits(i, 0x7ff000007f800001UL);
436 set_lr(kEndOfSimAddress);
// FIX: was `breakpoints_.empty();` — a const emptiness query whose result
// was discarded (a no-op). The intent is to remove all breakpoints.
439 breakpoints_.clear();
440 break_on_next_=
false;
// Tear down owned helpers; the instruction-stats instrument and the stack
// buffer are presumably released on lines not shown in this excerpt.
444 Simulator::~Simulator() {
446 if (FLAG_log_instruction_stats) {
449 delete disassembler_decoder_;
450 delete print_disasm_;
// Main simulation loop: execute instructions one at a time until pc_
// reaches the kEndOfSimAddress sentinel installed in lr by the callers.
456 void Simulator::Run() {
457 pc_modified_ =
false;
458 while (pc_ != kEndOfSimAddress) {
459 ExecuteInstruction();
464 void Simulator::RunFrom(Instruction* start) {
480 : external_function_(external_function),
483 redirect_call_.SetInstructionBits(
485 Isolate* isolate = Isolate::Current();
486 next_ = isolate->simulator_redirection();
488 isolate->set_simulator_redirection(
this);
491 void* address_of_redirect_call() {
492 return reinterpret_cast<void*
>(&redirect_call_);
495 template <
typename T>
496 T external_function() {
return reinterpret_cast<T>(external_function_); }
500 static Redirection* Get(
void* external_function,
502 Isolate* isolate = Isolate::Current();
503 Redirection* current = isolate->simulator_redirection();
504 for (; current !=
NULL; current = current->next_) {
505 if (current->external_function_ == external_function) {
510 return new Redirection(external_function, type);
513 static Redirection* FromHltInstruction(Instruction* redirect_call) {
514 char* addr_of_hlt =
reinterpret_cast<char*
>(redirect_call);
515 char* addr_of_redirection =
516 addr_of_hlt -
OFFSET_OF(Redirection, redirect_call_);
517 return reinterpret_cast<Redirection*
>(addr_of_redirection);
520 static void* ReverseRedirection(int64_t reg) {
521 Redirection* redirection =
522 FromHltInstruction(reinterpret_cast<Instruction*>(reg));
523 return redirection->external_function<
void*>();
527 void* external_function_;
528 Instruction redirect_call_;
545 typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
554 typedef int64_t (*SimulatorRuntimeCompareCall)(
double arg1,
double arg2);
555 typedef double (*SimulatorRuntimeFPFPCall)(
double arg1,
double arg2);
556 typedef double (*SimulatorRuntimeFPCall)(
double arg1);
557 typedef double (*SimulatorRuntimeFPIntCall)(
double arg1,
int32_t arg2);
561 typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
562 typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0,
void* arg1);
565 typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
566 typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
569 void Simulator::DoRuntimeCall(Instruction* instr) {
570 Redirection* redirection = Redirection::FromHltInstruction(instr);
575 Instruction* return_address =
lr();
577 int64_t external = redirection->external_function<int64_t>();
579 TraceSim(
"Call to host function at %p\n",
580 redirection->external_function<
void*>());
583 bool stack_alignment_exception = ((
sp() & 0xf) != 0);
584 if (stack_alignment_exception) {
585 TraceSim(
" with unaligned stack 0x%016" PRIx64
".\n",
sp());
586 FATAL(
"ALIGNMENT EXCEPTION");
589 switch (redirection->type()) {
591 TraceSim(
"Type: Unknown.\n");
595 case ExternalReference::BUILTIN_CALL: {
597 TraceSim(
"Type: BUILTIN_CALL\n");
598 SimulatorRuntimeCall target =
599 reinterpret_cast<SimulatorRuntimeCall
>(external);
604 TraceSim(
"Arguments: "
605 "0x%016" PRIx64
", 0x%016" PRIx64
", "
606 "0x%016" PRIx64
", 0x%016" PRIx64
", "
607 "0x%016" PRIx64
", 0x%016" PRIx64
", "
608 "0x%016" PRIx64
", 0x%016" PRIx64,
609 xreg(0), xreg(1), xreg(2), xreg(3),
610 xreg(4), xreg(5), xreg(6), xreg(7));
611 ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
612 xreg(4), xreg(5), xreg(6), xreg(7));
613 TraceSim(
"Returned: {0x%" PRIx64
", 0x%" PRIx64
"}\n",
614 result.res0, result.res1);
616 CorruptAllCallerSavedCPURegisters();
618 set_xreg(0, result.res0);
619 set_xreg(1, result.res1);
623 case ExternalReference::DIRECT_API_CALL: {
625 TraceSim(
"Type: DIRECT_API_CALL\n");
626 SimulatorRuntimeDirectApiCall target =
627 reinterpret_cast<SimulatorRuntimeDirectApiCall
>(external);
628 TraceSim(
"Arguments: 0x%016" PRIx64
"\n", xreg(0));
630 TraceSim(
"No return value.");
632 CorruptAllCallerSavedCPURegisters();
637 case ExternalReference::BUILTIN_COMPARE_CALL: {
639 TraceSim(
"Type: BUILTIN_COMPARE_CALL\n");
640 SimulatorRuntimeCompareCall target =
641 reinterpret_cast<SimulatorRuntimeCompareCall
>(external);
642 TraceSim(
"Arguments: %f, %f\n", dreg(0), dreg(1));
643 int64_t result = target(dreg(0), dreg(1));
644 TraceSim(
"Returned: %" PRId64
"\n", result);
646 CorruptAllCallerSavedCPURegisters();
652 case ExternalReference::BUILTIN_FP_CALL: {
654 TraceSim(
"Type: BUILTIN_FP_CALL\n");
655 SimulatorRuntimeFPCall target =
656 reinterpret_cast<SimulatorRuntimeFPCall
>(external);
657 TraceSim(
"Argument: %f\n", dreg(0));
658 double result = target(dreg(0));
659 TraceSim(
"Returned: %f\n", result);
661 CorruptAllCallerSavedCPURegisters();
667 case ExternalReference::BUILTIN_FP_FP_CALL: {
669 TraceSim(
"Type: BUILTIN_FP_FP_CALL\n");
670 SimulatorRuntimeFPFPCall target =
671 reinterpret_cast<SimulatorRuntimeFPFPCall
>(external);
672 TraceSim(
"Arguments: %f, %f\n", dreg(0), dreg(1));
673 double result = target(dreg(0), dreg(1));
674 TraceSim(
"Returned: %f\n", result);
676 CorruptAllCallerSavedCPURegisters();
682 case ExternalReference::BUILTIN_FP_INT_CALL: {
684 TraceSim(
"Type: BUILTIN_FP_INT_CALL\n");
685 SimulatorRuntimeFPIntCall target =
686 reinterpret_cast<SimulatorRuntimeFPIntCall
>(external);
687 TraceSim(
"Arguments: %f, %d\n", dreg(0), wreg(0));
688 double result = target(dreg(0), wreg(0));
689 TraceSim(
"Returned: %f\n", result);
691 CorruptAllCallerSavedCPURegisters();
697 case ExternalReference::DIRECT_GETTER_CALL: {
699 TraceSim(
"Type: DIRECT_GETTER_CALL\n");
700 SimulatorRuntimeDirectGetterCall target =
701 reinterpret_cast<SimulatorRuntimeDirectGetterCall
>(external);
702 TraceSim(
"Arguments: 0x%016" PRIx64
", 0x%016" PRIx64
"\n",
704 target(xreg(0), xreg(1));
705 TraceSim(
"No return value.");
707 CorruptAllCallerSavedCPURegisters();
712 case ExternalReference::PROFILING_API_CALL: {
714 TraceSim(
"Type: PROFILING_API_CALL\n");
715 SimulatorRuntimeProfilingApiCall target =
716 reinterpret_cast<SimulatorRuntimeProfilingApiCall
>(external);
717 void* arg1 = Redirection::ReverseRedirection(xreg(1));
718 TraceSim(
"Arguments: 0x%016" PRIx64
", %p\n", xreg(0), arg1);
719 target(xreg(0), arg1);
720 TraceSim(
"No return value.");
722 CorruptAllCallerSavedCPURegisters();
727 case ExternalReference::PROFILING_GETTER_CALL: {
730 TraceSim(
"Type: PROFILING_GETTER_CALL\n");
731 SimulatorRuntimeProfilingGetterCall target =
732 reinterpret_cast<SimulatorRuntimeProfilingGetterCall
>(
734 void* arg2 = Redirection::ReverseRedirection(xreg(2));
735 TraceSim(
"Arguments: 0x%016" PRIx64
", 0x%016" PRIx64
", %p\n",
736 xreg(0), xreg(1), arg2);
737 target(xreg(0), xreg(1), arg2);
738 TraceSim(
"No return value.");
740 CorruptAllCallerSavedCPURegisters();
746 set_lr(return_address);
747 set_pc(return_address);
751 void* Simulator::RedirectExternalReference(
void* external_function,
753 Redirection* redirection = Redirection::Get(external_function, type);
754 return redirection->address_of_redirect_call();
758 const char* Simulator::xreg_names[] = {
759 "x0",
"x1",
"x2",
"x3",
"x4",
"x5",
"x6",
"x7",
760 "x8",
"x9",
"x10",
"x11",
"x12",
"x13",
"x14",
"x15",
761 "ip0",
"ip1",
"x18",
"x19",
"x20",
"x21",
"x22",
"x23",
762 "x24",
"x25",
"x26",
"cp",
"jssp",
"fp",
"lr",
"xzr",
"csp"};
764 const char* Simulator::wreg_names[] = {
765 "w0",
"w1",
"w2",
"w3",
"w4",
"w5",
"w6",
"w7",
766 "w8",
"w9",
"w10",
"w11",
"w12",
"w13",
"w14",
"w15",
767 "w16",
"w17",
"w18",
"w19",
"w20",
"w21",
"w22",
"w23",
768 "w24",
"w25",
"w26",
"wcp",
"wjssp",
"wfp",
"wlr",
"wzr",
"wcsp"};
770 const char* Simulator::sreg_names[] = {
771 "s0",
"s1",
"s2",
"s3",
"s4",
"s5",
"s6",
"s7",
772 "s8",
"s9",
"s10",
"s11",
"s12",
"s13",
"s14",
"s15",
773 "s16",
"s17",
"s18",
"s19",
"s20",
"s21",
"s22",
"s23",
774 "s24",
"s25",
"s26",
"s27",
"s28",
"s29",
"s30",
"s31"};
776 const char* Simulator::dreg_names[] = {
777 "d0",
"d1",
"d2",
"d3",
"d4",
"d5",
"d6",
"d7",
778 "d8",
"d9",
"d10",
"d11",
"d12",
"d13",
"d14",
"d15",
779 "d16",
"d17",
"d18",
"d19",
"d20",
"d21",
"d22",
"d23",
780 "d24",
"d25",
"d26",
"d27",
"d28",
"d29",
"d30",
"d31"};
782 const char* Simulator::vreg_names[] = {
783 "v0",
"v1",
"v2",
"v3",
"v4",
"v5",
"v6",
"v7",
784 "v8",
"v9",
"v10",
"v11",
"v12",
"v13",
"v14",
"v15",
785 "v16",
"v17",
"v18",
"v19",
"v20",
"v21",
"v22",
"v23",
786 "v24",
"v25",
"v26",
"v27",
"v28",
"v29",
"v30",
"v31"};
789 const char* Simulator::WRegNameForCode(
unsigned code,
Reg31Mode mode) {
790 ASSERT(code < kNumberOfRegisters);
795 return wreg_names[
code];
799 const char* Simulator::XRegNameForCode(
unsigned code,
Reg31Mode mode) {
800 ASSERT(code < kNumberOfRegisters);
805 return xreg_names[
code];
809 const char* Simulator::SRegNameForCode(
unsigned code) {
810 ASSERT(code < kNumberOfFPRegisters);
811 return sreg_names[
code];
815 const char* Simulator::DRegNameForCode(
unsigned code) {
816 ASSERT(code < kNumberOfFPRegisters);
817 return dreg_names[
code];
821 const char* Simulator::VRegNameForCode(
unsigned code) {
822 ASSERT(code < kNumberOfFPRegisters);
823 return vreg_names[
code];
827 int Simulator::CodeFromName(
const char*
name) {
829 if ((strcmp(xreg_names[i], name) == 0) ||
830 (strcmp(wreg_names[i], name) == 0)) {
835 if ((strcmp(vreg_names[i], name) == 0) ||
836 (strcmp(dreg_names[i], name) == 0) ||
837 (strcmp(sreg_names[i], name) == 0)) {
841 if ((strcmp(
"csp", name) == 0) || (strcmp(
"wcsp", name) == 0)) {
849 int64_t Simulator::AddWithCarry(
unsigned reg_size,
854 ASSERT((carry_in == 0) || (carry_in == 1));
859 int64_t signed_sum = src1 + src2 + carry_in;
864 u1 =
static_cast<uint64_t
>(src1) &
kWRegMask;
865 u2 =
static_cast<uint64_t
>(src2) &
kWRegMask;
869 C = ((
kWMaxUInt - u1) < (u2 + carry_in)) ||
876 V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
879 u1 =
static_cast<uint64_t
>(src1);
880 u2 =
static_cast<uint64_t
>(src2);
884 C = ((
kXMaxUInt - u1) < (u2 + carry_in)) ||
888 V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
891 N = CalcNFlag(result, reg_size);
892 Z = CalcZFlag(result);
904 int64_t Simulator::ShiftOperand(
unsigned reg_size,
912 switch (shift_type) {
914 return (value << amount) & mask;
916 return static_cast<uint64_t
>(value) >> amount;
921 int64_t s_value = (value << s_shift) >> s_shift;
922 return (s_value >> amount) & mask;
928 return (static_cast<uint64_t>(value) >> amount) |
929 ((value & ((1
L << amount) - 1
L)) << (reg_size - amount));
938 int64_t Simulator::ExtendValue(
unsigned reg_size,
941 unsigned left_shift) {
942 switch (extend_type) {
953 value = (value << 56) >> 56;
956 value = (value << 48) >> 48;
959 value = (value << 32) >> 32;
968 return (value << left_shift) & mask;
972 template<>
double Simulator::FPDefaultNaN<double>()
const {
973 return kFP64DefaultNaN;
977 template<>
float Simulator::FPDefaultNaN<float>()
const {
978 return kFP32DefaultNaN;
982 void Simulator::FPCompare(
double val0,
double val1) {
983 AssertSupportedFPCR();
989 }
else if (val0 < val1) {
991 }
else if (val0 > val1) {
993 }
else if (val0 == val1) {
// Debugger helper: toggle an existing breakpoint at `location` if one is
// already registered, otherwise register a new enabled breakpoint there.
1001 void Simulator::SetBreakpoint(Instruction* location) {
1002 for (
unsigned i = 0; i < breakpoints_.size(); i++) {
1003 if (breakpoints_.at(i).location == location) {
// Already present: flip enabled/disabled and report the prior state.
1004 PrintF(
"Existing breakpoint at %p was %s\n",
1005 reinterpret_cast<void*>(location),
1006 breakpoints_.at(i).enabled ?
"disabled" :
"enabled");
1007 breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
1011 Breakpoint new_breakpoint = {location,
true};
1012 breakpoints_.push_back(new_breakpoint);
1013 PrintF(
"Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
1017 void Simulator::ListBreakpoints() {
1018 PrintF(
"Breakpoints:\n");
1019 for (
unsigned i = 0; i < breakpoints_.size(); i++) {
1021 reinterpret_cast<void*>(breakpoints_.at(i).location),
1022 breakpoints_.at(i).enabled ?
"enabled" :
"disabled");
// Called each step: if an enabled breakpoint matches the current pc_,
// disable it (one-shot behavior) and report the hit. The transfer to the
// debugger is presumably on lines not shown in this excerpt.
1027 void Simulator::CheckBreakpoints() {
1028 bool hit_a_breakpoint =
false;
1029 for (
unsigned i = 0; i < breakpoints_.size(); i++) {
1030 if ((breakpoints_.at(i).location == pc_) &&
1031 breakpoints_.at(i).enabled) {
1032 hit_a_breakpoint =
true;
1034 breakpoints_.at(i).enabled =
false;
1037 if (hit_a_breakpoint) {
1038 PrintF(
"Hit and disabled a breakpoint at %p.\n",
1039 reinterpret_cast<void*>(pc_));
1045 void Simulator::CheckBreakNext() {
1047 if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
1048 SetBreakpoint(pc_->following());
1049 break_on_next_ =
false;
1054 void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
1056 for (Instruction*
pc = start;
pc < end;
pc =
pc->following()) {
1057 disassembler_decoder_->Decode(
pc);
1062 void Simulator::PrintSystemRegisters(
bool print_all) {
1063 static bool first_run =
true;
1065 static SimSystemRegister last_nzcv;
1066 if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
1067 fprintf(
stream_,
"# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
1070 nzcv().
N(), nzcv().Z(), nzcv().C(), nzcv().V(),
1075 static SimSystemRegister last_fpcr;
1076 if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
1077 static const char * rmode[] = {
1078 "0b00 (Round to Nearest)",
1079 "0b01 (Round towards Plus Infinity)",
1080 "0b10 (Round towards Minus Infinity)",
1081 "0b11 (Round towards Zero)"
1083 ASSERT(fpcr().RMode() <= (
sizeof(rmode) /
sizeof(rmode[0])));
1084 fprintf(
stream_,
"# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
1087 fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
1096 void Simulator::PrintRegisters(
bool print_all_regs) {
1097 static bool first_run =
true;
1101 if (print_all_regs || first_run ||
1104 "# %s%4s:%s 0x%016" PRIx64
"%s\n",
1118 void Simulator::PrintFPRegisters(
bool print_all_regs) {
1119 static bool first_run =
true;
1126 if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
1128 "# %s %4s:%s 0x%016" PRIx64
"%s (%s%s:%s %g%s %s:%s %g%s)\n",
1145 last_regs[i] = dreg_bits(i);
1151 void Simulator::PrintProcessorState() {
1152 PrintSystemRegisters();
1158 void Simulator::PrintWrite(uint8_t* address,
1160 unsigned num_bytes) {
1164 const char* format =
"# %s0x%0*" PRIx64
"%s -> %s0x%016" PRIx64
"%s\n";
1179 void Simulator::VisitUnimplemented(Instruction* instr) {
1180 fprintf(
stream_,
"Unimplemented instruction at %p: 0x%08" PRIx32
"\n",
1181 reinterpret_cast<void*>(instr), instr->InstructionBits());
1186 void Simulator::VisitUnallocated(Instruction* instr) {
1187 fprintf(
stream_,
"Unallocated instruction at %p: 0x%08" PRIx32
"\n",
1188 reinterpret_cast<void*>(instr), instr->InstructionBits());
1193 void Simulator::VisitPCRelAddressing(Instruction* instr) {
1196 set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
1208 void Simulator::VisitUnconditionalBranch(Instruction* instr) {
1211 set_lr(instr->following());
1214 set_pc(instr->ImmPCOffsetTarget());
1222 void Simulator::VisitConditionalBranch(Instruction* instr) {
1224 if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
1225 set_pc(instr->ImmPCOffsetTarget());
1230 void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
1231 Instruction* target = reg<Instruction*>(instr->Rn());
1234 set_lr(instr->following());
1235 if (instr->Rn() == 31) {
1243 case RET: set_pc(target);
break;
1249 void Simulator::VisitTestBranch(Instruction* instr) {
1250 unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
1251 instr->ImmTestBranchBit40();
1252 bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
1255 case TBNZ: take_branch = !take_branch;
break;
1259 set_pc(instr->ImmPCOffsetTarget());
1264 void Simulator::VisitCompareBranch(Instruction* instr) {
1265 unsigned rt = instr->Rt();
1266 bool take_branch =
false;
1268 case CBZ_w: take_branch = (wreg(rt) == 0);
break;
1269 case CBZ_x: take_branch = (xreg(rt) == 0);
break;
1270 case CBNZ_w: take_branch = (wreg(rt) != 0);
break;
1271 case CBNZ_x: take_branch = (xreg(rt) != 0);
break;
1275 set_pc(instr->ImmPCOffsetTarget());
1280 void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
1283 bool set_flags = instr->FlagsUpdate();
1284 int64_t new_val = 0;
1287 switch (operation) {
1290 new_val = AddWithCarry(reg_size,
1292 reg(reg_size, instr->Rn(), instr->RnMode()),
1298 new_val = AddWithCarry(reg_size,
1300 reg(reg_size, instr->Rn(), instr->RnMode()),
1308 set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
1312 void Simulator::VisitAddSubShifted(Instruction* instr) {
1315 int64_t op2 = ShiftOperand(reg_size,
1316 reg(reg_size, instr->Rm()),
1317 static_cast<Shift>(instr->ShiftDP()),
1318 instr->ImmDPShift());
1319 AddSubHelper(instr, op2);
1323 void Simulator::VisitAddSubImmediate(Instruction* instr) {
1324 int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
1325 AddSubHelper(instr, op2);
1329 void Simulator::VisitAddSubExtended(Instruction* instr) {
1332 int64_t op2 = ExtendValue(reg_size,
1333 reg(reg_size, instr->Rm()),
1334 static_cast<Extend>(instr->ExtendMode()),
1335 instr->ImmExtendShift());
1336 AddSubHelper(instr, op2);
1340 void Simulator::VisitAddSubWithCarry(Instruction* instr) {
1343 int64_t op2 = reg(reg_size, instr->Rm());
1350 new_val = AddWithCarry(reg_size,
1351 instr->FlagsUpdate(),
1352 reg(reg_size, instr->Rn()),
1356 set_reg(reg_size, instr->Rd(), new_val);
1360 void Simulator::VisitLogicalShifted(Instruction* instr) {
1363 Shift shift_type =
static_cast<Shift>(instr->ShiftDP());
1364 unsigned shift_amount = instr->ImmDPShift();
1365 int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
1367 if (instr->Mask(
NOT) ==
NOT) {
1370 LogicalHelper(instr, op2);
1374 void Simulator::VisitLogicalImmediate(Instruction* instr) {
1375 LogicalHelper(instr, instr->ImmLogical());
1379 void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
1382 int64_t op1 = reg(reg_size, instr->Rn());
1384 bool update_flags =
false;
1389 case ANDS: update_flags =
true;
1390 case AND: result = op1 & op2;
break;
1391 case ORR: result = op1 | op2;
break;
1392 case EOR: result = op1 ^ op2;
break;
1398 nzcv().SetN(CalcNFlag(result, reg_size));
1399 nzcv().SetZ(CalcZFlag(result));
1404 set_reg(reg_size, instr->Rd(), result, instr->RdMode());
1408 void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
1411 ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
1415 void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
1416 ConditionalCompareHelper(instr, instr->ImmCondCmp());
1420 void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
1423 int64_t op1 = reg(reg_size, instr->Rn());
1425 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
1429 AddWithCarry(reg_size,
true, op1, ~op2, 1);
1432 AddWithCarry(reg_size,
true, op1, op2, 0);
1436 nzcv().SetFlags(instr->Nzcv());
1441 void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
1442 int offset = instr->ImmLSUnsigned() << instr->SizeLS();
1443 LoadStoreHelper(instr, offset,
Offset);
1447 void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
1448 LoadStoreHelper(instr, instr->ImmLS(),
Offset);
1452 void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
1453 LoadStoreHelper(instr, instr->ImmLS(),
PreIndex);
1457 void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
1458 LoadStoreHelper(instr, instr->ImmLS(),
PostIndex);
1462 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
1465 unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
1469 LoadStoreHelper(instr, offset,
Offset);
1473 void Simulator::LoadStoreHelper(Instruction* instr,
1476 unsigned srcdst = instr->Rt();
1477 unsigned addr_reg = instr->Rn();
1478 uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
1479 int num_bytes = 1 << instr->SizeLS();
1480 uint8_t* stack =
NULL;
1489 if (instr->IsStore()) {
1490 LoadStoreWriteBack(addr_reg, offset, addrmode);
1494 stack =
reinterpret_cast<uint8_t*
>(
sp());
1502 case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes));
break;
1506 case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes);
break;
1532 case LDR_s: set_sreg(srcdst, MemoryReadFP32(address));
break;
1533 case LDR_d: set_dreg(srcdst, MemoryReadFP64(address));
break;
1534 case STR_s: MemoryWriteFP32(address, sreg(srcdst));
break;
1535 case STR_d: MemoryWriteFP64(address, dreg(srcdst));
break;
1543 if (instr->IsLoad()) {
1546 stack =
reinterpret_cast<uint8_t*
>(
sp());
1548 LoadStoreWriteBack(addr_reg, offset, addrmode);
1553 CheckMemoryAccess(address, stack);
1557 void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
1558 LoadStorePairHelper(instr,
Offset);
1562 void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
1563 LoadStorePairHelper(instr,
PreIndex);
1567 void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
1572 void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
1573 LoadStorePairHelper(instr,
Offset);
1577 void Simulator::LoadStorePairHelper(Instruction* instr,
1579 unsigned rt = instr->Rt();
1580 unsigned rt2 = instr->Rt2();
1581 unsigned addr_reg = instr->Rn();
1582 int offset = instr->ImmLSPair() << instr->SizeLSPair();
1583 uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
1584 uint8_t* stack =
NULL;
1593 if (instr->IsStore()) {
1594 LoadStoreWriteBack(addr_reg, offset, addrmode);
1598 stack =
reinterpret_cast<uint8_t*
>(
sp());
1609 set_wreg(rt, MemoryRead32(address));
1610 set_wreg(rt2, MemoryRead32(address +
kWRegSize));
1614 set_sreg(rt, MemoryReadFP32(address));
1615 set_sreg(rt2, MemoryReadFP32(address +
kSRegSize));
1619 set_xreg(rt, MemoryRead64(address));
1620 set_xreg(rt2, MemoryRead64(address +
kXRegSize));
1624 set_dreg(rt, MemoryReadFP64(address));
1625 set_dreg(rt2, MemoryReadFP64(address +
kDRegSize));
1635 MemoryWrite32(address, wreg(rt));
1636 MemoryWrite32(address +
kWRegSize, wreg(rt2));
1640 MemoryWriteFP32(address, sreg(rt));
1641 MemoryWriteFP32(address +
kSRegSize, sreg(rt2));
1645 MemoryWrite64(address, xreg(rt));
1646 MemoryWrite64(address +
kXRegSize, xreg(rt2));
1650 MemoryWriteFP64(address, dreg(rt));
1651 MemoryWriteFP64(address +
kDRegSize, dreg(rt2));
1661 if (instr->IsLoad()) {
1664 stack =
reinterpret_cast<uint8_t*
>(
sp());
1666 LoadStoreWriteBack(addr_reg, offset, addrmode);
1671 CheckMemoryAccess(address, stack);
1675 void Simulator::VisitLoadLiteral(Instruction* instr) {
1676 uint8_t* address = instr->LiteralAddress();
1677 unsigned rt = instr->Rt();
1680 case LDR_w_lit: set_wreg(rt, MemoryRead32(address));
break;
1681 case LDR_x_lit: set_xreg(rt, MemoryRead64(address));
break;
1682 case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address));
break;
1683 case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address));
break;
1689 uint8_t* Simulator::LoadStoreAddress(
unsigned addr_reg,
1694 if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
1698 FATAL(
"ALIGNMENT EXCEPTION");
1705 return reinterpret_cast<uint8_t*
>(address);
1709 void Simulator::LoadStoreWriteBack(
unsigned addr_reg,
// Detect accesses in the dead zone between the stack guard (stack_limit_)
// and the current simulated stack pointer: such reads/writes are below
// sp and therefore invalid. Dumps the three addresses and aborts.
1720 void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
1721 if ((address >= stack_limit_) && (address < stack)) {
1722 fprintf(
stream_,
"ACCESS BELOW STACK POINTER:\n");
1723 fprintf(
stream_,
" sp is here: 0x%16p\n", stack);
1724 fprintf(
stream_,
" access was here: 0x%16p\n", address);
1725 fprintf(
stream_,
" stack limit is here: 0x%16p\n", stack_limit_);
1727 FATAL(
"ACCESS BELOW STACK POINTER");
// Read `num_bytes` (1..8) from simulated memory via memcpy, which makes
// the access safe for unaligned addresses; the value is returned
// zero-extended to 64 bits.
1732 uint64_t Simulator::MemoryRead(uint8_t* address,
unsigned num_bytes) {
1734 ASSERT((num_bytes > 0) && (num_bytes <=
sizeof(uint64_t)));
1736 memcpy(&read, address, num_bytes);
1741 uint8_t Simulator::MemoryRead8(uint8_t* address) {
1742 return MemoryRead(address,
sizeof(uint8_t));
1746 uint16_t Simulator::MemoryRead16(uint8_t* address) {
1747 return MemoryRead(address,
sizeof(
uint16_t));
1751 uint32_t Simulator::MemoryRead32(uint8_t* address) {
1752 return MemoryRead(address,
sizeof(uint32_t));
1756 float Simulator::MemoryReadFP32(uint8_t* address) {
1757 return rawbits_to_float(MemoryRead32(address));
1761 uint64_t Simulator::MemoryRead64(uint8_t* address) {
1762 return MemoryRead(address,
sizeof(uint64_t));
1766 double Simulator::MemoryReadFP64(uint8_t* address) {
1767 return rawbits_to_double(MemoryRead64(address));
1771 void Simulator::MemoryWrite(uint8_t* address,
1773 unsigned num_bytes) {
1775 ASSERT((num_bytes > 0) && (num_bytes <=
sizeof(uint64_t)));
1777 LogWrite(address, value, num_bytes);
1778 memcpy(address, &value, num_bytes);
1782 void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
1783 MemoryWrite(address, value,
sizeof(uint32_t));
1787 void Simulator::MemoryWriteFP32(uint8_t* address,
float value) {
1788 MemoryWrite32(address, float_to_rawbits(value));
1792 void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
1793 MemoryWrite(address, value,
sizeof(uint64_t));
1797 void Simulator::MemoryWriteFP64(uint8_t* address,
double value) {
1798 MemoryWrite64(address, double_to_rawbits(value));
1802 void Simulator::VisitMoveWideImmediate(Instruction* instr) {
1805 int64_t new_xn_val = 0;
1807 bool is_64_bits = instr->SixtyFourBits() == 1;
1809 ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
1812 int64_t
shift = instr->ShiftMoveWide() * 16;
1813 int64_t shifted_imm16 = instr->ImmMoveWide() <<
shift;
1819 new_xn_val = ~shifted_imm16;
1820 if (!is_64_bits) new_xn_val &=
kWRegMask;
1825 unsigned reg_code = instr->Rd();
1826 int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
1828 new_xn_val = (prev_xn_val & ~(0xffff
L <<
shift)) | shifted_imm16;
1833 new_xn_val = shifted_imm16;
1841 set_xreg(instr->Rd(), new_xn_val);
1845 void Simulator::VisitConditionalSelect(Instruction* instr) {
1846 uint64_t new_val = xreg(instr->Rn());
1848 if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
1849 new_val = xreg(instr->Rm());
1854 case CSINC_x: new_val++;
break;
1856 case CSINV_x: new_val = ~new_val;
break;
1858 case CSNEG_x: new_val = -new_val;
break;
1864 set_reg(reg_size, instr->Rd(), new_val);
1868 void Simulator::VisitDataProcessing1Source(Instruction* instr) {
1869 unsigned dst = instr->Rd();
1870 unsigned src = instr->Rn();
1875 case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16));
break;
1876 case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16));
break;
1877 case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32));
break;
1878 case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32));
break;
1879 case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64));
break;
1897 uint64_t Simulator::ReverseBits(uint64_t value,
unsigned num_bits) {
1899 uint64_t result = 0;
1900 for (
unsigned i = 0; i < num_bits; i++) {
1901 result = (result << 1) | (value & 1);
1908 uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
1912 uint64_t mask = 0xff00000000000000UL;
1913 for (
int i = 7; i >= 0; i--) {
1914 bytes[i] = (value & mask) >> (i * 8);
1922 ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
1923 static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
1924 {4, 5, 6, 7, 0, 1, 2, 3},
1925 {0, 1, 2, 3, 4, 5, 6, 7} };
1926 uint64_t result = 0;
1927 for (
int i = 0; i < 8; i++) {
1929 result |= bytes[permute_table[
mode][i]];
1935 void Simulator::VisitDataProcessing2Source(Instruction* instr) {
1940 int32_t rn = wreg(instr->Rn());
1941 int32_t rm = wreg(instr->Rm());
1942 if ((rn ==
kWMinInt) && (rm == -1)) {
1944 }
else if (rm == 0) {
1953 int64_t rn = xreg(instr->Rn());
1954 int64_t rm = xreg(instr->Rm());
1955 if ((rn ==
kXMinInt) && (rm == -1)) {
1957 }
else if (rm == 0) {
1966 uint32_t rn =
static_cast<uint32_t
>(wreg(instr->Rn()));
1967 uint32_t rm =
static_cast<uint32_t
>(wreg(instr->Rm()));
1977 uint64_t rn =
static_cast<uint64_t
>(xreg(instr->Rn()));
1978 uint64_t rm =
static_cast<uint64_t
>(xreg(instr->Rm()));
2003 int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
2004 unsigned shift = wreg(instr->Rm()) & mask;
2005 result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
2008 set_reg(reg_size, instr->Rd(), result);
// Returns the high 64 bits of the signed 128-bit product u * v
// (simulates SMULH). Classic schoolbook multiword multiply from
// Hacker's Delight, section 8-2: split each operand into 32-bit halves,
// form the partial products, and propagate the carries into the high word.
// (Fix: restores the half-word splits and partial-product lines that
// extraction elided; the visible lines fully determine them.)
static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
  uint64_t u0, v0, w0;
  int64_t u1, v1, w1, w2, t;

  u0 = u & 0xffffffffL;   // low half, zero-extended
  u1 = u >> 32;           // high half, sign-extended
  v0 = v & 0xffffffffL;
  v1 = v >> 32;

  w0 = u0 * v0;           // low * low
  t = u1 * v0 + (w0 >> 32);
  w1 = t & 0xffffffffL;
  w2 = t >> 32;
  w1 = u0 * v1 + w1;

  return u1 * v1 + w2 + (w1 >> 32);
}
2034 void Simulator::VisitDataProcessing3Source(Instruction* instr) {
2040 uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
2041 uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
2042 int64_t rn_s32 = reg<int32_t>(instr->Rn());
2043 int64_t rm_s32 = reg<int32_t>(instr->Rm());
2047 result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
2051 result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
2053 case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32);
break;
2054 case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32);
break;
2055 case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32);
break;
2056 case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32);
break;
2059 result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
2063 set_reg(reg_size, instr->Rd(), result);
2067 void Simulator::VisitBitfield(Instruction* instr) {
2071 int64_t R = instr->ImmR();
2072 int64_t
S = instr->ImmS();
2073 int64_t diff = S - R;
2076 mask = diff < reg_size - 1 ? (1
L << (diff + 1)) - 1
2079 mask = ((1
L << (S + 1)) - 1);
2080 mask = (
static_cast<uint64_t
>(mask) >> R) | (mask << (reg_size - R));
2087 bool inzero =
false;
2088 bool extend =
false;
2106 int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
2107 int64_t src = reg(reg_size, instr->Rn());
2109 int64_t result = (
static_cast<uint64_t
>(src) >> R) | (src << (reg_size - R));
2111 int64_t topbits = ((1
L << (reg_size - diff - 1)) - 1) << (diff + 1);
2112 int64_t signbits = extend && ((src >>
S) & 1) ? topbits : 0;
2115 result = signbits | (result & mask) | (dst & ~mask);
2117 set_reg(reg_size, instr->Rd(), result);
2121 void Simulator::VisitExtract(Instruction* instr) {
2122 unsigned lsb = instr->ImmS();
2127 (
static_cast<uint64_t
>(reg(reg_size, instr->Rm())) >> lsb) |
2128 (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
2132 void Simulator::VisitFPImmediate(Instruction* instr) {
2133 AssertSupportedFPCR();
2135 unsigned dest = instr->Rd();
2137 case FMOV_s_imm: set_sreg(dest, instr->ImmFP32());
break;
2138 case FMOV_d_imm: set_dreg(dest, instr->ImmFP64());
break;
2144 void Simulator::VisitFPIntegerConvert(Instruction* instr) {
2145 AssertSupportedFPCR();
2147 unsigned dst = instr->Rd();
2148 unsigned src = instr->Rn();
2201 case FMOV_ws: set_wreg(dst, sreg_bits(src));
break;
2202 case FMOV_xd: set_xreg(dst, dreg_bits(src));
break;
2203 case FMOV_sw: set_sreg_bits(dst, wreg(src));
break;
2204 case FMOV_dx: set_dreg_bits(dst, xreg(src));
break;
2208 case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round));
break;
2209 case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round));
break;
2210 case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round));
break;
2212 set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
2215 case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round));
break;
2216 case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round));
break;
2217 case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round));
break;
2219 set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
2228 void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
2229 AssertSupportedFPCR();
2231 unsigned dst = instr->Rd();
2232 unsigned src = instr->Rn();
2233 int fbits = 64 - instr->FPScale();
2241 set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
2244 set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
2247 set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
2251 UFixedToDouble(reg<uint32_t>(src), fbits, round));
2255 set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
2258 set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
2261 set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
2265 UFixedToFloat(reg<uint32_t>(src), fbits, round));
2274 value = FPRoundInt(value, rmode);
2284 int64_t Simulator::FPToInt64(
double value,
FPRounding rmode) {
2285 value = FPRoundInt(value, rmode);
2291 return std::isnan(value) ? 0 :
static_cast<int64_t
>(value);
2295 uint32_t Simulator::FPToUInt32(
double value,
FPRounding rmode) {
2296 value = FPRoundInt(value, rmode);
2299 }
else if (value < 0.0) {
2302 return std::isnan(value) ? 0 :
static_cast<uint32_t
>(value);
2306 uint64_t Simulator::FPToUInt64(
double value,
FPRounding rmode) {
2307 value = FPRoundInt(value, rmode);
2310 }
else if (value < 0.0) {
2313 return std::isnan(value) ? 0 :
static_cast<uint64_t
>(value);
2317 void Simulator::VisitFPCompare(Instruction* instr) {
2318 AssertSupportedFPCR();
2322 double fn_val = fpreg(reg_size, instr->Rn());
2326 case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm()));
break;
2334 void Simulator::VisitFPConditionalCompare(Instruction* instr) {
2335 AssertSupportedFPCR();
2340 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2345 FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
2348 nzcv().SetFlags(instr->Nzcv());
2357 void Simulator::VisitFPConditionalSelect(Instruction* instr) {
2358 AssertSupportedFPCR();
2361 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2362 selected = instr->Rn();
2364 selected = instr->Rm();
2368 case FCSEL_s: set_sreg(instr->Rd(), sreg(selected));
break;
2369 case FCSEL_d: set_dreg(instr->Rd(), dreg(selected));
break;
2375 void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
2376 AssertSupportedFPCR();
2378 unsigned fd = instr->Rd();
2379 unsigned fn = instr->Rn();
2382 case FMOV_s: set_sreg(fd, sreg(fn));
break;
2383 case FMOV_d: set_dreg(fd, dreg(fn));
break;
2384 case FABS_s: set_sreg(fd, std::fabs(sreg(fn)));
break;
2385 case FABS_d: set_dreg(fd, std::fabs(dreg(fn)));
break;
2386 case FNEG_s: set_sreg(fd, -sreg(fn));
break;
2387 case FNEG_d: set_dreg(fd, -dreg(fn));
break;
2388 case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn)));
break;
2389 case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn)));
break;
2394 case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn),
FPZero));
break;
2395 case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn),
FPZero));
break;
2396 case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn)));
break;
2418 template <
class T,
int ebits,
int mbits>
2419 static T FPRound(int64_t
sign, int64_t exponent, uint64_t mantissa,
2421 ASSERT((sign == 0) || (sign == 1));
2478 static const int mantissa_offset = 0;
2479 static const int exponent_offset = mantissa_offset + mbits;
2480 static const int sign_offset = exponent_offset + ebits;
2484 if (mantissa == 0) {
2485 return sign << sign_offset;
2490 static const int infinite_exponent = (1 << ebits) - 1;
2491 static const int max_normal_exponent = infinite_exponent - 1;
2495 exponent += max_normal_exponent >> 1;
2497 if (exponent > max_normal_exponent) {
2500 exponent = infinite_exponent;
2502 return (sign << sign_offset) |
2503 (exponent << exponent_offset) |
2504 (mantissa << mantissa_offset);
2510 int shift = highest_significant_bit - mbits;
2512 if (exponent <= 0) {
2518 shift += -exponent + 1;
2525 if (shift > (highest_significant_bit + 1)) {
2527 return sign << sign_offset;
2535 mantissa &= ~(1UL << highest_significant_bit);
2541 uint64_t onebit_mantissa = (mantissa >> (
shift)) & 1;
2542 uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
2543 uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
2544 T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
2546 T result = (sign << sign_offset) |
2547 (exponent << exponent_offset) |
2548 ((mantissa >>
shift) << mantissa_offset);
2558 return result + halfbit_adjusted;
2563 return (sign << sign_offset) |
2564 (exponent << exponent_offset) |
2565 ((mantissa << -shift) << mantissa_offset);
2571 static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
2574 FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(
sign,
2578 return rawbits_to_double(bits);
2583 static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
2586 FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(
sign,
2590 return rawbits_to_float(bits);
2594 double Simulator::FixedToDouble(int64_t src,
int fbits,
FPRounding round) {
2596 return UFixedToDouble(src, fbits, round);
2599 return -UFixedToDouble(-src, fbits, round);
2604 double Simulator::UFixedToDouble(uint64_t src,
int fbits,
FPRounding round) {
2614 const int64_t exponent = highest_significant_bit - fbits;
2616 return FPRoundToDouble(0, exponent, src, round);
2620 float Simulator::FixedToFloat(int64_t src,
int fbits,
FPRounding round) {
2622 return UFixedToFloat(src, fbits, round);
2625 return -UFixedToFloat(-src, fbits, round);
2630 float Simulator::UFixedToFloat(uint64_t src,
int fbits,
FPRounding round) {
2640 const int32_t exponent = highest_significant_bit - fbits;
2642 return FPRoundToFloat(0, exponent, src, round);
2646 double Simulator::FPRoundInt(
double value,
FPRounding round_mode) {
2647 if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
2648 (value == kFP64NegativeInfinity)) {
2651 return FPProcessNaN(value);
2654 double int_result = floor(value);
2655 double error = value - int_result;
2656 switch (round_mode) {
2660 if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
2668 if ((error > 0.5) ||
2669 ((error == 0.5) && (fmod(int_result, 2) != 0))) {
2678 int_result = ceil(value);
2692 double Simulator::FPToDouble(
float value) {
2695 if (fpcr().DN())
return kFP64DefaultNaN;
2702 uint32_t raw = float_to_rawbits(value);
2704 uint64_t sign = raw >> 31;
2705 uint64_t exponent = (1 << 11) - 1;
2707 payload <<= (52 - 23);
2708 payload |= (1
L << 51);
2710 return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
2720 return static_cast<double>(value);
2725 return static_cast<double>(value);
2729 float Simulator::FPToFloat(
double value,
FPRounding round_mode) {
2736 if (fpcr().DN())
return kFP32DefaultNaN;
2742 uint64_t raw = double_to_rawbits(value);
2744 uint32_t sign = raw >> 63;
2745 uint32_t exponent = (1 << 8) - 1;
2747 payload |= (1 << 22);
2749 return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
2756 return static_cast<float>(value);
2763 uint64_t raw = double_to_rawbits(value);
2765 uint32_t sign = raw >> 63;
2771 mantissa |= (1UL << 52);
2773 return FPRoundToFloat(sign, exponent, mantissa, round_mode);
2782 void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
2783 AssertSupportedFPCR();
2785 unsigned fd = instr->Rd();
2786 unsigned fn = instr->Rn();
2787 unsigned fm = instr->Rm();
2791 case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm)));
return;
2792 case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm)));
return;
2793 case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm)));
return;
2794 case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm)));
return;
2799 if (FPProcessNaNs(instr))
return;
2802 case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm)));
break;
2803 case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm)));
break;
2804 case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm)));
break;
2805 case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm)));
break;
2806 case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm)));
break;
2807 case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm)));
break;
2808 case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm)));
break;
2809 case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm)));
break;
2810 case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm)));
break;
2811 case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm)));
break;
2812 case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm)));
break;
2813 case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm)));
break;
// Simulates the FP fused multiply-add family (FMADD/FMSUB and their
// negated forms) in single and double precision. All variants funnel
// through FPMulAdd with operand signs flipped per instruction.
// NOTE(review): the switch header, the case labels for the negated
// variants and the closing braces were elided by extraction; comments
// only added here.
2825 void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
2826 AssertSupportedFPCR();
2828 unsigned fd = instr->Rd();
2829 unsigned fn = instr->Rn();
2830 unsigned fm = instr->Rm();
2831 unsigned fa = instr->Ra();
// fd = fa + fn * fm
2835 case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm)));
break;
// fd = fa - fn * fm (product negated via -fn)
2836 case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm)));
break;
2837 case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm)));
break;
2838 case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm)));
break;
// Negated accumulator and product: -fa - fn * fm (presumably FNMADD_s;
// case label elided — verify against the original source).
2841 set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
// Negated accumulator only: -fa + fn * fm (presumably FNMSUB_s; verify).
2844 set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
2847 set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
2850 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
2857 template <
typename T>
2858 T Simulator::FPAdd(
T op1,
T op2) {
2864 return FPDefaultNaN<T>();
2872 template <
typename T>
2873 T Simulator::FPDiv(
T op1,
T op2) {
2879 return FPDefaultNaN<T>();
2887 template <
typename T>
2888 T Simulator::FPMax(
T a,
T b) {
2892 if ((a == 0.0) && (b == 0.0) &&
2893 (copysign(1.0, a) != copysign(1.0, b))) {
2897 return (a > b) ? a : b;
2902 template <
typename T>
2903 T Simulator::FPMaxNM(
T a,
T b) {
2905 a = kFP64NegativeInfinity;
2907 b = kFP64NegativeInfinity;
2910 T result = FPProcessNaNs(a, b);
2911 return std::isnan(result) ? result : FPMax(a, b);
2914 template <
typename T>
2915 T Simulator::FPMin(
T a,
T b) {
2919 if ((a == 0.0) && (b == 0.0) &&
2920 (copysign(1.0, a) != copysign(1.0, b))) {
2924 return (a < b) ? a : b;
2929 template <
typename T>
2930 T Simulator::FPMinNM(
T a,
T b) {
2932 a = kFP64PositiveInfinity;
2934 b = kFP64PositiveInfinity;
2937 T result = FPProcessNaNs(a, b);
2938 return std::isnan(result) ? result : FPMin(a, b);
2942 template <
typename T>
2943 T Simulator::FPMul(
T op1,
T op2) {
2949 return FPDefaultNaN<T>();
2957 template<
typename T>
2958 T Simulator::FPMulAdd(
T a,
T op1,
T op2) {
2959 T result = FPProcessNaNs3(a, op1, op2);
2961 T sign_a = copysign(1.0, a);
2962 T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
2964 bool operation_generates_nan =
2967 (
std::isinf(a) && isinf_prod && (sign_a != sign_prod));
2971 if (operation_generates_nan &&
IsQuietNaN(a)) {
2972 return FPDefaultNaN<T>();
2979 if (operation_generates_nan) {
2980 return FPDefaultNaN<T>();
2985 if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
2986 return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
2994 if ((a == 0.0) && (result == 0.0)) {
2995 return copysign(0.0, sign_prod);
3002 template <
typename T>
3003 T Simulator::FPSqrt(
T op) {
3005 return FPProcessNaN(op);
3006 }
else if (op < 0.0) {
3007 return FPDefaultNaN<T>();
3009 return std::sqrt(op);
3014 template <
typename T>
3015 T Simulator::FPSub(
T op1,
T op2) {
3021 return FPDefaultNaN<T>();
3029 template <
typename T>
3030 T Simulator::FPProcessNaN(
T op) {
3032 return fpcr().DN() ? FPDefaultNaN<T>() :
ToQuietNaN(op);
3036 template <
typename T>
3037 T Simulator::FPProcessNaNs(
T op1,
T op2) {
3039 return FPProcessNaN(op1);
3041 return FPProcessNaN(op2);
3044 return FPProcessNaN(op1);
3047 return FPProcessNaN(op2);
3054 template <
typename T>
3055 T Simulator::FPProcessNaNs3(
T op1,
T op2,
T op3) {
3057 return FPProcessNaN(op1);
3059 return FPProcessNaN(op2);
3061 return FPProcessNaN(op3);
3064 return FPProcessNaN(op1);
3067 return FPProcessNaN(op2);
3070 return FPProcessNaN(op3);
3077 bool Simulator::FPProcessNaNs(Instruction* instr) {
3078 unsigned fd = instr->Rd();
3079 unsigned fn = instr->Rn();
3080 unsigned fm = instr->Rm();
3084 double result = FPProcessNaNs(dreg(fn), dreg(fm));
3086 set_dreg(fd, result);
3090 float result = FPProcessNaNs(sreg(fn), sreg(fm));
3092 set_sreg(fd, result);
3101 void Simulator::VisitSystem(Instruction* instr) {
3108 switch (instr->ImmSystemRegister()) {
3109 case NZCV: set_xreg(instr->Rt(), nzcv().RawValue());
break;
3110 case FPCR: set_xreg(instr->Rt(), fpcr().RawValue());
break;
3116 switch (instr->ImmSystemRegister()) {
3117 case NZCV: nzcv().SetRawValue(xreg(instr->Rt()));
break;
3118 case FPCR: fpcr().SetRawValue(xreg(instr->Rt()));
break;
3126 switch (instr->ImmHint()) {
3131 __sync_synchronize();
3138 bool Simulator::GetValue(
const char* desc, int64_t* value) {
3139 int regnum = CodeFromName(desc);
3141 unsigned code = regnum;
3150 if (desc[0] ==
'w') {
3156 }
else if (strncmp(desc,
"0x", 2) == 0) {
3157 return SScanF(desc + 2,
"%" SCNx64,
3158 reinterpret_cast<uint64_t*>(value)) == 1;
3160 return SScanF(desc,
"%" SCNu64,
3161 reinterpret_cast<uint64_t*>(value)) == 1;
3166 bool Simulator::PrintValue(
const char* desc) {
3167 if (strcmp(desc,
"csp") == 0) {
3169 PrintF(
"%s csp:%s 0x%016" PRIx64
"%s\n",
3172 }
else if (strcmp(desc,
"wcsp") == 0) {
3174 PrintF(
"%s wcsp:%s 0x%08" PRIx32
"%s\n",
3179 int i = CodeFromName(desc);
3181 if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters)
return false;
3183 if (desc[0] ==
'v') {
3184 PrintF(
"%s %s:%s 0x%016" PRIx64
"%s (%s%s:%s %g%s %s:%s %g%s)\n",
3185 clr_fpreg_name, VRegNameForCode(i),
3186 clr_fpreg_value, double_to_rawbits(dreg(i)),
3188 clr_fpreg_name, DRegNameForCode(i),
3189 clr_fpreg_value, dreg(i),
3190 clr_fpreg_name, SRegNameForCode(i),
3191 clr_fpreg_value, sreg(i),
3194 }
else if (desc[0] ==
'd') {
3195 PrintF(
"%s %s:%s %g%s\n",
3196 clr_fpreg_name, DRegNameForCode(i),
3197 clr_fpreg_value, dreg(i),
3200 }
else if (desc[0] ==
's') {
3201 PrintF(
"%s %s:%s %g%s\n",
3202 clr_fpreg_name, SRegNameForCode(i),
3203 clr_fpreg_value, sreg(i),
3206 }
else if (desc[0] ==
'w') {
3207 PrintF(
"%s %s:%s 0x%08" PRIx32
"%s\n",
3208 clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
3213 PrintF(
"%s %s:%s 0x%016" PRIx64
"%s\n",
3214 clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
3220 void Simulator::Debug() {
3221 #define COMMAND_SIZE 63
3222 #define ARG_SIZE 255
3225 #define XSTR(a) STR(a)
3227 char cmd[COMMAND_SIZE + 1];
3228 char arg1[ARG_SIZE + 1];
3229 char arg2[ARG_SIZE + 1];
3230 char* argv[3] = { cmd, arg1, arg2 };
3233 cmd[COMMAND_SIZE] = 0;
3238 bool cleared_log_disasm_bit =
false;
3242 PrintInstructionsAt(pc_, 1);
3249 char* last_input = last_debugger_input();
3250 if (strcmp(line,
"\n") == 0 && (last_input !=
NULL)) {
3255 set_last_debugger_input(line);
3260 int argc = SScanF(line,
3261 "%" XSTR(COMMAND_SIZE)
"s "
3262 "%" XSTR(ARG_SIZE)
"s "
3263 "%" XSTR(ARG_SIZE)
"s",
3267 if ((strcmp(cmd,
"si") == 0) || (strcmp(cmd,
"stepi") == 0)) {
3272 pc_modified_ =
false;
3275 ExecuteInstruction();
3277 int64_t number_of_instructions_to_execute = 1;
3278 GetValue(arg1, &number_of_instructions_to_execute);
3280 set_log_parameters(log_parameters() |
LOG_DISASM);
3281 while (number_of_instructions_to_execute-- > 0) {
3282 ExecuteInstruction();
3284 set_log_parameters(log_parameters() & ~
LOG_DISASM);
3291 pc_modified_ =
true;
3294 }
else if ((strcmp(cmd,
"next") == 0) || (strcmp(cmd,
"n") == 0)) {
3296 break_on_next_ =
true;
3301 }
else if ((strcmp(cmd,
"continue") == 0) ||
3302 (strcmp(cmd,
"cont") == 0) ||
3303 (strcmp(cmd,
"c") == 0)) {
3308 }
else if (strcmp(cmd,
"disassemble") == 0 ||
3309 strcmp(cmd,
"disasm") == 0 ||
3310 strcmp(cmd,
"di") == 0) {
3311 int64_t n_of_instrs_to_disasm = 10;
3312 int64_t address =
reinterpret_cast<int64_t
>(pc_);
3314 GetValue(arg1, &n_of_instrs_to_disasm);
3317 GetValue(arg2, &address);
3321 PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
3322 n_of_instrs_to_disasm);
3326 }
else if ((strcmp(cmd,
"print") == 0) || (strcmp(cmd,
"p") == 0)) {
3328 if (strcmp(arg1,
"all") == 0) {
3329 PrintRegisters(
true);
3330 PrintFPRegisters(
true);
3332 if (!PrintValue(arg1)) {
3333 PrintF(
"%s unrecognized\n", arg1);
3338 "print <register>\n"
3339 " Print the content of a register. (alias 'p')\n"
3340 " 'print all' will print all registers.\n"
3341 " Use 'printobject' to get more details about the value.\n");
3345 }
else if ((strcmp(cmd,
"printobject") == 0) ||
3346 (strcmp(cmd,
"po") == 0)) {
3349 if (GetValue(arg1, &value)) {
3359 PrintF(
"%s unrecognized\n", arg1);
3362 PrintF(
"printobject <value>\n"
3363 "printobject <register>\n"
3364 " Print details about the value. (alias 'po')\n");
3368 }
else if (strcmp(cmd,
"stack") == 0 || strcmp(cmd,
"mem") == 0) {
3369 int64_t* cur =
NULL;
3370 int64_t* end =
NULL;
3373 if (strcmp(cmd,
"stack") == 0) {
3374 cur =
reinterpret_cast<int64_t*
>(jssp());
3378 if (!GetValue(arg1, &value)) {
3379 PrintF(
"%s unrecognized\n", arg1);
3382 cur =
reinterpret_cast<int64_t*
>(value);
3387 if (argc == next_arg) {
3389 }
else if (argc == next_arg + 1) {
3390 if (!GetValue(argv[next_arg], &words)) {
3391 PrintF(
"%s unrecognized\n", argv[next_arg]);
3392 PrintF(
"Printing 10 double words by default");
3401 PrintF(
" 0x%016" PRIx64
": 0x%016" PRIx64
" %10" PRId64,
3402 reinterpret_cast<uint64_t>(cur), *cur, *cur);
3403 HeapObject*
obj =
reinterpret_cast<HeapObject*
>(*cur);
3404 int64_t value = *cur;
3405 Heap* current_heap = v8::internal::Isolate::Current()->heap();
3406 if (((value & 1) == 0) || current_heap->Contains(obj)) {
3411 PrintF(
"smi %" PRId32, untagged);
3422 }
else if (strcmp(cmd,
"trace") == 0 || strcmp(cmd,
"t") == 0) {
3425 PrintF(
"Enabling disassembly and registers tracing\n");
3428 PrintF(
"Disabling disassembly and registers tracing\n");
3433 }
else if (strcmp(cmd,
"break") == 0 || strcmp(cmd,
"b") == 0) {
3436 if (GetValue(arg1, &value)) {
3437 SetBreakpoint(reinterpret_cast<Instruction*>(value));
3439 PrintF(
"%s unrecognized\n", arg1);
3443 PrintF(
"Use `break <address>` to set or disable a breakpoint\n");
3447 }
else if (strcmp(cmd,
"gdb") == 0) {
3448 PrintF(
"Relinquishing control to gdb.\n");
3450 PrintF(
"Regaining control from gdb.\n");
3453 }
else if (strcmp(cmd,
"sysregs") == 0) {
3454 PrintSystemRegisters();
3457 }
else if (strcmp(cmd,
"help") == 0 || strcmp(cmd,
"h") == 0) {
3461 " Step <n> instructions.\n"
3463 " Continue execution until a BL instruction is reached.\n"
3464 " At this point a breakpoint is set just after this BL.\n"
3465 " Then execution is resumed. It will probably later hit the\n"
3466 " breakpoint just set.\n"
3467 "continue / cont / c\n"
3468 " Continue execution from here.\n"
3469 "disassemble / disasm / di\n"
3470 " disassemble <n> <address>\n"
3471 " Disassemble <n> instructions from current <address>.\n"
3472 " By default <n> is 20 and <address> is the current pc.\n"
3474 " print <register>\n"
3475 " Print the content of a register.\n"
3476 " 'print all' will print all registers.\n"
3477 " Use 'printobject' to get more details about the value.\n"
3478 "printobject / po\n"
3479 " printobject <value>\n"
3480 " printobject <register>\n"
3481 " Print details about the value.\n"
3483 " stack [<words>]\n"
3484 " Dump stack content, default dump 10 words\n"
3486 " mem <address> [<words>]\n"
3487 " Dump memory content, default dump 10 words\n"
3489 " Toggle disassembly and register tracing\n"
3491 " break : list all breakpoints\n"
3492 " break <address> : set / enable / disable a breakpoint.\n"
3496 " Print all system registers (including NZCV).\n");
3498 PrintF(
"Unknown command: %s\n", cmd);
3499 PrintF(
"Use 'help' for more information.\n");
3502 if (cleared_log_disasm_bit ==
true) {
3503 set_log_parameters(log_parameters_ |
LOG_DISASM);
3509 void Simulator::VisitException(Instruction* instr) {
3515 uint32_t parameters;
3522 sizeof(parameters));
3524 reinterpret_cast<char const*
>(
3530 if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters &
BREAK)) {
3531 if (message !=
NULL) {
3532 PrintF(
"%sDebugger hit %d: %s%s%s\n",
3539 PrintF(
"%sDebugger hit %d.%s\n",
3549 set_log_parameters(log_parameters() | parameters);
3550 if (parameters &
LOG_SYS_REGS) { PrintSystemRegisters(); }
3551 if (parameters &
LOG_REGS) { PrintRegisters(); }
3552 if (parameters &
LOG_FP_REGS) { PrintFPRegisters(); }
3555 set_log_parameters(log_parameters() & ~parameters);
3558 set_log_parameters(parameters);
3564 parameters &= ~log_parameters();
3566 if (parameters & LOG_SYS_REGS) PrintSystemRegisters(
true);
3567 if (parameters & LOG_REGS) PrintRegisters(
true);
3568 if (parameters & LOG_FP_REGS) PrintFPRegisters(
true);
3574 pc_ = pc_->InstructionAtOffset(
RoundUp(size, kInstructionSize));
3579 set_pc(pc_->following());
3582 if (parameters & BREAK) Debug();
3585 DoRuntimeCall(instr);
3593 const char* format = reg<const char*>(0);
3600 result = fprintf(
stream_, format,
3601 xreg(1), xreg(2), xreg(3), xreg(4),
3602 xreg(5), xreg(6), xreg(7));
3604 result = fprintf(
stream_, format,
3605 dreg(0), dreg(1), dreg(2), dreg(3),
3606 dreg(4), dreg(5), dreg(6), dreg(7));
3609 result = fprintf(
stream_,
"%s", format);
3614 CorruptAllCallerSavedCPURegisters();
3617 set_xreg(0, result);
3626 fprintf(
stream_,
"Hit UNREACHABLE marker at PC=%p.\n",
3627 reinterpret_cast<void*>(pc_));
3641 #endif // USE_SIMULATOR
3645 #endif // V8_TARGET_ARCH_ARM64
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
const intptr_t kSmiTagMask
#define CHECK_EQ(expected, value)
const RegList kCallerSaved
const unsigned kDebugMessageOffset
const Instr kImmExceptionIsRedirectedCall
void PrintF(const char *format,...)
const LowDwVfpRegister d0
const unsigned kDRegSizeInBits
const unsigned kZeroRegCode
int CountLeadingZeros(uint64_t value, int width)
const unsigned kXRegSizeInBits
int CountLeadingSignBits(int64_t value, int width)
kSerializedDataOffset Object
TypeImpl< ZoneTypeConfig > Type
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage message
#define ASSERT(condition)
const unsigned kDebuggerTracingDirectivesMask
char * ReadLine(const char *prompt)
const int kNumberOfCalleeSavedRegisters
const Instr kImmExceptionIsPrintf
bool is_intn(int64_t x, unsigned n)
const unsigned kWRegSizeInBits
const uint32_t kSlotsZapValue
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_string(expose_natives_as
static Instr ImmException(int imm16)
bool IsSignallingNaN(double num)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
#define OFFSET_OF(type, field)
double FusedMultiplyAdd(double op1, double op2, double a)
const unsigned kPrintfLength
const unsigned kInstructionSize
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array shift
const unsigned kDebugParamsOffset
const unsigned kSRegSizeInBits
const RegList kCalleeSaved
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra code(assertions) for debugging") DEFINE_bool(code_comments
static void VPrint(const char *format, va_list args)
const unsigned kSPRegInternalCode
T RoundUp(T x, intptr_t m)
const unsigned kNumberOfFPRegisters
#define T(name, string, precedence)
const unsigned kNumberOfRegisters
const unsigned kRegCodeMask
const Instr kImmExceptionIsUnreachable
const unsigned kDebugCodeOffset
#define ASSERT_EQ(v1, v2)
StringCharacterStream *const stream_
const unsigned kPrintfTypeOffset
static int ActivationFrameAlignment()
const int kNumberOfCalleeSavedFPRegisters
double ToQuietNaN(double num)
void DeleteArray(T *array)
const int64_t kHalfWordMask
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in name
const Instr kImmExceptionIsDebug
uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x)
bool is_uintn(int64_t x, unsigned n)