35 using namespace v8::internal;
42 if (result != expected) {
43 printf(
"Expected 0x%08" PRIx32
"\t Found 0x%08" PRIx32
"\n",
47 return expected == result;
52 if (result != expected) {
53 printf(
"Expected 0x%016" PRIx64
"\t Found 0x%016" PRIx64
"\n",
57 return expected == result;
62 if (float_to_rawbits(expected) == float_to_rawbits(result)) {
65 if (
std::isnan(expected) || (expected == 0.0)) {
66 printf(
"Expected 0x%08" PRIx32
"\t Found 0x%08" PRIx32
"\n",
67 float_to_rawbits(expected), float_to_rawbits(result));
69 printf(
"Expected %.9f (0x%08" PRIx32
")\t "
70 "Found %.9f (0x%08" PRIx32
")\n",
71 expected, float_to_rawbits(expected),
72 result, float_to_rawbits(result));
80 if (double_to_rawbits(expected) == double_to_rawbits(result)) {
84 if (
std::isnan(expected) || (expected == 0.0)) {
85 printf(
"Expected 0x%016" PRIx64
"\t Found 0x%016" PRIx64
"\n",
86 double_to_rawbits(expected), double_to_rawbits(result));
88 printf(
"Expected %.17f (0x%016" PRIx64
")\t "
89 "Found %.17f (0x%016" PRIx64
")\n",
90 expected, double_to_rawbits(expected),
91 result, double_to_rawbits(result));
101 int64_t result_x = core->
xreg(reg.
code());
102 if ((result_x & 0xffffffff00000000L) != 0) {
103 printf(
"Expected 0x%08" PRIx32
"\t Found 0x%016" PRIx64
"\n",
107 uint32_t result_w = core->
wreg(reg.
code());
108 return Equal32(expected, core, result_w);
116 uint64_t result = core->
xreg(reg.
code());
117 return Equal64(expected, core, result);
128 if ((result_64 & 0xffffffff00000000L) != 0) {
129 printf(
"Expected 0x%08" PRIx32
" (%f)\t Found 0x%016" PRIx64
"\n",
130 float_to_rawbits(expected), expected, result_64);
150 int64_t expected = core->
xreg(reg0.
code());
151 int64_t result = core->
xreg(reg1.
code());
152 return Equal64(expected, core, result);
156 static char FlagN(uint32_t
flags) {
157 return (flags &
NFlag) ?
'N' :
'n';
161 static char FlagZ(uint32_t flags) {
162 return (flags &
ZFlag) ?
'Z' :
'z';
166 static char FlagC(uint32_t flags) {
167 return (flags &
CFlag) ?
'C' :
'c';
171 static char FlagV(uint32_t flags) {
172 return (flags &
VFlag) ?
'V' :
'v';
179 if (result != expected) {
180 printf(
"Expected: %c%c%c%c\t Found: %c%c%c%c\n",
181 FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
182 FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
193 printf(
"x%d\t Expected 0x%016" PRIx64
"\t Found 0x%016" PRIx64
"\n",
202 if (a_bits != b_bits) {
203 printf(
"d%d\t Expected 0x%016" PRIx64
"\t Found 0x%016" PRIx64
"\n",
214 int reg_size,
int reg_count,
RegList allowed) {
218 if (((1UL << n) & allowed) != 0) {
241 int reg_size,
int reg_count,
RegList allowed) {
245 if (((1UL << n) & allowed) != 0) {
270 if (reg_list & (1UL << i)) {
293 if (reg_list & (1UL << i)) {
340 const int x_offset = offsetof(dump_t, x_);
341 const int w_offset = offsetof(dump_t, w_);
342 const int d_offset = offsetof(dump_t, d_);
343 const int s_offset = offsetof(dump_t, s_);
344 const int sp_offset = offsetof(dump_t, sp_);
345 const int wsp_offset = offsetof(dump_t, wsp_);
346 const int flags_offset = offsetof(dump_t, flags_);
348 __ Push(xzr, dump_base, dump, tmp);
351 __ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
363 __ Add(dump, dump_base, x_offset);
370 __ Add(dump, dump_base, w_offset);
377 __ Add(dump, dump_base, d_offset);
384 __ Add(dump, dump_base, s_offset);
402 __ Mov(dump2_base, dump_base);
404 __ Pop(tmp, dump, dump_base, xzr);
406 __ Add(dump2, dump2_base, w_offset);
411 __ Add(dump2, dump2_base, x_offset);
static FPRegister Create(unsigned code, unsigned size)
int64_t xreg(unsigned code) const
float sreg(unsigned code) const
const unsigned kDRegSizeInBits
void Dump(MacroAssembler *assm)
static FPRegister SRegFromCode(unsigned code)
const unsigned kXRegSizeInBits
static Register Create(unsigned code, unsigned size)
bool EqualFP32(float expected, const RegisterDump *, float result)
uint64_t dreg_bits(unsigned code) const
#define ASSERT(condition)
int CountSetBits(uint64_t value, int width)
const unsigned kWRegSizeInBits
bool EqualRegisters(const RegisterDump *a, const RegisterDump *b)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on 
stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long expose natives in global object expose freeBuffer extension expose gc extension under the specified name expose externalize string extension number of stack frames to capture disable builtin natives files print name of functions for which code is generated use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations always try to OSR functions trace optimize function deoptimization minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions trace debugging JSON request response trace out of bounds accesses to external arrays trace_js_array_abuse automatically set the debug break flag when debugger commands are in the queue abort by crashing maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print statistics of the maximum memory committed for the heap in only print modified registers Don t break for ASM_UNIMPLEMENTED_BREAK macros print stack trace when an illegal exception is thrown randomize hashes to avoid predictable hash 
Fixed seed to use to hash property Print the time it takes to deserialize the snapshot testing_bool_flag testing_int_flag string flag tmp file in which to serialize heap Print the time it takes to lazily compile hydrogen code stubs concurrent_recompilation concurrent_sweeping Print usage including flags
double dreg(unsigned code) const
const unsigned kSRegSizeInBits
static Register WRegFromCode(unsigned code)
const unsigned kNumberOfFPRegisters
void ClobberFP(MacroAssembler *masm, RegList reg_list, double const value)
void set_list(RegList new_list)
bool Equal32(uint32_t expected, const RegisterDump *, uint32_t result)
bool EqualFP64(double expected, const RegisterDump *, double result)
bool EqualNzcv(uint32_t expected, uint32_t result)
const unsigned kNumberOfRegisters
static FPRegister DRegFromCode(unsigned code)
static Register XRegFromCode(unsigned code)
CPURegister::RegisterType type() const
int32_t wreg(unsigned code) const
RegList PopulateRegisterArray(Register *w, Register *x, Register *r, int reg_size, int reg_count, RegList allowed)
void Clobber(MacroAssembler *masm, RegList reg_list, uint64_t const value)
RegList PopulateFPRegisterArray(FPRegister *s, FPRegister *d, FPRegister *v, int reg_size, int reg_count, RegList allowed)
bool Is(const CPURegister &other) const
bool Equal64(uint64_t expected, const RegisterDump *, uint64_t result)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)