#if V8_TARGET_ARCH_ARM64

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm64.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

#if defined(USE_SIMULATOR)
byte* fast_exp_arm64_machine_code = NULL;

double fast_exp_simulator(double x) {
  Simulator* simulator = Simulator::current(Isolate::Current());
  Simulator::CallArgument args[] = {
      Simulator::CallArgument(x),
      Simulator::CallArgument::End()
  };
  return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif
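// Under the simulator, generated ARM64 code cannot be invoked directly
// through a C++ function pointer, so fast_exp_simulator above routes the
// call through Simulator::CallDouble instead.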
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  masm.SetStackPointer(csp);

  // The argument is expected in d0; other values use caller-saved registers.
  DoubleRegister input = d0;
  DoubleRegister result = d1;
  DoubleRegister double_temp1 = d2;
  DoubleRegister double_temp2 = d3;
  Register temp1 = x10;
  Register temp2 = x11;
  Register temp3 = x12;

  MathExpGenerator::EmitMathExp(&masm, input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
  masm.Fmov(d0, result);
  masm.Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm64_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
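// Illustrative usage (not part of this file): the returned pointer behaves
// like any other UnaryMathFunction, e.g.
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // Approximately e; the code is an estimation.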
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}
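// The has_frame flag lets debug-mode MacroAssembler code assert that calls
// which require a frame only happen between BeforeCall and AfterCall.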
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register receiver = x2;
  Register target_map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set the transitioned map and record the write.
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x10,
                      kLRHasNotBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
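// Storing a new map into an object is a heap write, so it must go through
// the write barrier (RecordWriteField) to keep the GC's incremental marker
// and remembered set consistent.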
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  Register receiver = x2;
  Register target_map = x3;
  Label gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Empty arrays only require a map transition.
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  Register length = x5;
  // ... (load the length, then allocate the new FixedDoubleArray) ...
  Register array_size = x6;
  // ...

  // Initialize the new array's map and smi-tagged length.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  // ...

  // Copy the elements, converting each smi to a double; holes become the
  // hole NaN.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  // ...
  FPRegister nan_d = d1;
  // ...

  __ Bind(&only_change_map);
  // ... (store target_map into the receiver's map field) ...

  __ Bind(&gc_required);
  // ... (restore state and bail out to fail) ...

  // Conversion loop: stop once dst_elements reaches dst_end.
  // ...
  __ Cmp(dst_elements, dst_end);
  // ...
}
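// A FixedDoubleArray has no tagged "hole" value, so holes are encoded as a
// reserved NaN bit pattern (kHoleNanInt64). Normal arithmetic never produces
// that exact NaN, so it can be recognized unambiguously when transitioning
// back to tagged values below.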
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register target_map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Empty arrays only require a map transition.
  Label only_change_map;
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  Label gc_required;
  __ Push(target_map, receiver, key, value);
  Register length = x5;
  // ... (load the length, then allocate the new FixedArray) ...
  Register array_size = x6;
  // ...

  // Initialize the new array's map and smi-tagged length.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  // ...

  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  // ...

  Register the_hole = x14;
  Register heap_num_map = x15;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
  // ...

  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  // ... (bail out to fail) ...

  Label loop, convert_hole;
  __ Bind(&loop);
  // ... (load the next double; holes are detected by their bit pattern) ...
  __ B(eq, &convert_hole);

  // Box the non-hole double in a new heap number.
  Register heap_num = x5;
  __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
  // ...
  __ Mov(x13, dst_elements);
  // ...

  __ Bind(&convert_hole);
  // ... (store the hole, then fall through to the loop condition) ...
  __ Cmp(dst_elements, dst_end);
  // ...

  __ Pop(value, key, receiver, target_map);
  // ...
  __ Bind(&only_change_map);
  // ...
}
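// Going from doubles to tagged values requires allocating a HeapNumber for
// each element, and any of those allocations can fail; gc_required restores
// the pushed registers and falls back to the runtime.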
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  // ...
  byte* target = sequence + kCodeAgeStubEntryOffset;
  Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
  GetCodeAgeAndParity(stub, age, parity);
  // ...
}

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age, MarkingParity parity) {
  PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
  // ...
  Code* stub = GetCodeAgeStub(isolate, age, parity);
  // ...
}
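// Code aging works by patching a function's prologue: a "young" sequence is
// rewritten to call an age-specific stub, and the age can later be read back
// from the stub address embedded in the patched sequence.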
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
  // ... (fetch the instance type of the string into result) ...

  // Direct strings go straight to the sequential/external check; indirect
  // strings (slices and cons strings) are unwrapped first.
  Label check_sequential;
  Label cons_string;
  // ...

  // Handle slices: add the slice offset to the index, then continue with
  // the parent string.
  Label indirect_string_loaded;
  __ Ldr(result.W(),
         UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ Add(index, index, result.W());
  __ B(&indirect_string_loaded);

  // Handle cons strings: only flat cons strings, whose second part is the
  // empty string, are handled in line; anything else goes to the runtime.
  __ Bind(&cons_string);
  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ Bind(&indirect_string_loaded);
  // ... (re-load the instance type of the underlying string) ...

  // Distinguish sequential and external strings.
  Label external_string, check_encoding;
  __ Bind(&check_sequential);
  // ... (compute the address of the first character) ...
  __ B(&check_encoding);

  // Handle external strings; short external strings lack a data pointer
  // and must go to the runtime.
  __ Bind(&external_string);
  if (FLAG_debug_code) {
    // ...
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // ...
  __ B(ne, call_runtime);
  // ...

  __ Bind(&check_encoding);
  // ... (select a one- or two-byte character load) ...
}
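// The dispatch mirrors V8's string representations: sliced and cons strings
// are first reduced to their underlying string, sequential and external
// strings get a direct character load, and everything else (including short
// external strings) falls back to the runtime.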
static MemOperand ExpConstant(Register base, int index) {
  return MemOperand(base, index * kDoubleSize);
}
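// The math_exp_constants table is laid out as consecutive doubles, so entry
// i lives at base + i * kDoubleSize; the Ldp instructions below rely on
// adjacent entries being exactly kDRegSize apart, as the ASSERTs check.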
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_temp1,
                                   DoubleRegister double_temp2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!AreAliased(input, result,
                     double_temp1, double_temp2,
                     temp1, temp2, temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label done;
  // result is not needed until the end, so reuse it as a third FP temp.
  DoubleRegister double_temp3 = result;
  Register constants = temp3;
  __ Mov(constants, ExternalReference::math_exp_constants(0));
  // ...

  // Special cases: out-of-range and NaN inputs.
  Label result_is_finite_non_zero;
  // Assert that offsets 0 and 1 (the range bounds) can be loaded with a
  // single ldp.
  ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
                       ExpConstant(constants, 0).offset()));
  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
  __ Fcmp(input, double_temp1);
  // ...
  __ B(&result_is_finite_non_zero, mi);
  // ...
  __ Ldr(double_temp2, ExpConstant(constants, 2));
  // Select zero on underflow, the overflow value loaded above otherwise.
  __ Fcsel(result, fp_zero, double_temp2, lo);
  // Propagate NaN inputs unchanged (vc is false for unordered compares).
  __ Fcsel(result, result, input, vc);
  __ B(&done);

  // The main computation.
  __ Bind(&result_is_finite_non_zero);

  ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
                       ExpConstant(constants, 3).offset()));
  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
  __ Fmov(temp2.W(), double_temp1.S());
  __ Fsub(double_temp1, double_temp1, double_temp3);

  ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
                       ExpConstant(constants, 5).offset()));
  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
  __ Fmul(double_temp1, double_temp1, double_temp2);
  __ Fsub(double_temp1, double_temp1, input);

  __ Fmul(double_temp2, double_temp1, double_temp1);
  __ Fsub(double_temp3, double_temp3, double_temp1);
  __ Fmul(double_temp3, double_temp3, double_temp2);

  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));

  __ Ldr(double_temp2, ExpConstant(constants, 7));
  __ Fmul(double_temp3, double_temp3, double_temp2);
  __ Fsub(double_temp3, double_temp3, double_temp1);

  __ Fmov(double_temp2, 1.0);
  __ Fadd(double_temp3, double_temp3, double_temp2);

  __ And(temp2, temp2, 0x7ff);
  __ Add(temp1, temp1, 0x3ff);

  // Look up the table entry and assemble the scaling factor from it and
  // the computed exponent.
  __ Mov(temp3, ExternalReference::math_exp_log_table());
  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
  __ Bfi(temp2, temp1, 32, 32);
  __ Fmov(double_temp1, temp2);

  __ Fmul(result, double_temp3, double_temp1);

  __ Bind(&done);
}
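// The sequence above assembles the IEEE-754 bit pattern of the scaling
// factor directly: temp1 holds the biased exponent (m + 0x3ff), which
// LSL #20 places in the exponent field of the high word, Orr merges in the
// high mantissa bits from the table entry, Bfi inserts the resulting word
// into the upper half of temp2 (whose lower half is the low mantissa word),
// and Fmov reinterprets the 64-bit pattern as a double. For example, with
// zero mantissa bits, m = 3 gives a biased exponent of 0x402, i.e. 8.0.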
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64