#if V8_TARGET_ARCH_ARM
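
// When running on the simulator, generated code cannot be called directly;
// fast_exp_simulator() forwards calls to the buffer assembled by
// CreateExpFunction() through the simulator.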
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;

double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif
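

// CreateExpFunction() assembles a specialized exp() routine into a freshly
// allocated executable buffer and returns it as a UnaryMathFunction. When
// fast math is disabled, or the buffer cannot be allocated, the C++ std::exp
// is returned instead.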
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  DwVfpRegister input = d0;
  DwVfpRegister result = d1;
  DwVfpRegister double_scratch1 = d2;
  DwVfpRegister double_scratch2 = d3;
  Register temp1 = r4;
  Register temp2 = r5;
  Register temp3 = r6;

  if (masm.use_eabi_hardfloat()) {
    // Input value is already in d0, nothing to do.
  } else {
    __ vmov(input, r0, r1);
  }
  __ Push(temp3, temp2, temp1);
  MathExpGenerator::EmitMathExp(
      &masm, input, result, double_scratch1, double_scratch2,
      temp1, temp2, temp3);
  __ Pop(temp3, temp2, temp1);
  if (masm.use_eabi_hardfloat()) {
    __ vmov(d0, result);
  } else {
    __ vmov(r0, r1, result);
  }
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));
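
  // Flush the instruction cache so the freshly assembled code is visible to
  // instruction fetch before it is executed for the first time.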
  CPU::FlushICache(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


#if defined(V8_HOST_ARCH_ARM)
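
// CreateMemCopyUint8Function() assembles a specialized OS::MemCopy routine
// for byte buffers (dest in r0, src in r1, byte count in r2). Under the
// simulator, or when code generation fails, the generic C++ stub passed in
// is returned unchanged.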
OS::MemCopyUint8Function CreateMemCopyUint8Function(
    OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;

  Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
  Label size_less_than_8;
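
  // Dispatch on the remaining byte count: larger blocks are copied with NEON
  // loads and stores, progressively smaller tails fall through to the labels
  // below, and counts under 8 bytes are handled separately at the end.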
  __ cmp(chars, Operand(8));
  __ b(lt, &size_less_than_8);

  __ cmp(chars, Operand(32));
  __ cmp(chars, Operand(64));
  __ cmp(chars, Operand(128));
  __ cmp(chars, Operand(256));
  __ sub(chars, chars, Operand(256));
  __ sub(chars, chars, Operand(64), SetCC);
  __ add(chars, chars, Operand(256));
  __ sub(chars, chars, Operand(128));

  __ cmp(chars, Operand(64));
  __ sub(chars, chars, Operand(64));

  __ cmp(chars, Operand(32));
  __ sub(chars, chars, Operand(32));

  __ cmp(chars, Operand(16));
  __ b(le, &_16_or_less);
  __ sub(chars, chars, Operand(16));

  __ bind(&_16_or_less);
  __ cmp(chars, Operand(8));
  __ b(le, &_8_or_less);
  __ sub(chars, chars, Operand(8));

  __ bind(&_8_or_less);
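
  // Eight bytes or fewer remain: step src and dest back by (8 - chars) so
  // that a final, possibly overlapping, 8-byte NEON copy ends exactly at the
  // last byte to be copied.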
  __ rsb(chars, chars, Operand(8));
  __ sub(src, src, Operand(chars));
  __ sub(dest, dest, Operand(chars));
  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
  __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

  __ bind(&size_less_than_8);

  __ bic(temp1, chars, Operand(0x3), SetCC);
  __ bic(temp2, chars, Operand(0x3), SetCC);
  __ add(temp2, dest, temp2);
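
  // Shifting chars left by 31 moves bit 0 into the N flag and bit 1 into the
  // C flag, so the trailing one- and two-byte copies can be done with
  // conditionally executed instructions instead of further compares.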
  __ mov(chars, Operand(chars, LSL, 31), SetCC);

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
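

// CreateMemCopyUint16Uint8Function() assembles a routine that widens a
// uint8_t buffer into a uint16_t buffer (for example one-byte string data
// into two-byte string data). As above, dest/src/chars arrive in r0/r1/r2 and
// the C++ stub is returned when the specialized code cannot be generated.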
OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    OS::MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ bic(temp, chars, Operand(0x7));
  __ sub(chars, chars, Operand(temp));
  __ add(temp, dest, Operand(temp, LSL, 1));
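
  // As in the byte copy above: back up src by (8 - chars) and dest by twice
  // that amount, so the final 8-byte load and 16-byte store land exactly on
  // the ends of their buffers.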
  __ rsb(chars, chars, Operand(8));
  __ sub(src, src, Operand(chars));
  __ sub(dest, dest, Operand(chars, LSL, 1));
  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
  __ vmovl(NeonU8, q0, d0);
  __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));

  __ bic(temp2, chars, Operand(0x3));
  __ add(temp2, dest, Operand(temp2, LSL, 1));
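
  // Non-NEON path: uxtb16 extracts bytes 0/2 and bytes 1/3 of each loaded
  // word into zero-extended halfword pairs, and pkhbt/pkhtb repack them so
  // every 4 source bytes are written out as two words holding the four
  // widened halfwords.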
  __ uxtb16(temp3, Operand(temp1, ROR, 0));
  __ uxtb16(temp4, Operand(temp1, ROR, 8));
  __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
  __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
  __ add(dest, dest, Operand(8));

  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  __ uxtb(temp3, Operand(temp1, ROR, 8));
  __ mov(temp3, Operand(temp3, LSL, 16));
  __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));

  CPU::FlushICache(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif  // V8_HOST_ARCH_ARM
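

// CreateSqrtFunction() emits a minimal sqrt stub: move the parameter into d0,
// take the square root with vsqrt, and move the result back out.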
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
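

// StubRuntimeCallHelper::BeforeCall() and AfterCall() bracket runtime calls
// made from stubs: BeforeCall sets up an internal frame and marks the
// MacroAssembler as having a frame, AfterCall tears it down again.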
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);

  ASSERT(masm->has_frame());
  masm->set_has_frame(false);


#define __ ACCESS_MASM(masm)
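
// ElementsTransitionGenerator::GenerateMapChangeElementsTransition() installs
// the transitioned map on the receiver (in r2) and records the write. When
// allocation-site tracking is enabled it first bails out to
// allocation_memento_found if the array has an allocation memento.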
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
  }

  __ RecordWriteField(r2,
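

// ElementsTransitionGenerator::GenerateSmiToDouble() copies a FixedArray of
// smis into a freshly allocated FixedDoubleArray, converting each smi to a
// double and each hole to the hole NaN, then installs the new elements
// backing store and map on the receiver.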
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
  }

  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
  __ RecordWriteField(r2,
  __ RecordWriteField(r2,

  __ bind(&only_change_map);
  __ RecordWriteField(r2,

  __ bind(&gc_required);

  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
  __ add(r9, r9, Operand(8));
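
  // Holes are not smis; they are written into the double array as the
  // canonical hole NaN bit pattern instead.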
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
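

// ElementsTransitionGenerator::GenerateDoubleToObject() performs the reverse
// transition: each double element is boxed into a newly allocated HeapNumber
// (holes stay holes), the results are written into a new FixedArray, and the
// receiver's elements and map are updated.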
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
  }

  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);

  __ bind(&gc_required);

  __ b(eq, &convert_hole);
  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);

  __ bind(&convert_hole);
  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);

  __ RecordWriteField(r2,

  __ bind(&only_change_map);
  __ RecordWriteField(r2,
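

// StringCharLoadGenerator::Generate() loads the character at 'index' of
// 'string' into 'result'. Sliced and cons strings are unwrapped first, the
// sequential and external representations are then handled inline, and
// anything else is punted to call_runtime.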
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  Label check_sequential;
  __ b(eq, &check_sequential);

  Label cons_string;
  __ b(eq, &cons_string);

  Label indirect_string_loaded;
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  __ bind(&cons_string);
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);

  __ bind(&indirect_string_loaded);

  Label external_string, check_encoding;
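
  // Only sequential and external strings reach this point. Pick the
  // representation here, then fall through to the encoding check shared by
  // both cases.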
  __ bind(&check_sequential);
  __ b(ne, &external_string);
  __ jmp(&check_encoding);

  __ bind(&external_string);
  if (FLAG_debug_code) {
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  __ b(ne, call_runtime);

  __ bind(&check_encoding);
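

// The exp() code below reads its range-check constants and polynomial
// coefficients from a table set up by
// ExternalReference::InitializeMathExpData(); ExpConstant(index, base) forms
// the MemOperand for entry 'index' of that table.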
static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label zero, infinity, done;
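
  // Outline of the generated code: the input is range-checked against table
  // entries 0 and 1 (inputs that underflow to zero or overflow to infinity
  // are handled out of line), then x is scaled and split into an integer
  // part, which ends up in the result's exponent bits, and a small remainder.
  // A lookup in math_exp_log_table supplies the fractional power of two and a
  // short polynomial corrects for the remainder.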
  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);

  ASSERT(*reinterpret_cast<double*>(
             ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
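
  // temp2 now holds the scaled integer part of the input: its low 11 bits
  // index the 2048-entry math_exp_log_table, while the remaining bits, after
  // adding the IEEE-754 exponent bias (0x3ff), are merged into the high word
  // of the looked-up value to apply the integral power of two.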
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));

  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
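  // ldm loads the lower-numbered register from the lower address, so which
  // register receives the low word of the table entry depends on the two
  // registers' encodings.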
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);

  __ vldr(result, ExpConstant(2, temp3));
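

// Code aging support. Freshly compiled ("young") functions start with a
// fixed prologue sequence; to age code, that sequence is overwritten with a
// short jump to a code-age stub. GetCodeAgeAndParity() recognises the patched
// form, IsYoungSequence() the original one, and PatchPlatformCodeAge()
// switches between the two.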
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;


static byte* GetNoCodeAgeSequence(uint32_t* length) {
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    SmartPointer<CodePatcher> patcher(
        new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
    PredictableCodeSizeScope scope(patcher->masm(), *length);
    patcher->masm()->PushFixedFrame(r1);
    patcher->masm()->nop(ip.code());
    patcher->masm()->add(
        fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    initialized = true;
  }
  return byte_sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}
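

// Code::PatchPlatformCodeAge() rewrites the prologue in place: restoring the
// young sequence is a straight copy, while aging emits a short sequence that
// puts the prologue's address in r0 and transfers control to the appropriate
// code-age stub, whose address is emitted as a literal at the end.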
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->emit_code_stub_address(stub);
  }
}

#endif  // V8_TARGET_ARCH_ARM