#if V8_TARGET_ARCH_MIPS

#if defined(USE_SIMULATOR)
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);
}
#endif
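// Note: when V8 runs on the MIPS simulator, generated machine code cannot be
// invoked through a plain C function pointer, so CreateExpFunction() below
// stashes its buffer in fast_exp_mips_machine_code and returns this
// trampoline, which routes the call through Simulator::CallFP instead.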

UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ Move(input, a0, a1);
  __ Push(temp3, temp2, temp1);
  MathExpGenerator::EmitMathExp(
      &masm, input, result, double_scratch1, double_scratch2,
      temp1, temp2, temp3);
  __ Pop(temp3, temp2, temp1);
  __ Move(v0, v1, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
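// Illustrative usage (not part of the original file): the returned pointer has
// the ordinary UnaryMathFunction shape, double (*)(double), so a caller can do
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // Falls back to std::exp when --fast-math is
//                              // off or the code buffer could not be mapped.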


#if defined(V8_HOST_ARCH_MIPS)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
      OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Label lastb, unaligned, aligned, chkw,
        loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
        leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
        ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

  // The size of each prefetch.
  uint32_t pref_chunk = 32;
  // The maximum size of a prefetch; it must not be less than pref_chunk.
  uint32_t max_pref_size = 128;
  ASSERT(pref_chunk < max_pref_size);

  // pref_limit is set based on the fact that we never use an offset greater
  // than 5 on a store prefetch and that a single prefetch can never be larger
  // than max_pref_size.
  uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
  int32_t pref_hint_load = kPrefHintLoadStreamed;
  int32_t pref_hint_store = kPrefHintPrepareForStore;
  uint32_t loadstore_chunk = 4;

  // The initial prefetches may fetch bytes that are before the buffer being
  // copied. Start copies with an offset of 4 to avoid this situation when
  // using kPrefHintPrepareForStore.
  ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
         pref_chunk * 4 >= max_pref_size);
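  // Worked out with the values above: pref_limit = 5 * 32 + 128 = 288 bytes.
  // t9 is set below to t0 - pref_limit, where t0 (computed in code elided
  // here) is the past-the-end address of the destination; the copy loops
  // branch to skip_pref once a0 gets that close to the end, so a
  // PrepareForStore prefetch never touches cache lines beyond the buffer
  // being written.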

  __ slti(t2, a2, 2 * loadstore_chunk);
  __ bne(t2, zero_reg, &lastb);

  __ andi(t8, t8, loadstore_chunk - 1);
  __ bne(t8, zero_reg, &unaligned);
  __ subu(a3, zero_reg, a0);

  __ andi(a3, a3, loadstore_chunk - 1);
  __ beq(a3, zero_reg, &aligned);

  __ andi(t8, a2, 0x3f);
  __ beq(a2, t8, &chkw);

  __ Subu(t9, t0, pref_limit);
  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

  __ addiu(a0, a0, 16 * loadstore_chunk);
  __ bne(a0, a3, &loop16w);
  __ addiu(a1, a1, 16 * loadstore_chunk);
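  // Each pass through loop16w moves 16 * loadstore_chunk = 64 bytes; the
  // source-pointer bump above sits in the branch delay slot of the bne, which
  // is why it appears after the branch.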

  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ andi(t8, a2, 0x1f);
  __ beq(a2, t8, &chk1w);

  __ addiu(a1, a1, 8 * loadstore_chunk);

  __ addiu(a0, a0, 8 * loadstore_chunk);

  __ andi(a2, t8, loadstore_chunk - 1);
  __ beq(a2, t8, &lastb);

  __ bind(&wordCopy_loop);
  __ addiu(a0, a0, loadstore_chunk);
  __ addiu(a1, a1, loadstore_chunk);
  __ bne(a0, a3, &wordCopy_loop);

  __ Branch(&leave, le, a2, Operand(zero_reg));

  __ bne(a0, a3, &lastbloop);

  __ andi(a3, a3, loadstore_chunk - 1);
  __ beq(a3, zero_reg, &ua_chk16w);

  __ andi(t8, a2, 0x3f);
  __ beq(a2, t8, &ua_chkw);

  __ Subu(t9, t0, pref_limit);

  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));

  __ bind(&ua_loop16w);
  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

  __ bind(&ua_skip_pref);

  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

  __ addiu(a0, a0, 16 * loadstore_chunk);
  __ bne(a0, a3, &ua_loop16w);
  __ addiu(a1, a1, 16 * loadstore_chunk);

  __ andi(t8, a2, 0x1f);
  __ beq(a2, t8, &ua_chk1w);

  __ addiu(a1, a1, 8 * loadstore_chunk);

  __ addiu(a0, a0, 8 * loadstore_chunk);

  __ andi(a2, t8, loadstore_chunk - 1);
  __ beq(a2, t8, &ua_smallCopy);

  __ bind(&ua_wordCopy_loop);
  __ addiu(a0, a0, loadstore_chunk);
  __ addiu(a1, a1, loadstore_chunk);
  __ bne(a0, a3, &ua_wordCopy_loop);

  __ bind(&ua_smallCopy);
  __ beq(a2, zero_reg, &leave);

  __ bind(&ua_smallCopy_loop);
  __ bne(a0, a3, &ua_smallCopy_loop);
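  // The ua_* labels mirror the aligned copy above for source and destination
  // pointers with different word alignment; the loads and stores elided from
  // this excerpt use the MIPS lwl/lwr unaligned-access pairs instead of plain
  // lw, and the tail is finished byte by byte in ua_smallCopy_loop.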

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
#endif
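// Note: on simulator builds (and on non-MIPS hosts, where this whole block is
// compiled out) the caller's fallback `stub` is returned unchanged, so callers
// can install the result of CreateMemCopyUint8Function() unconditionally.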


UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
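// The generated stub takes the double argument from the FP parameter register,
// applies the hardware square root, and returns it in f0; like
// CreateExpFunction(), the result is an ordinary UnaryMathFunction, so callers
// can treat the generated code and std::sqrt interchangeably.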

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
  __ RecordWriteField(a2,

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register scratch = t6;

  __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);

  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ sll(scratch, t1, 2);

  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);

  __ RecordWriteField(a2,
  __ RecordWriteField(a2,

  __ bind(&only_change_map);
  __ RecordWriteField(a2,

  __ bind(&gc_required);

  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
  }

  __ Branch(&loop, lt, t3, Operand(t2));
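  // In the loop around the code above, each smi element is untagged and stored
  // as a double, while non-smi elements (which the Assert verifies are the
  // hole) are re-encoded in the new FixedDoubleArray as the canonical hole NaN
  // (kHoleNanUpper32 / kHoleNanLower32) by stores elided from this excerpt.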

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  Label entry, loop, convert_hole, gc_required, only_change_map;

  __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);

  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);

  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);

  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);

  __ bind(&convert_hole);

  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());

  __ RecordWriteField(a2,

  __ bind(&only_change_map);

  __ RecordWriteField(a2,

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Indirect strings (slices and cons strings) need special handling.
  Label check_sequential;
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices: add the slice offset to the index.
  Label indirect_string_loaded;
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings: only flat cons strings (empty second part) are
  // handled inline; everything else goes to the runtime.
  __ bind(&cons_string);
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));

  __ bind(&indirect_string_loaded);

  // Distinguish sequential and external strings; only these two
  // representations can reach this point.
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    __ Assert(eq, kExternalStringExpectedButNotFound,
              at, Operand(zero_reg));
  }
  __ Branch(call_runtime, ne, at, Operand(zero_reg));

  __ bind(&check_encoding);
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  // One-byte (ASCII) string.
  __ Addu(at, string, index);
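  // The two address computations above feed the character load: two-byte
  // strings scale the index by 2 (the sll by 1) before adding it to the string
  // base, while one-byte strings add the index directly; the actual loads
  // (a halfword lhu and a byte lbu) are elided from this excerpt.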
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label zero, infinity, done;

  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ BranchF(&zero, NULL, ge, double_scratch1, input);

  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ BranchF(&infinity, NULL, ge, input, double_scratch2);

  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ FmoveLow(temp2, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
  __ mul_d(result, result, double_scratch2);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);

  ASSERT(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ Move(double_scratch2, 1);
  __ add_d(result, result, double_scratch2);
  __ srl(temp1, temp2, 11);
  __ Ext(temp2, temp2, 0, 11);
  __ Addu(temp1, temp1, Operand(0x3ff));

  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ sll(at, temp2, 3);
  __ Addu(temp3, temp3, Operand(at));
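  // At this point temp1 holds the biased binary exponent (the 0x3ff added
  // above is the IEEE-754 double exponent bias) and temp2 holds the low 11
  // bits of the scaled argument, used to index the 2^11-entry
  // math_exp_log_table (8 bytes per entry, hence the sll by 3). Loads elided
  // from this excerpt fetch the table entry's low and high words into temp2
  // and temp3; the code below merges them with the exponent bits to build a
  // power-of-two scale factor in double_scratch1.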
  if (temp2.code() < temp3.code()) {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp3, at);
    __ Move(double_scratch1, temp2, temp1);
  } else {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp2, at);
    __ Move(double_scratch1, temp3, temp1);
  }
  __ mul_d(result, result, double_scratch1);
  __ BranchShort(&done);

  __ bind(&zero);  // Underflow: result is +0.0 (set in an elided instruction).
  __ BranchShort(&done);

  __ bind(&infinity);
  __ ldc1(result, ExpConstant(2, temp3));  // Overflow: load +infinity.

  __ bind(&done);
}

#undef __


// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  // The young code sequence is the frame-building prologue emitted at the
  // start of generated functions; it is built once and cached.
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    SmartPointer<CodePatcher>
        patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
    PredictableCodeSizeScope scope(patcher->masm(), *length);
    patcher->masm()->Push(ra, fp, cp, a1);
    patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
    patcher->masm()->Addu(
        fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    initialized = true;
  }
  return byte_sequence;
}

bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  ASSERT(result ||
         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
  return result;
}

void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Assembler::target_address_at(
        sequence + Assembler::kInstrSize);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}

void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    patcher.masm()->li(t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
        CONSTANT_SIZE);
    patcher.masm()->nop();  // Prevent jalr-to-jal optimization.
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();  // Branch delay slot nop.
    patcher.masm()->nop();  // Pad out to the young sequence length.
  }
}
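// Tying the pieces together: the marker nop emitted first by the aging patch
// is the instruction whose encoding is kCodeAgePatchFirstInstruction above,
// which is how IsYoungSequence()'s ASSERT recognizes an already-patched
// sequence, and GetCodeAgeAndParity() recovers the stub address from the li
// that immediately follows the marker.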

#endif  // V8_TARGET_ARCH_MIPS