#if V8_TARGET_ARCH_X64
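
// Code generation helpers for the x64 port: stand-alone math stubs (exp,
// sqrt, fmod), elements-kind transition generators, string character
// loading, inline Math.exp code, code-age patching, and stack argument
// access.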
// StubRuntimeCallHelper::BeforeCall / AfterCall: record whether a frame has
// been set up around runtime calls made from stubs.
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
  // ...
  masm->set_has_frame(false);
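
// CreateExpFunction(): when --fast-math is enabled, assembles a dedicated
// exp() stub into freshly allocated executable memory and returns it as a
// UnaryMathFunction; otherwise (or if allocation fails) it falls back to
// std::exp.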
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  // ...
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // ...
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  // ...
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  // ...
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
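
// CreateSqrtFunction(): assembles a small hand-written sqrt stub in the same
// way, falling back to std::sqrt if executable memory cannot be allocated.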
UnaryMathFunction CreateSqrtFunction() {
  // ...
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // ...
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  // ...
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
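
// CreateModuloFunction(): assembles an fmod-style stub on top of the x87
// fprem instruction. fprem only computes a partial remainder, so the stub
// loops until the status word's C2 bit clears, and it substitutes a quiet
// NaN when the operation raised an invalid-operand or zero-divide exception.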
typedef double (*ModuloFunction)(double, double);

ModuloFunction CreateModuloFunction() {
  // ...
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // ...
  __ testb(rax, Immediate(5));  // Invalid-operand or zero-divide flag set?
  __ j(zero, &no_exceptions);
  // ...
  __ bind(&no_exceptions);
  // ...
  Label partial_remainder_loop;
  __ bind(&partial_remainder_loop);
  // ...
  __ testl(rax, Immediate(0x400));  // x87 C2: reduction still incomplete?
  // ...
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  // ...
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);  // Quiet NaN.
  __ movq(rcx, kNaNValue);
  // ...
  __ jmp(&return_result);
  // ...
  __ bind(&valid_result);
  // ...
  __ bind(&return_result);
  // ...
  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f));  // Any x87 exception flags raised?
  // ...
  __ bind(&clear_exceptions);
  // ...
  return FUNCTION_CAST<ModuloFunction>(buffer);
}
#define __ ACCESS_MASM(masm)
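
// ElementsTransitionGenerator::GenerateMapChangeElementsTransition():
// performs an elements-kind transition that needs no backing-store
// conversion by installing the new map on the array. With allocation-site
// tracking enabled, control branches to |allocation_memento_found| if an
// AllocationMemento trails the array.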
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ...
  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
  // ...
  __ RecordWriteField(rdx, /* ... */);
}
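
// ElementsTransitionGenerator::GenerateSmiToDouble(): converts a smi-only
// backing store into a FixedDoubleArray. Copy-on-write or old-space elements
// get a freshly allocated double array (&new_backing_store); an empty fixed
// array only needs its map changed (&only_change_map); otherwise the array
// is converted in place, with the-hole entries becoming hole NaNs.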
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ...
  Label allocated, new_backing_store, only_change_map, done;
  // ...
  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  // ...
  // Empty fixed arrays only require a map transition.
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  // ...
  // Copy-on-write backing stores always get a fresh double array.
  __ CompareRoot(/* ... */, Heap::kFixedCOWArrayMapRootIndex);
  __ j(equal, &new_backing_store);
  // ...
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
  // ...
  // Reuse the existing backing store in place.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  // ...
  __ RecordWriteField(rdx, /* ... */);
  // ...
  Label loop, entry, convert_hole;
  // ...
  __ bind(&new_backing_store);
  // ...
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  // ...
  __ RecordWriteField(rdx, /* ... */);
  // ...
  __ bind(&only_change_map);
  // ...
  __ RecordWriteField(rdx, /* ... */);
  // ...
  __ JumpIfNotSmi(rbx, &convert_hole);
  // ...
  __ bind(&convert_hole);
  // ...
  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }
  // ...
}
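
// ElementsTransitionGenerator::GenerateDoubleToObject(): converts a
// FixedDoubleArray backing store into a FixedArray of tagged values, boxing
// each double in a freshly allocated HeapNumber and storing the-hole for
// hole NaN entries. Bails out to |fail| when an allocation memento is found;
// allocation failure is routed through &gc_required.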
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ...
  Label loop, entry, convert_hole, gc_required, only_change_map;
  // ...
  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  // ...
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  // ...
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  // ...
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // ...
  __ bind(&gc_required);
  // ...
  // Non-hole double: box the value in a new HeapNumber.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // ...
  __ RecordWriteArray(r11, /* ... */);
  // ...
  __ jmp(&entry, Label::kNear);
  // ...
  __ bind(&convert_hole);
  // ...
  __ RecordWriteField(rdx, /* ... */);
  // ...
  __ bind(&only_change_map);
  // ...
  __ RecordWriteField(rdx, /* ... */);
  // ...
}
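
// StringCharLoadGenerator::Generate(): loads the character at |index| of
// |string| into |result|, first unwrapping sliced and flat cons strings and
// then dispatching on sequential vs. external and one-byte vs. two-byte
// representations; anything that cannot be handled inline jumps to
// |call_runtime|.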
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // ...
  Label check_sequential;
  // ...
  __ j(zero, &check_sequential, Label::kNear);
  // ...
  __ j(zero, &cons_string, Label::kNear);

  // Sliced string: add the slice offset to the index and continue with the
  // parent string.
  Label indirect_string_loaded;
  // ...
  __ addp(index, result);
  // ...
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Cons string: only flat cons strings, whose second part is the empty
  // string, are handled inline.
  __ bind(&cons_string);
  // ...
  __ CompareRoot(/* ... */, Heap::kempty_stringRootIndex);
  // ...
  __ bind(&indirect_string_loaded);
  // ...
  __ bind(&check_sequential);
  // ...
  __ j(zero, &seq_string, Label::kNear);

  Label ascii_external, done;
  if (FLAG_debug_code) {
    // ...
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // ...
  // Two-byte external string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // One-byte external string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);
  // ...
  __ bind(&seq_string);
  // ...
  __ jmp(&done, Label::kNear);
  // ...
}
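
// MathExpGenerator::EmitMathExp(): emits inline code for exp(x) using the
// constants published through ExternalReference::math_exp_constants(). The
// scaled argument's low 11 bits (the andq with 0x7ff) index a 2048-entry
// table while its high bits, shifted into the exponent field by the shl of
// 52, form a power of two; a short polynomial supplies the final correction.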
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch));
  ASSERT(!result.is(double_scratch));
  // ...
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
  // ...
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  // ...
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  // ...
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));  // Low 11 bits index the table.
  __ shr(temp1, Immediate(11));
  // ...
  __ shl(temp1, Immediate(52));  // Shift into the double's exponent field.
  // ...
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  // ...
  __ subsd(result, double_scratch);
  // ...
  __ mulsd(result, input);
  // ...
}
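
// Code aging: young code starts with the standard frame-setup prologue
// (push rbp; mov rbp, rsp; push rsi; push rdi). Aging replaces that prologue
// with a call to an age-specific stub, so a code object's age can be read
// back off its first bytes.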
static byte* GetNoCodeAgeSequence(uint32_t* length) {
  static bool initialized = false;
  static byte sequence[kNoCodeAgeSequenceLength];
  *length = kNoCodeAgeSequenceLength;
  if (!initialized) {
    // Assemble the canonical young-code prologue once.
    CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->pushq(rbp);
    patcher.masm()->movp(rbp, rsp);
    patcher.masm()->Push(rsi);
    patcher.masm()->Push(rdi);
    initialized = true;
  }
  return sequence;
}
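
// Code::IsYoungSequence(): code is young if it still begins with the
// unpatched prologue; otherwise it must begin with the call opcode planted
// by PatchPlatformCodeAge().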
bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = (!memcmp(sequence, young_sequence, young_length));
  ASSERT(result || *sequence == kCallOpcode);
  return result;
}
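
// Code::GetCodeAgeAndParity(): young sequences report kNoAgeCodeAge; for
// aged code, the age and marking parity are recovered from the age stub that
// the patched call instruction targets.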
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  // ...
  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
      Assembler::kCallTargetAddressOffset;
  Code* stub = GetCodeFromTargetAddress(target_address);
  GetCodeAgeAndParity(stub, age, parity);
  // ...
}
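
// Code::PatchPlatformCodeAge(): rejuvenating code copies the young prologue
// back over the sequence; aging it overwrites the prologue with a call to
// the stub for the requested age and parity.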
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age, MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    // ...
  }
}
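
// StackArgumentsAccessor::GetArgumentOperand(): builds an Operand addressing
// the index'th stack argument relative to base_reg_, using the compile-time
// argument count when it is known (argument_count_immediate_) and scaling
// argument_count_reg_ into the address otherwise.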
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  // ...
  int displacement_to_last_argument =
      base_reg_.is(rsp) ? /* ... */ : /* ... */;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // The argument count is known at compile time.
    ASSERT(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_,
                   displacement_to_last_argument +
                       (argument_count_immediate_ + receiver - 1 - index) *
                           kPointerSize);
  } else {
    // The argument count is only known at run time, in argument_count_reg_.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
                   displacement_to_last_argument +
                       (receiver - 1 - index) * kPointerSize);
  }
}
#endif  // V8_TARGET_ARCH_X64