#if defined(V8_TARGET_ARCH_IA32)
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}
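// Note (added for orientation, not in the original text): MacroAssembler
// helpers that may set up their own frames consult has_frame() (e.g. through
// AllowThisStubCall), so the frame entered in BeforeCall must be flagged here
// and the flag cleared again in AfterCall.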
#define __ masm.

UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  // ... (allocate an executable buffer, or fall back to the libc function)
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // ... (emit the operation via TranscendentalCacheStub::GenerateOperation)
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}

UnaryMathFunction CreateSqrtFunction() {
  // ... (allocate an executable buffer; fall back to libc sqrt if that
  //      fails or SSE2 is unavailable)
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  {
    CpuFeatures::Scope use_sse2(SSE2);
    // ... (load the argument, sqrtsd it, return it on the FPU stack)
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
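
// A minimal usage sketch (not part of the original file; the wrapper name
// CallGeneratedSqrt is hypothetical): the creators above hand back plain C
// function pointers into generated code buffers, so the result can be cached
// once and called like any double -> double function.
static double CallGeneratedSqrt(double x) {
  static UnaryMathFunction fast_sqrt = CreateSqrtFunction();
  return fast_sqrt(x);  // Runs the stub, or libc sqrt if codegen fell back.
}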

static void MemCopyWrapper(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}

OS::MemCopyFunction CreateMemCopyFunction() {
  // ... (allocate an executable buffer)
  if (buffer == NULL) return &MemCopyWrapper;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  // 32-bit cdecl calls pass all arguments on the stack:
  // esp[12]: size, esp[8]: source, esp[4]: destination, esp[0]: return
  // address.
  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  int stack_offset = 0;  // Update if we change the stack height.
  if (FLAG_debug_code) {
    __ cmp(Operand(esp, kSizeOffset + stack_offset),
           Immediate(OS::kMinComplexMemCopy));
    // ... (int3 if the size argument is below the supported minimum)
  }
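  // The debug check above documents the caller contract: this copier is only
  // invoked for copies of at least OS::kMinComplexMemCopy bytes. The
  // unconditional 16-byte head copy below relies on that minimum.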
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope enable(SSE2);
    // ... (save edi/esi and bump stack_offset)
    Register dst = edi;
    Register src = esi;
    Register count = ecx;
    __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
    __ mov(src, Operand(esp, stack_offset + kSourceOffset));
    __ mov(count, Operand(esp, stack_offset + kSizeOffset));

    // Copy the first 16 bytes unaligned, then advance dst, src, and count by
    // edx = 16 - (dst & 0xF) so that dst becomes 16-byte aligned.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // ...
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
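    // Skipping up to 16 bytes here is safe: those bytes were already covered
    // by the unaligned 16-byte copy above, and, like memcpy, this copier
    // assumes the buffers do not overlap.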
    // dst is now aligned; pick a copy loop based on src's alignment.
    Label unaligned_source;
    __ test(src, Immediate(0x0F));
    __ j(not_zero, &unaligned_source);
    {
      // Copy loop for aligned source and destination.
      __ mov(edx, count);
      Register loop_count = ecx;
      Register count = edx;
      __ shr(loop_count, 5);  // Number of 32-byte chunks.
      // Main copy loop: move 32 bytes per iteration with aligned accesses.
      Label loop;
      __ bind(&loop);
      __ prefetch(Operand(src, 0x20), 1);
      __ movdqa(xmm0, Operand(src, 0x00));
      __ movdqa(xmm1, Operand(src, 0x10));
      __ add(src, Immediate(0x20));
      __ movdqa(Operand(dst, 0x00), xmm0);
      __ movdqa(Operand(dst, 0x10), xmm1);
      __ add(dst, Immediate(0x20));
      __ dec(loop_count);
      __ j(not_zero, &loop);
      // At most 31 bytes left; move 16 of them if bit 4 of count is set.
      Label move_less_16;
      __ test(count, Immediate(0x10));
      __ j(zero, &move_less_16);
      __ movdqa(xmm0, Operand(src, 0));
      __ add(src, Immediate(0x10));
      __ movdqa(Operand(dst, 0), xmm0);
      __ add(dst, Immediate(0x10));
      __ bind(&move_less_16);
      // At most 15 bytes left. Copy the final 16 bytes ending at dst + count.
      // ...
      // memcpy returns the destination pointer; load it into eax.
      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
      __ pop(esi);
      __ pop(edi);
      __ ret(0);
    }
    __ Align(16);
    {
      // Copy loop for unaligned source and aligned destination.
      // If the source is not aligned, we can't read it as efficiently.
      __ bind(&unaligned_source);
      __ mov(edx, count);
      Register loop_count = ecx;
      Register count = edx;
      __ shr(loop_count, 5);  // Number of 32-byte chunks.
      // Main copy loop: unaligned 16-byte loads, aligned 16-byte stores.
      Label loop;
      __ bind(&loop);
      __ prefetch(Operand(src, 0x20), 1);
      __ movdqu(xmm0, Operand(src, 0x00));
      __ movdqu(xmm1, Operand(src, 0x10));
      __ add(src, Immediate(0x20));
      __ movdqa(Operand(dst, 0x00), xmm0);
      __ movdqa(Operand(dst, 0x10), xmm1);
      __ add(dst, Immediate(0x20));
      __ dec(loop_count);
      __ j(not_zero, &loop);
      // At most 31 bytes left; move 16 of them if bit 4 of count is set.
      Label move_less_16;
      __ test(count, Immediate(0x10));
      __ j(zero, &move_less_16);
      __ movdqu(xmm0, Operand(src, 0));
      __ add(src, Immediate(0x10));
      __ movdqa(Operand(dst, 0), xmm0);
      __ add(dst, Immediate(0x10));
      __ bind(&move_less_16);
      // At most 15 bytes left. Copy the final 16 bytes ending at dst + count.
      __ and_(count, 0x0F);
      __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);

      // memcpy returns the destination pointer; load it into eax.
      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
      __ pop(esi);
      __ pop(edi);
      __ ret(0);
    }
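    // Both tails above end their final 16-byte copy exactly at dst + count,
    // deliberately re-copying a few already-written bytes rather than
    // looping byte by byte over the remainder.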
  } else {
    // SSE2 not supported: copy with 32-bit moves and rep_movs instead.
    // ... (save edi/esi and bump stack_offset)
    Register dst = edi;
    Register src = esi;
    Register count = ecx;
    __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
    __ mov(src, Operand(esp, stack_offset + kSourceOffset));
    __ mov(count, Operand(esp, stack_offset + kSizeOffset));

    // Copy the first word, then advance dst, src, and count by
    // edx = 4 - (dst & 3) so that dst becomes word aligned.
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    // ...
    __ add(edx, Immediate(4));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
    // ... (rep_movs whole words, then copy the trailing bytes)
    // memcpy returns the destination pointer; load it into eax.
    __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
    __ pop(esi);
    __ pop(edi);
    __ ret(0);
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);
  // ... (flush the instruction cache and write-protect the buffer)
  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
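
// A minimal sketch of how the generated copier could be installed, assuming
// a process-wide function pointer; the names g_mem_copy and InitMemCopy are
// hypothetical and stand in for V8's actual platform-setup code.
static OS::MemCopyFunction g_mem_copy = &MemCopyWrapper;

static void InitMemCopy() {
  // CreateMemCopyFunction itself falls back to MemCopyWrapper when it cannot
  // allocate executable memory, so the pointer is always safe to call.
  g_mem_copy = CreateMemCopyFunction();
}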

#undef __
#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm) {
  // Set the transitioned map on the receiver (edx) and record the write.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
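
// The transition generators above and below are all emitted with the same
// register contract as their callers: eax holds the value, ebx the target
// map, ecx the key, and edx the receiver.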

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Label* fail) {
  Label loop, entry, convert_hole, gc_required, only_change_map;

  // An empty elements array needs only the map transition.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);
  // ... (allocate a FixedDoubleArray large enough for the source elements)
  // The new FixedDoubleArray must be 8-byte aligned; plug the slack word at
  // the start or the end of the allocation with a one-pointer filler map.
  Label aligned, aligned_done;
  // ... (test whether the allocation in eax is already double aligned)
  __ j(zero, &aligned, Label::kNear);
  __ mov(FieldOperand(eax, 0),
         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
  __ add(eax, Immediate(kPointerSize));
  __ jmp(&aligned_done);

  __ bind(&aligned);
  __ mov(Operand(eax, esi, times_1, -kPointerSize - 1),
         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
  __ bind(&aligned_done);
  // Initialize the new FixedDoubleArray and install it as the backing store.
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  // ...
  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // Prepare for the conversion loop: keep the canonical hole NaN in an XMM
  // register when SSE2 is available.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ movdbl(the_hole_nan,
              Operand::StaticVariable(canonical_the_hole_nan_reference));
  }
  __ jmp(&entry);
  // Restore registers and bail out to fail if allocation failed.
  __ bind(&gc_required);
  // Conversion loop: load the next element into ebx and convert it.
  // ...
  __ JumpIfNotSmi(ebx, &convert_hole);
  // A normal smi: untag, convert to double, and store.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope fscope(SSE2);
    // ... (cvtsi2sd into xmm0, movdbl into the new array)
  } else {
    __ push(ebx);
    __ fild_s(Operand(esp, 0));
    // ... (fstp_d into the new array)
  }
  // Found the hole: store the canonical hole NaN instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, "object found in smi-only array");
  }
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    // ... (movdbl store of the_hole_nan)
  } else {
    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
    // ... (fstp_d into the new array)
  }
  __ bind(&only_change_map);
  // Set the transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi,
                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
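
// Note the asymmetry above: the elements-store write uses
// EMIT_REMEMBERED_SET because the new FixedDoubleArray may live in new
// space, while the map writes use OMIT_REMEMBERED_SET, presumably safe
// because maps are never allocated in new space.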

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  // An empty elements array needs only the map transition.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);
  // ... (allocate a FixedArray large enough for the source elements)
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  // ... (set the length and prepare the conversion loop)
  __ bind(&only_change_map);
  // Set the transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi,
                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ jmp(&success);
  // Restore registers and bail out to fail if allocation failed.
  __ bind(&gc_required);
  // A non-hole double: copy its value into a freshly allocated heap number.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope fscope(SSE2);
    // ... (movdbl the double into the new heap number)
  }
  // Store the heap number in the new FixedArray and record the write.
  __ RecordWriteArray(eax, edx, ebx,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);
  // The hole NaN maps back to the hole object in the new FixedArray.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());
  // ... (end of conversion loop; restore registers)
  // Set the transitioned map and install the new backing store on the
  // receiver, recording both writes.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi,
                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ bind(&success);
}

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // ... (load the instance type of string into result)
  // Indirect strings (cons and sliced) need special handling.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);
  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Slice: add the slice offset to the index, continue with the parent.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);
  // Cons string: only a flat cons string, whose second half is the empty
  // string, is handled inline; anything else goes to the runtime.
  __ bind(&cons_string);
  Factory* factory = masm->isolate()->factory();
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
  __ bind(&indirect_string_loaded);
  // Distinguish sequential and external strings; only those two shapes can
  // reach this point.
  Label seq_string;
  __ bind(&check_sequential);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);
  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect string) here.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, "external string expected, but not found");
  }
  // ... (rule out short external strings, dispatch on the encoding, and
  //      load the resource data pointer into result)
  // Two-byte external string: load a 16-bit character from the resource.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // ASCII external string: load an 8-bit character.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);
  // Sequential string: dispatch on the encoding, ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte: load a 16-bit character from the body of the string.
  __ movzx_w(result, FieldOperand(string, index, times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // ASCII: load an 8-bit character.
  __ bind(&ascii);
  __ movzx_b(result, FieldOperand(string, index, times_1,
                                  SeqAsciiString::kHeaderSize));
  __ bind(&done);
}
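
// Taken together: on every fast path above, result ends up holding the
// character code, while string and index may have been rewritten to the
// underlying string and adjusted offset; anything the generator cannot
// handle inline (non-flat cons strings, short external strings) branches
// to call_runtime instead.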

#endif  // V8_TARGET_ARCH_IA32