v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
codegen-x64.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.

UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) {
    // Fallback to library function if function cannot be created.
    switch (type) {
      case TranscendentalCache::SIN: return &sin;
      case TranscendentalCache::COS: return &cos;
      case TranscendentalCache::TAN: return &tan;
      case TranscendentalCache::LOG: return &log;
      default: UNIMPLEMENTED();
    }
  }

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Move double input into registers.
  __ push(rbx);
  __ push(rdi);
  __ movq(rbx, xmm0);
  __ push(rbx);
  __ fld_d(Operand(rsp, 0));
  TranscendentalCacheStub::GenerateOperation(&masm, type);
  // The return value is expected to be in xmm0.
  __ fstp_d(Operand(rsp, 0));
  __ pop(rbx);
  __ movq(xmm0, rbx);
  __ pop(rdi);
  __ pop(rbx);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
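The stub above shuffles xmm0 through a general-purpose register and the stack because the transcendental instructions (fsin, fcos, fptan, fyl2x) exist only in the x87 unit, which reads its own register stack rather than XMM registers. A minimal standalone sketch of the same xmm -> x87 -> xmm round trip, assuming GCC-style inline assembly on x86-64 (x87_sin is a hypothetical helper, not part of V8):

#include <cstdio>

static double x87_sin(double x) {
  double result;
  // The "t" constraint pins the operand to the top of the x87 register
  // stack, standing in for the fld_d / GenerateOperation / fstp_d sequence
  // the stub emits.
  __asm__("fsin" : "=t"(result) : "0"(x));
  return result;
}

int main() {
  std::printf("%f\n", x87_sin(1.0));  // 0.841471
  return 0;
}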


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Compute the square root in place; no register shuffling is needed.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
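The allocate/emit/protect/cast pattern can be reproduced outside V8. A sketch assuming a POSIX-style system with mmap and MAP_ANONYMOUS; the byte sequence is the machine-code encoding of sqrtsd xmm0, xmm0 followed by ret, i.e. what the MacroAssembler emits above:

#include <sys/mman.h>
#include <cstring>
#include <cstdio>

typedef double (*UnaryMathFunction)(double x);

int main() {
  // sqrtsd xmm0, xmm0 (F2 0F 51 C0) ; ret (C3)
  static const unsigned char code[] = { 0xF2, 0x0F, 0x51, 0xC0, 0xC3 };
  // Allocate a writable page, copy the code in, then make it read/execute
  // only -- the moral equivalent of OS::Allocate + OS::ProtectCode.
  void* buffer = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED) return 1;
  std::memcpy(buffer, code, sizeof(code));
  mprotect(buffer, 4096, PROT_READ | PROT_EXEC);
  UnaryMathFunction fast_sqrt = reinterpret_cast<UnaryMathFunction>(buffer);
  std::printf("%f\n", fast_sqrt(2.0));  // 1.414214
  munmap(buffer, 4096);
  return 0;
}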


#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The Windows 64 ABI passes double arguments in xmm0 and xmm1 and
  // returns the result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.

  // Compute x mod y.
  // Load y and x (use the argument backing store as temporary storage).
  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
  __ movsd(Operand(rsp, kPointerSize), xmm0);
  __ fld_d(Operand(rsp, kPointerSize * 2));
  __ fld_d(Operand(rsp, kPointerSize));

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Invalid Operand or Zero Division exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1).
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, the computation has only a partial result. Loop to
    // continue the computation.
    __ j(not_zero, &partial_remainder_loop);
  }

  Label valid_result;
  Label return_result;
  // If Invalid Operand or Zero Division exceptions are set, return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop the result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue, RelocInfo::NONE);
  __ movq(Operand(rsp, kPointerSize), rcx);
  __ movsd(xmm0, Operand(rsp, kPointerSize));
  __ jmp(&return_result);

  // If the result is valid, return it.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kPointerSize));
  __ movsd(xmm0, Operand(rsp, kPointerSize));

  // Clean up the FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}

#endif
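When the x87 status word reports an Invalid Operand or Zero Division exception, the stub returns a NaN built from the literal bit pattern 0x7ff8000000000000, the canonical IEEE-754 quiet NaN for doubles (exponent all ones, top mantissa bit set). A standalone check of that pattern, and of the fact that fmod itself yields NaN for a zero divisor:

#include <cstdint>
#include <cstring>
#include <cmath>
#include <cstdio>

int main() {
  const uint64_t kNaNValue = 0x7ff8000000000000ULL;  // quiet NaN bit pattern
  double d;
  std::memcpy(&d, &kNaNValue, sizeof(d));
  std::printf("%d %d\n", std::isnan(d), std::isnan(std::fmod(1.0, 0.0)));  // 1 1
  return 0;
}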

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
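RecordWriteField is the write barrier: after the map store, the mutated slot may hold an old-to-new pointer that the generational collector must be told about. A toy sketch of the idea, assuming a simple remembered-set design (Heap, WriteField, and InNewSpace here are illustrative stand-ins, not V8's types):

#include <cstdint>
#include <cstdio>
#include <unordered_set>

// Toy generational heap: one "new space" range and a remembered set of slots.
struct Heap {
  char new_space[1024];
  std::unordered_set<void**> remembered_set;
  bool InNewSpace(const void* p) const {
    std::uintptr_t a = reinterpret_cast<std::uintptr_t>(p);
    std::uintptr_t lo = reinterpret_cast<std::uintptr_t>(new_space);
    return a >= lo && a < lo + sizeof(new_space);
  }
};

// The barrier: do the store, then record the slot if it now points into
// new space, so a minor GC can find and update it without scanning old space.
void WriteField(Heap* heap, void** slot, void* value) {
  *slot = value;
  if (heap->InNewSpace(value)) heap->remembered_set.insert(slot);
}

int main() {
  Heap heap;
  void* field_in_old_object = NULL;
  WriteField(&heap, &field_in_old_object, heap.new_space);  // records the slot
  std::printf("%zu\n", heap.remembered_set.size());         // 1
  return 0;
}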


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  // The fail label is not actually used since we do not allocate.
  Label allocated, new_backing_store, only_change_map, done;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  // Check the backing store for COW-ness. For COW arrays we have to
  // allocate a new backing store.
  __ SmiToInteger32(r9, FieldOperand(r8, FixedArray::kLengthOffset));
  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
  __ j(equal, &new_backing_store);
  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movq(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set the backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs. The array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set the backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set the receiver's backing store.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movq(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set the backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movq(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, "object found in smi-only array");
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}
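The loop can turn a smi into a double with a plain shift and cvtlsi2sd because, on x64 in this V8 vintage, a smi keeps its 32-bit payload in the upper half of the word. A standalone sketch of that tagging arithmetic, assuming kSmiShift == 32; the hole constant below is an illustrative NaN bit pattern standing in for kHoleNanInt64, whose real value lives in v8globals.h:

#include <cstdint>
#include <cstring>
#include <cstdio>

int64_t Integer32ToSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }
int32_t SmiToInteger32(int64_t smi)   { return static_cast<int32_t>(smi >> 32); }

int main() {
  const uint64_t kHoleNanInt64 = 0x7fffffffffffffffULL;  // stand-in pattern
  int64_t smi = Integer32ToSmi(42);
  double unboxed = static_cast<double>(SmiToInteger32(smi));  // the cvtlsi2sd step
  double hole;
  std::memcpy(&hole, &kHoleNanInt64, sizeof(hole));  // the-hole NaN, bit for bit
  std::printf("%f %d\n", unboxed, hole != hole);     // 42.000000 1
  return 0;
}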


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ push(rax);

  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_pointer_size,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movq(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
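Note that the hole check (cmpq r14, rsi) compares raw bits in integer registers rather than doubles: under floating-point comparison NaN never equals anything, including itself, so the hole NaN could never be matched with ucomisd. A minimal standalone illustration, again with a stand-in bit pattern for the real kHoleNanInt64:

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  const uint64_t kHole = 0x7fffffffffffffffULL;  // stand-in hole-NaN pattern
  double element;
  std::memcpy(&element, &kHole, sizeof(element));
  uint64_t element_bits;
  std::memcpy(&element_bits, &element, sizeof(element_bits));
  std::printf("fp compare: %d, bit compare: %d\n",
              element == element,        // 0: NaN never equals itself
              element_bits == kHole);    // 1: the bits match exactly
  return 0;
}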


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into the result register.
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addq(index, result);
  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kEmptyStringRootIndex);
  __ j(not_equal, call_runtime);
  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, "external string expected, but not found");
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check the encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // ASCII string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqAsciiString::kHeaderSize));
  __ bind(&done);
}
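The dispatch above is driven entirely by bit tests on the instance-type byte. A plain C++ sketch of the same classification, assuming the mask and tag values of this era's objects.h (copied here for illustration; the authoritative definitions are in objects.h):

#include <cstdint>
#include <cstdio>

const uint32_t kStringRepresentationMask = 0x03;  // seq 0, cons 1, external 2, sliced 3
const uint32_t kIsIndirectStringMask     = 0x01;  // set for cons and sliced
const uint32_t kSlicedNotConsMask        = 0x02;  // distinguishes sliced from cons
const uint32_t kStringEncodingMask       = 0x04;  // set => ASCII, clear => two-byte

const char* Classify(uint32_t instance_type) {
  if (instance_type & kIsIndirectStringMask) {
    return (instance_type & kSlicedNotConsMask) ? "sliced" : "cons";
  }
  if (instance_type & kStringRepresentationMask) return "external";
  return (instance_type & kStringEncodingMask) ? "sequential ASCII"
                                               : "sequential two-byte";
}

int main() {
  std::printf("%s\n", Classify(0x04));  // sequential ASCII
  std::printf("%s\n", Classify(0x03));  // sliced
  return 0;
}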

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64