v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
codegen-x64.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_X64
31 
32 #include "codegen.h"
33 #include "macro-assembler.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 // -------------------------------------------------------------------------
39 // Platform-specific RuntimeCallHelper functions.
40 
41 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
42  masm->EnterFrame(StackFrame::INTERNAL);
43  ASSERT(!masm->has_frame());
44  masm->set_has_frame(true);
45 }
46 
47 
48 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
49  masm->LeaveFrame(StackFrame::INTERNAL);
50  ASSERT(masm->has_frame());
51  masm->set_has_frame(false);
52 }
53 
54 
55 #define __ masm.
56 
57 
58 UnaryMathFunction CreateExpFunction() {
59  if (!FLAG_fast_math) return &std::exp;
60  size_t actual_size;
61  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
62  if (buffer == NULL) return &std::exp;
63  ExternalReference::InitializeMathExpData();
64 
65  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
66  // xmm0: raw double input.
67  XMMRegister input = xmm0;
68  XMMRegister result = xmm1;
69  __ pushq(rax);
70  __ pushq(rbx);
71 
72  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
73 
74  __ popq(rbx);
75  __ popq(rax);
76  __ movsd(xmm0, result);
77  __ Ret();
78 
79  CodeDesc desc;
80  masm.GetCode(&desc);
81  ASSERT(!RelocInfo::RequiresRelocation(desc));
82 
83  CPU::FlushICache(buffer, actual_size);
84  OS::ProtectCode(buffer, actual_size);
85  return FUNCTION_CAST<UnaryMathFunction>(buffer);
86 }
87 
88 
89 UnaryMathFunction CreateSqrtFunction() {
90  size_t actual_size;
91  // Allocate buffer in executable space.
92  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
93  &actual_size,
94  true));
95  if (buffer == NULL) return &std::sqrt;
96 
97  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
98  // xmm0: raw double input.
99  // Move double input into registers.
100  __ sqrtsd(xmm0, xmm0);
101  __ Ret();
102 
103  CodeDesc desc;
104  masm.GetCode(&desc);
105  ASSERT(!RelocInfo::RequiresRelocation(desc));
106 
107  CPU::FlushICache(buffer, actual_size);
108  OS::ProtectCode(buffer, actual_size);
109  return FUNCTION_CAST<UnaryMathFunction>(buffer);
110 }
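// Usage sketch (illustrative): the generated stub is returned as an ordinary
// C++ function pointer of type UnaryMathFunction (double (*)(double)), so a
// caller could write:
//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
//   double y = fast_sqrt(2.0);  // executes the sqrtsd sequence emitted above
// The same pattern applies to CreateExpFunction(), which falls back to
// std::exp when --fast-math is off or the executable buffer cannot be
// allocated.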
111 
112 
113 #ifdef _WIN64
114 typedef double (*ModuloFunction)(double, double);
115 // Define custom fmod implementation.
116 ModuloFunction CreateModuloFunction() {
117  size_t actual_size;
118  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
119  &actual_size,
120  true));
121  CHECK(buffer);
122  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
123  // Generated code is put into a fixed, unmovable, buffer, and not into
124  // the V8 heap. We can't, and don't, refer to any relocatable addresses
125  // (e.g. the JavaScript nan-object).
126 
127  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
128  // returns result in xmm0.
129  // Argument backing space is allocated on the stack above
130  // the return address.
131 
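// (Illustrative) Under the Win64 convention assumed here, the caller reserves
// "home" slots for the register arguments just above the return address, so
// on entry the stack looks roughly like:
//   [rsp + 0]                 : return address
//   [rsp + kRegisterSize]     : home slot for argument 0 (reused to spill x)
//   [rsp + kRegisterSize * 2] : home slot for argument 1 (reused to spill y)
// which is why the code below can store xmm0/xmm1 there without adjusting rsp.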
132  // Compute x mod y.
133  // Load y and x (use argument backing store as temporary storage).
134  __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
135  __ movsd(Operand(rsp, kRegisterSize), xmm0);
136  __ fld_d(Operand(rsp, kRegisterSize * 2));
137  __ fld_d(Operand(rsp, kRegisterSize));
138 
139  // Clear exception flags before operation.
140  {
141  Label no_exceptions;
142  __ fwait();
143  __ fnstsw_ax();
144  // Clear if Illegal Operand or Zero Division exceptions are set.
145  __ testb(rax, Immediate(5));
146  __ j(zero, &no_exceptions);
147  __ fnclex();
148  __ bind(&no_exceptions);
149  }
150 
151  // Compute st(0) % st(1)
152  {
153  Label partial_remainder_loop;
154  __ bind(&partial_remainder_loop);
155  __ fprem();
156  __ fwait();
157  __ fnstsw_ax();
158  __ testl(rax, Immediate(0x400 /* C2 */));
159  // If C2 is set, computation only has partial result. Loop to
160  // continue computation.
161  __ j(not_zero, &partial_remainder_loop);
162  }
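// (Note) fprem only partially reduces st(0) modulo st(1) when the exponents
// differ by 64 or more; in that case the x87 C2 status flag stays set, which
// is why the loop above keeps retrying until C2 (bit 0x400 of the status
// word) reads as zero.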
163 
164  Label valid_result;
165  Label return_result;
166  // If Invalid Operand or Zero Division exceptions are set,
167  // return NaN.
168  __ testb(rax, Immediate(5));
169  __ j(zero, &valid_result);
170  __ fstp(0); // Drop result in st(0).
171  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
172  __ movq(rcx, kNaNValue);
173  __ movq(Operand(rsp, kRegisterSize), rcx);
174  __ movsd(xmm0, Operand(rsp, kRegisterSize));
175  __ jmp(&return_result);
176 
177  // If result is valid, return that.
178  __ bind(&valid_result);
179  __ fstp_d(Operand(rsp, kRegisterSize));
180  __ movsd(xmm0, Operand(rsp, kRegisterSize));
181 
182  // Clean up FPU stack and exceptions and return xmm0
183  __ bind(&return_result);
184  __ fstp(0); // Unload y.
185 
186  Label clear_exceptions;
187  __ testb(rax, Immediate(0x3f /* Any Exception*/));
188  __ j(not_zero, &clear_exceptions);
189  __ ret(0);
190  __ bind(&clear_exceptions);
191  __ fnclex();
192  __ ret(0);
193 
194  CodeDesc desc;
195  masm.GetCode(&desc);
196  OS::ProtectCode(buffer, actual_size);
197  // Call the function from C++ through this pointer.
198  return FUNCTION_CAST<ModuloFunction>(buffer);
199 }
200 
201 #endif
202 
203 #undef __
204 
205 // -------------------------------------------------------------------------
206 // Code generators
207 
208 #define __ ACCESS_MASM(masm)
209 
210 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
211  MacroAssembler* masm, AllocationSiteMode mode,
212  Label* allocation_memento_found) {
213  // ----------- S t a t e -------------
214  // -- rax : value
215  // -- rbx : target map
216  // -- rcx : key
217  // -- rdx : receiver
218  // -- rsp[0] : return address
219  // -----------------------------------
220  if (mode == TRACK_ALLOCATION_SITE) {
221  ASSERT(allocation_memento_found != NULL);
222  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
223  }
224 
225  // Set transitioned map.
226  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
227  __ RecordWriteField(rdx,
228  HeapObject::kMapOffset,
229  rbx,
230  rdi,
231  kDontSaveFPRegs,
232  EMIT_REMEMBERED_SET,
233  OMIT_SMI_CHECK);
234 }
235 
236 
237 void ElementsTransitionGenerator::GenerateSmiToDouble(
238  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
239  // ----------- S t a t e -------------
240  // -- rax : value
241  // -- rbx : target map
242  // -- rcx : key
243  // -- rdx : receiver
244  // -- rsp[0] : return address
245  // -----------------------------------
246  // The fail label is not actually used since we do not allocate.
247  Label allocated, new_backing_store, only_change_map, done;
248 
249  if (mode == TRACK_ALLOCATION_SITE) {
250  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
251  }
252 
253  // Check for empty arrays, which only require a map transition and no changes
254  // to the backing store.
255  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
256  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
257  __ j(equal, &only_change_map);
258 
259  // Check backing store for COW-ness. For COW arrays we have to
260  // allocate a new backing store.
261  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
262  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
263  Heap::kFixedCOWArrayMapRootIndex);
264  __ j(equal, &new_backing_store);
265  // Check if the backing store is in new-space. If not, we need to allocate
266  // a new one since the old one is in pointer-space.
267  // If in new space, we can reuse the old backing store because it is
268  // the same size.
269  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
270 
271  __ movp(r14, r8); // Destination array equals source array.
272 
273  // r8 : source FixedArray
274  // r9 : elements array length
275  // r14: destination FixedDoubleArray
276  // Set backing store's map
277  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
278  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
279 
280  __ bind(&allocated);
281  // Set transitioned map.
282  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
283  __ RecordWriteField(rdx,
284  HeapObject::kMapOffset,
285  rbx,
286  rdi,
287  kDontSaveFPRegs,
288  OMIT_REMEMBERED_SET,
289  OMIT_SMI_CHECK);
290 
291  // Convert smis to doubles and holes to hole NaNs. The Array's length
292  // remains unchanged.
295 
296  Label loop, entry, convert_hole;
297  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
298  // r15: the-hole NaN
299  __ jmp(&entry);
300 
301  // Allocate new backing store.
302  __ bind(&new_backing_store);
303  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
304  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
305  // Set backing store's map
306  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
307  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
308  // Set receiver's backing store.
309  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
310  __ movp(r11, r14);
311  __ RecordWriteField(rdx,
312  JSObject::kElementsOffset,
313  r11,
314  r15,
315  kDontSaveFPRegs,
316  EMIT_REMEMBERED_SET,
317  OMIT_SMI_CHECK);
318  // Set backing store's length.
319  __ Integer32ToSmi(r11, r9);
320  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
321  __ jmp(&allocated);
322 
323  __ bind(&only_change_map);
324  // Set transitioned map.
325  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
326  __ RecordWriteField(rdx,
327  HeapObject::kMapOffset,
328  rbx,
329  rdi,
330  kDontSaveFPRegs,
331  OMIT_REMEMBERED_SET,
332  OMIT_SMI_CHECK);
333  __ jmp(&done);
334 
335  // Conversion loop.
336  __ bind(&loop);
337  __ movp(rbx,
338  FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
339  // r9 : current element's index
340  // rbx: current element (smi-tagged)
341  __ JumpIfNotSmi(rbx, &convert_hole);
342  __ SmiToInteger32(rbx, rbx);
343  __ Cvtlsi2sd(xmm0, rbx);
344  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
345  xmm0);
346  __ jmp(&entry);
347  __ bind(&convert_hole);
348 
349  if (FLAG_debug_code) {
350  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
351  __ Assert(equal, kObjectFoundInSmiOnlyArray);
352  }
353 
354  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
355  __ bind(&entry);
356  __ decp(r9);
357  __ j(not_sign, &loop);
358 
359  __ bind(&done);
360 }
361 
362 
363 void ElementsTransitionGenerator::GenerateDoubleToObject(
364  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
365  // ----------- S t a t e -------------
366  // -- rax : value
367  // -- rbx : target map
368  // -- rcx : key
369  // -- rdx : receiver
370  // -- rsp[0] : return address
371  // -----------------------------------
372  Label loop, entry, convert_hole, gc_required, only_change_map;
373 
374  if (mode == TRACK_ALLOCATION_SITE) {
375  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
376  }
377 
378  // Check for empty arrays, which only require a map transition and no changes
379  // to the backing store.
380  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
381  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
382  __ j(equal, &only_change_map);
383 
384  __ Push(rax);
385 
386  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
387  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
388  // r8 : source FixedDoubleArray
389  // r9 : number of elements
390  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
391  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
392  // r11: destination FixedArray
393  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
394  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
395  __ Integer32ToSmi(r14, r9);
396  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
397 
398  // Prepare for conversion loop.
399  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
400  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
401  // rsi: the-hole NaN
402  // rdi: pointer to the-hole
403  __ jmp(&entry);
404 
405  // Call into runtime if GC is required.
406  __ bind(&gc_required);
407  __ Pop(rax);
408  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
409  __ jmp(fail);
410 
411  // Box doubles into heap numbers.
412  __ bind(&loop);
413  __ movq(r14, FieldOperand(r8,
414  r9,
415  times_8,
416  FixedDoubleArray::kHeaderSize));
417  // r9 : current element's index
418  // r14: current element
419  __ cmpq(r14, rsi);
420  __ j(equal, &convert_hole);
421 
422  // Non-hole double, copy value into a heap number.
423  __ AllocateHeapNumber(rax, r15, &gc_required);
424  // rax: new heap number
425  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
426  __ movp(FieldOperand(r11,
427  r9,
428  times_pointer_size,
429  FixedArray::kHeaderSize),
430  rax);
431  __ movp(r15, r9);
432  __ RecordWriteArray(r11,
433  rax,
434  r15,
435  kDontSaveFPRegs,
436  EMIT_REMEMBERED_SET,
437  OMIT_SMI_CHECK);
438  __ jmp(&entry, Label::kNear);
439 
440  // Replace the-hole NaN with the-hole pointer.
441  __ bind(&convert_hole);
442  __ movp(FieldOperand(r11,
443  r9,
444  times_pointer_size,
445  FixedArray::kHeaderSize),
446  rdi);
447 
448  __ bind(&entry);
449  __ decp(r9);
450  __ j(not_sign, &loop);
451 
452  // Replace receiver's backing store with newly created and filled FixedArray.
453  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
454  __ RecordWriteField(rdx,
455  JSObject::kElementsOffset,
456  r11,
457  r15,
458  kDontSaveFPRegs,
459  EMIT_REMEMBERED_SET,
460  OMIT_SMI_CHECK);
461  __ Pop(rax);
462  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
463 
464  __ bind(&only_change_map);
465  // Set transitioned map.
466  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
467  __ RecordWriteField(rdx,
468  HeapObject::kMapOffset,
469  rbx,
470  rdi,
471  kDontSaveFPRegs,
472  OMIT_REMEMBERED_SET,
473  OMIT_SMI_CHECK);
474 }
475 
476 
477 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
478  Register string,
479  Register index,
480  Register result,
481  Label* call_runtime) {
482  // Fetch the instance type of the receiver into result register.
483  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
484  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
485 
486  // We need special handling for indirect strings.
487  Label check_sequential;
488  __ testb(result, Immediate(kIsIndirectStringMask));
489  __ j(zero, &check_sequential, Label::kNear);
490 
491  // Dispatch on the indirect string shape: slice or cons.
492  Label cons_string;
493  __ testb(result, Immediate(kSlicedNotConsMask));
494  __ j(zero, &cons_string, Label::kNear);
495 
496  // Handle slices.
497  Label indirect_string_loaded;
498  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
499  __ addp(index, result);
500  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
501  __ jmp(&indirect_string_loaded, Label::kNear);
502 
503  // Handle cons strings.
504  // Check whether the right hand side is the empty string (i.e. if
505  // this is really a flat string in a cons string). If that is not
506  // the case we would rather go to the runtime system now to flatten
507  // the string.
508  __ bind(&cons_string);
509  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
510  Heap::kempty_stringRootIndex);
511  __ j(not_equal, call_runtime);
512  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
513 
514  __ bind(&indirect_string_loaded);
515  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
516  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
517 
518  // Distinguish sequential and external strings. Only these two string
519  // representations can reach here (slices and flat cons strings have been
520  // reduced to the underlying sequential or external string).
521  Label seq_string;
522  __ bind(&check_sequential);
524  __ testb(result, Immediate(kStringRepresentationMask));
525  __ j(zero, &seq_string, Label::kNear);
526 
527  // Handle external strings.
528  Label ascii_external, done;
529  if (FLAG_debug_code) {
530  // Assert that we do not have a cons or slice (indirect strings) here.
531  // Sequential strings have already been ruled out.
532  __ testb(result, Immediate(kIsIndirectStringMask));
533  __ Assert(zero, kExternalStringExpectedButNotFound);
534  }
535  // Rule out short external strings.
537  __ testb(result, Immediate(kShortExternalStringTag));
538  __ j(not_zero, call_runtime);
539  // Check encoding.
541  __ testb(result, Immediate(kStringEncodingMask));
542  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
543  __ j(not_equal, &ascii_external, Label::kNear);
544  // Two-byte string.
545  __ movzxwl(result, Operand(result, index, times_2, 0));
546  __ jmp(&done, Label::kNear);
547  __ bind(&ascii_external);
548  // Ascii string.
549  __ movzxbl(result, Operand(result, index, times_1, 0));
550  __ jmp(&done, Label::kNear);
551 
552  // Dispatch on the encoding: ASCII or two-byte.
553  Label ascii;
554  __ bind(&seq_string);
557  __ testb(result, Immediate(kStringEncodingMask));
558  __ j(not_zero, &ascii, Label::kNear);
559 
560  // Two-byte string.
561  // Load the two-byte character code into the result register.
562  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
563  __ movzxwl(result, FieldOperand(string,
564  index,
565  times_2,
566  SeqTwoByteString::kHeaderSize));
567  __ jmp(&done, Label::kNear);
568 
569  // ASCII string.
570  // Load the byte into the result register.
571  __ bind(&ascii);
572  __ movzxbl(result, FieldOperand(string,
573  index,
574  times_1,
575  SeqOneByteString::kHeaderSize));
576  __ bind(&done);
577 }
578 
579 
580 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
581  XMMRegister input,
582  XMMRegister result,
583  XMMRegister double_scratch,
584  Register temp1,
585  Register temp2) {
586  ASSERT(!input.is(result));
587  ASSERT(!input.is(double_scratch));
588  ASSERT(!result.is(double_scratch));
589  ASSERT(!temp1.is(temp2));
590  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
591 
592  Label done;
593 
594  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
595  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
596  __ xorpd(result, result);
597  __ ucomisd(double_scratch, input);
598  __ j(above_equal, &done);
599  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
600  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
601  __ j(above_equal, &done);
602  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
603  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
604  __ mulsd(double_scratch, input);
605  __ addsd(double_scratch, result);
606  __ movq(temp2, double_scratch);
607  __ subsd(double_scratch, result);
608  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
609  __ leaq(temp1, Operand(temp2, 0x1ff800));
610  __ andq(temp2, Immediate(0x7ff));
611  __ shr(temp1, Immediate(11));
612  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
613  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
614  __ shl(temp1, Immediate(52));
615  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
616  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
617  __ subsd(double_scratch, input);
618  __ movsd(input, double_scratch);
619  __ subsd(result, double_scratch);
620  __ mulsd(input, double_scratch);
621  __ mulsd(result, input);
622  __ movq(input, temp1);
623  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
624  __ subsd(result, double_scratch);
625  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
626  __ mulsd(result, input);
627 
628  __ bind(&done);
629 }
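// (Illustrative) The sequence above assembles the power-of-two scale factor
// directly as an IEEE-754 bit pattern: temp1 ends up holding the biased
// exponent, which is shifted into the exponent field (shl by 52) and OR-ed
// with a mantissa fetched from the 2048-entry math_exp_log_table (indexed by
// temp2 & 0x7ff), then moved back into an XMM register with movq. The
// remaining mulsd/subsd/addsd steps apply a small correction using the
// constants table.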
630 
631 #undef __
632 
633 
634 static byte* GetNoCodeAgeSequence(uint32_t* length) {
635  static bool initialized = false;
636  static byte sequence[kNoCodeAgeSequenceLength];
637  *length = kNoCodeAgeSequenceLength;
638  if (!initialized) {
639  // The sequence of instructions that is patched out for aging code is the
640  // following boilerplate stack-building prologue that is found both in
641  // FUNCTION and OPTIMIZED_FUNCTION code:
642  CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
643  patcher.masm()->pushq(rbp);
644  patcher.masm()->movp(rbp, rsp);
645  patcher.masm()->Push(rsi);
646  patcher.masm()->Push(rdi);
647  initialized = true;
648  }
649  return sequence;
650 }
651 
652 
653 bool Code::IsYoungSequence(byte* sequence) {
654  uint32_t young_length;
655  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
656  bool result = (!memcmp(sequence, young_sequence, young_length));
657  ASSERT(result || *sequence == kCallOpcode);
658  return result;
659 }
660 
661 
662 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
663  MarkingParity* parity) {
664  if (IsYoungSequence(sequence)) {
665  *age = kNoAgeCodeAge;
666  *parity = NO_MARKING_PARITY;
667  } else {
668  sequence++; // Skip the kCallOpcode byte
669  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
670  Assembler::kCallTargetAddressOffset;
671  Code* stub = GetCodeFromTargetAddress(target_address);
672  GetCodeAgeAndParity(stub, age, parity);
673  }
674 }
675 
676 
677 void Code::PatchPlatformCodeAge(Isolate* isolate,
678  byte* sequence,
679  Code::Age age,
680  MarkingParity parity) {
681  uint32_t young_length;
682  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
683  if (age == kNoAgeCodeAge) {
684  CopyBytes(sequence, young_sequence, young_length);
685  CPU::FlushICache(sequence, young_length);
686  } else {
687  Code* stub = GetCodeAgeStub(isolate, age, parity);
688  CodePatcher patcher(sequence, young_length);
689  patcher.masm()->call(stub->instruction_start());
690  patcher.masm()->Nop(
691  kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
692  }
693 }
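// (Note) Code aging thus works by byte-comparing a function's prologue against
// the "young" boilerplate captured in GetNoCodeAgeSequence(): aging a function
// overwrites that prologue with a short call into an age-specific stub, while
// making it young again copies the original byte sequence back and flushes the
// instruction cache.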
694 
695 
696 Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
697  ASSERT(index >= 0);
698  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
699  int displacement_to_last_argument = base_reg_.is(rsp) ?
700  kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
701  displacement_to_last_argument += extra_displacement_to_last_argument_;
702  if (argument_count_reg_.is(no_reg)) {
703  // argument[0] is at base_reg_ + displacement_to_last_argument +
704  // (argument_count_immediate_ + receiver - 1) * kPointerSize.
705  ASSERT(argument_count_immediate_ + receiver > 0);
706  return Operand(base_reg_, displacement_to_last_argument +
707  (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
708  } else {
709  // argument[0] is at base_reg_ + displacement_to_last_argument +
710  // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
711  return Operand(base_reg_, argument_count_reg_, times_pointer_size,
712  displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
713  }
714 }
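// Worked example (illustrative): with base_reg_ == rsp, an immediate argument
// count of 2, ARGUMENTS_CONTAIN_RECEIVER and no extra displacement,
// GetArgumentOperand(0) resolves to
//   Operand(rsp, kPCOnStackSize + (2 + 1 - 1 - 0) * kPointerSize)
// i.e. rsp + 24 on x64, and each higher index moves one kPointerSize closer
// to the return address at rsp[0].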
715 
716 
717 } } // namespace v8::internal
718 
719 #endif // V8_TARGET_ARCH_X64