v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
codegen-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM64
31 
32 #include "codegen.h"
33 #include "macro-assembler.h"
34 #include "simulator-arm64.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 #define __ ACCESS_MASM(masm)
40 
41 #if defined(USE_SIMULATOR)
42 byte* fast_exp_arm64_machine_code = NULL;
43 double fast_exp_simulator(double x) {
44  Simulator * simulator = Simulator::current(Isolate::Current());
45  Simulator::CallArgument args[] = {
46  Simulator::CallArgument(x),
47  Simulator::CallArgument::End()
48  };
49  return simulator->CallDouble(fast_exp_arm64_machine_code, args);
50 }
51 #endif
52 
53 
54 UnaryMathFunction CreateExpFunction() {
55  if (!FLAG_fast_math) return &std::exp;
56 
57  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
58  // an AAPCS64-compliant exp() function. This will be faster than the C
59  // library's exp() function, but probably less accurate.
60  size_t actual_size;
61  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
62  if (buffer == NULL) return &std::exp;
63 
64  ExternalReference::InitializeMathExpData();
65  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
66  masm.SetStackPointer(csp);
67 
68  // The argument will be in d0 on entry.
69  DoubleRegister input = d0;
70  // Use other caller-saved registers for all other values.
71  DoubleRegister result = d1;
72  DoubleRegister double_temp1 = d2;
73  DoubleRegister double_temp2 = d3;
74  Register temp1 = x10;
75  Register temp2 = x11;
76  Register temp3 = x12;
77 
78  MathExpGenerator::EmitMathExp(&masm, input, result,
79  double_temp1, double_temp2,
80  temp1, temp2, temp3);
81  // Move the result to the return register.
82  masm.Fmov(d0, result);
83  masm.Ret();
84 
85  CodeDesc desc;
86  masm.GetCode(&desc);
87  ASSERT(!RelocInfo::RequiresRelocation(desc));
88 
89  CPU::FlushICache(buffer, actual_size);
90  OS::ProtectCode(buffer, actual_size);
91 
92 #if !defined(USE_SIMULATOR)
93  return FUNCTION_CAST<UnaryMathFunction>(buffer);
94 #else
95  fast_exp_arm64_machine_code = buffer;
96  return &fast_exp_simulator;
97 #endif
98 }
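// Usage sketch (an editorial illustration; the names below are hypothetical):
// the returned UnaryMathFunction is called exactly like std::exp, e.g.
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // close to std::exp(1.0), but possibly
//                              // differing in the last few bits.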
99 
100 
101 UnaryMathFunction CreateSqrtFunction() {
102  return &std::sqrt;
103 }
104 
105 
106 // -------------------------------------------------------------------------
107 // Platform-specific RuntimeCallHelper functions.
108 
109 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
110  masm->EnterFrame(StackFrame::INTERNAL);
111  ASSERT(!masm->has_frame());
112  masm->set_has_frame(true);
113 }
114 
115 
116 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
117  masm->LeaveFrame(StackFrame::INTERNAL);
118  ASSERT(masm->has_frame());
119  masm->set_has_frame(false);
120 }
121 
122 
123 // -------------------------------------------------------------------------
124 // Code generators
125 
126 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
127  MacroAssembler* masm, AllocationSiteMode mode,
128  Label* allocation_memento_found) {
129  // ----------- S t a t e -------------
130  // -- x2 : receiver
131  // -- x3 : target map
132  // -----------------------------------
133  Register receiver = x2;
134  Register map = x3;
135 
136  if (mode == TRACK_ALLOCATION_SITE) {
137  ASSERT(allocation_memento_found != NULL);
138  __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
139  allocation_memento_found);
140  }
141 
142  // Set transitioned map.
143  __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
144  __ RecordWriteField(receiver,
145  HeapObject::kMapOffset,
146  map,
147  x10,
148  kLRHasNotBeenSaved,
149  kDontSaveFPRegs,
150  EMIT_REMEMBERED_SET,
151  OMIT_SMI_CHECK);
152 }
153 
154 
155 void ElementsTransitionGenerator::GenerateSmiToDouble(
156  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
157  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
158  // ----------- S t a t e -------------
159  // -- lr : return address
160  // -- x0 : value
161  // -- x1 : key
162  // -- x2 : receiver
163  // -- x3 : target map, scratch for subsequent call
164  // -----------------------------------
165  Register receiver = x2;
166  Register target_map = x3;
167 
168  Label gc_required, only_change_map;
169 
170  if (mode == TRACK_ALLOCATION_SITE) {
171  __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
172  }
173 
174  // Check for empty arrays, which only require a map transition and no changes
175  // to the backing store.
176  Register elements = x4;
177  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
178  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
179 
180  __ Push(lr);
181  Register length = x5;
182  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
183  FixedArray::kLengthOffset));
184 
185  // Allocate new FixedDoubleArray.
186  Register array_size = x6;
187  Register array = x7;
188  __ Lsl(array_size, length, kDoubleSizeLog2);
189  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
190  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
191  // Register array is non-tagged heap object.
192 
193  // Set the destination FixedDoubleArray's length and map.
194  Register map_root = x6;
195  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
196  __ SmiTag(x11, length);
197  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
198  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
199 
200  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
201  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
202  kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
203  OMIT_SMI_CHECK);
204 
205  // Replace receiver's backing store with newly created FixedDoubleArray.
206  __ Add(x10, array, kHeapObjectTag);
207  __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
208  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
209  x6, kLRHasBeenSaved, kDontSaveFPRegs,
210  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
211 
212  // Prepare for conversion loop.
213  Register src_elements = x10;
214  Register dst_elements = x11;
215  Register dst_end = x12;
216  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
217  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
218  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
219 
220  FPRegister nan_d = d1;
221  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
222 
223  Label entry, done;
224  __ B(&entry);
225 
226  __ Bind(&only_change_map);
227  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
228  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
229  kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
230  OMIT_SMI_CHECK);
231  __ B(&done);
232 
233  // Call into runtime if GC is required.
234  __ Bind(&gc_required);
235  __ Pop(lr);
236  __ B(fail);
237 
238  // Iterate over the array, copying and converting smis to doubles. If an
239  // element is non-smi, write a hole to the destination.
240  {
241  Label loop;
242  __ Bind(&loop);
243  __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
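  // Note: the element is untagged and converted speculatively (the
  // kSpeculativeUntag mode assumes it is a smi); the Tst/Fcsel pair below then
  // keeps the converted value only when the smi tag really was clear, and
  // substitutes the hole NaN otherwise.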
244  __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
245  __ Tst(x13, kSmiTagMask);
246  __ Fcsel(d0, d0, nan_d, eq);
247  __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
248 
249  __ Bind(&entry);
250  __ Cmp(dst_elements, dst_end);
251  __ B(lt, &loop);
252  }
253 
254  __ Pop(lr);
255  __ Bind(&done);
256 }
257 
258 
259 void ElementsTransitionGenerator::GenerateDoubleToObject(
260  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
261  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
262  // ----------- S t a t e -------------
263  // -- x0 : value
264  // -- x1 : key
265  // -- x2 : receiver
266  // -- lr : return address
267  // -- x3 : target map, scratch for subsequent call
268  // -- x4 : scratch (elements)
269  // -----------------------------------
270  Register value = x0;
271  Register key = x1;
272  Register receiver = x2;
273  Register target_map = x3;
274 
275  if (mode == TRACK_ALLOCATION_SITE) {
276  __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
277  }
278 
279  // Check for empty arrays, which only require a map transition and no changes
280  // to the backing store.
281  Label only_change_map;
282  Register elements = x4;
283  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
284  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
285 
286  __ Push(lr);
287  // TODO(all): These registers may not need to be pushed. Examine
288  // RecordWriteStub and check whether it's needed.
289  __ Push(target_map, receiver, key, value);
290  Register length = x5;
291  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
292  FixedArray::kLengthOffset));
293 
294  // Allocate new FixedArray.
295  Register array_size = x6;
296  Register array = x7;
297  Label gc_required;
298  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
299  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
300  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
301 
302  // Set destination FixedDoubleArray's length and map.
303  Register map_root = x6;
304  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
305  __ SmiTag(x11, length);
306  __ Str(x11, MemOperand(array, FixedArray::kLengthOffset));
307  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
308 
309  // Prepare for conversion loop.
310  Register src_elements = x10;
311  Register dst_elements = x11;
312  Register dst_end = x12;
313  __ Add(src_elements, elements,
314  FixedDoubleArray::kHeaderSize - kHeapObjectTag);
315  __ Add(dst_elements, array, FixedArray::kHeaderSize);
316  __ Add(array, array, kHeapObjectTag);
317  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
318 
319  Register the_hole = x14;
320  Register heap_num_map = x15;
321  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
322  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
323 
324  Label entry;
325  __ B(&entry);
326 
327  // Call into runtime if GC is required.
328  __ Bind(&gc_required);
329  __ Pop(value, key, receiver, target_map);
330  __ Pop(lr);
331  __ B(fail);
332 
333  {
334  Label loop, convert_hole;
335  __ Bind(&loop);
336  __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
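  // Holes in a FixedDoubleArray are stored as the reserved NaN bit pattern
  // kHoleNanInt64, so a raw 64-bit integer compare is enough to tell a hole
  // from an ordinary double value.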
337  __ Cmp(x13, kHoleNanInt64);
338  __ B(eq, &convert_hole);
339 
340  // Non-hole double, copy value into a heap number.
341  Register heap_num = x5;
342  __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
343  __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
344  __ Mov(x13, dst_elements);
345  __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
346  __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
347  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
348 
349  __ B(&entry);
350 
351  // Replace the-hole NaN with the-hole pointer.
352  __ Bind(&convert_hole);
353  __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
354 
355  __ Bind(&entry);
356  __ Cmp(dst_elements, dst_end);
357  __ B(lt, &loop);
358  }
359 
360  __ Pop(value, key, receiver, target_map);
361  // Replace receiver's backing store with newly created and filled FixedArray.
362  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
363  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
364  kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
365  OMIT_SMI_CHECK);
366  __ Pop(lr);
367 
368  __ Bind(&only_change_map);
369  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
370  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
371  kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
372  OMIT_SMI_CHECK);
373 }
374 
375 
376 bool Code::IsYoungSequence(byte* sequence) {
377  return MacroAssembler::IsYoungSequence(sequence);
378 }
379 
380 
381 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
382  MarkingParity* parity) {
383  if (IsYoungSequence(sequence)) {
384  *age = kNoAgeCodeAge;
385  *parity = NO_MARKING_PARITY;
386  } else {
387  byte* target = sequence + kCodeAgeStubEntryOffset;
388  Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
389  GetCodeAgeAndParity(stub, age, parity);
390  }
391 }
392 
393 
394 void Code::PatchPlatformCodeAge(Isolate* isolate,
395  byte* sequence,
396  Code::Age age,
397  MarkingParity parity) {
398  PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
399  if (age == kNoAgeCodeAge) {
400  MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
401  } else {
402  Code * stub = GetCodeAgeStub(isolate, age, parity);
403  MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
404  }
405 }
406 
407 
408 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
409  Register string,
410  Register index,
411  Register result,
412  Label* call_runtime) {
413  ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
414  // Fetch the instance type of the receiver into result register.
415  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
416  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
417 
418  // We need special handling for indirect strings.
419  Label check_sequential;
420  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
421 
422  // Dispatch on the indirect string shape: slice or cons.
423  Label cons_string;
424  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
425 
426  // Handle slices.
427  Label indirect_string_loaded;
428  __ Ldr(result.W(),
429  UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
430  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
431  __ Add(index, index, result.W());
432  __ B(&indirect_string_loaded);
433 
434  // Handle cons strings.
435  // Check whether the right hand side is the empty string (i.e. if
436  // this is really a flat string in a cons string). If that is not
437  // the case we would rather go to the runtime system now to flatten
438  // the string.
439  __ Bind(&cons_string);
440  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
441  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
442  // Get the first of the two strings and load its instance type.
443  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
444 
445  __ Bind(&indirect_string_loaded);
446  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
447  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
448 
449  // Distinguish sequential and external strings. Only these two string
450  // representations can reach here (slices and flat cons strings have been
451  // reduced to the underlying sequential or external string).
452  Label external_string, check_encoding;
453  __ Bind(&check_sequential);
455  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
456 
457  // Prepare sequential strings
459  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
460  __ B(&check_encoding);
461 
462  // Handle external strings.
463  __ Bind(&external_string);
464  if (FLAG_debug_code) {
465  // Assert that we do not have a cons or slice (indirect strings) here.
466  // Sequential strings have already been ruled out.
467  __ Tst(result, kIsIndirectStringMask);
468  __ Assert(eq, kExternalStringExpectedButNotFound);
469  }
470  // Rule out short external strings.
472  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
473  // can be bound far away in deferred code.
474  __ Tst(result, kShortExternalStringMask);
475  __ B(ne, call_runtime);
477 
478  Label ascii, done;
479  __ Bind(&check_encoding);
481  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
482  // Two-byte string.
483  __ Ldrh(result, MemOperand(string, index, SXTW, 1));
484  __ B(&done);
485  __ Bind(&ascii);
486  // Ascii string.
487  __ Ldrb(result, MemOperand(string, index, SXTW));
488  __ Bind(&done);
489 }
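// In short: on the fast path the generated code leaves the character code of
// string[index] in 'result', after peeling off sliced and flat cons wrappers;
// cases that need runtime help (e.g. short external strings, non-flat cons
// strings) branch to 'call_runtime' instead.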
490 
491 
492 static MemOperand ExpConstant(Register base, int index) {
493  return MemOperand(base, index * kDoubleSize);
494 }
495 
496 
497 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
498  DoubleRegister input,
499  DoubleRegister result,
500  DoubleRegister double_temp1,
501  DoubleRegister double_temp2,
502  Register temp1,
503  Register temp2,
504  Register temp3) {
505  // TODO(jbramley): There are several instances where fnmsub could be used
506  // instead of fmul and fsub. Doing this changes the result, but since this is
507  // an estimation anyway, does it matter?
508 
509  ASSERT(!AreAliased(input, result,
510  double_temp1, double_temp2,
511  temp1, temp2, temp3));
512  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
513 
514  Label done;
515  DoubleRegister double_temp3 = result;
516  Register constants = temp3;
517 
518  // The algorithm used relies on some magic constants which are initialized in
519  // ExternalReference::InitializeMathExpData().
520 
521  // Load the address of the start of the array.
522  __ Mov(constants, ExternalReference::math_exp_constants(0));
523 
524  // We have to do a four-way split here:
525  // - If input <= about -708.4, the output always rounds to zero.
526  // - If input >= about 709.8, the output always rounds to +infinity.
527  // - If the input is NaN, the output is NaN.
528  // - Otherwise, the result needs to be calculated.
529  Label result_is_finite_non_zero;
530  // Assert that we can load offset 0 (the small input threshold) and offset 1
531  // (the large input threshold) with a single ldp.
532  ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
533  ExpConstant(constants, 0).offset()));
534  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
535 
536  __ Fcmp(input, double_temp1);
537  __ Fccmp(input, double_temp2, NoFlag, hi);
538  // At this point, the condition flags can be in one of five states:
539  // NZCV
540  // 1000 -708.4 < input < 709.8 result = exp(input)
541  // 0110 input == 709.8 result = +infinity
542  // 0010 input > 709.8 result = +infinity
543  // 0011 input is NaN result = input
544  // 0000 input <= -708.4 result = +0.0
545 
546  // Continue the common case first. 'mi' tests N == 1.
547  __ B(&result_is_finite_non_zero, mi);
548 
549  // TODO(jbramley): Consider adding a +infinity register for ARM64.
550  __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
551 
552  // Select between +0.0 and +infinity. 'lo' tests C == 0.
553  __ Fcsel(result, fp_zero, double_temp2, lo);
554  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
555  __ Fcsel(result, result, input, vc);
556  __ B(&done);
557 
558  // The rest is magic, as described in InitializeMathExpData().
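  // Roughly, and inferred from the code below rather than from a
  // specification: x * (2048 / ln 2) is rounded to an integer n * 2048 + m
  // (0 <= m < 2048), leaving a small remainder r. The result is assembled as
  //   exp(x) ~= 2^n * T[m] * P(r)
  // where T[m] ~= 2^(m / 2048) comes from math_exp_log_table() and P(r) is a
  // short polynomial correction built from the remaining constants.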
559  __ Bind(&result_is_finite_non_zero);
560 
561  // Assert that we can load offset 3 and offset 4 with a single ldp.
562  ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
563  ExpConstant(constants, 3).offset()));
564  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
565  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
566  __ Fmov(temp2.W(), double_temp1.S());
567  __ Fsub(double_temp1, double_temp1, double_temp3);
568 
569  // Assert that we can load offset 5 and offset 6 with a single ldp.
570  ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
571  ExpConstant(constants, 5).offset()));
572  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
573  // TODO(jbramley): Consider using Fnmsub here.
574  __ Fmul(double_temp1, double_temp1, double_temp2);
575  __ Fsub(double_temp1, double_temp1, input);
576 
577  __ Fmul(double_temp2, double_temp1, double_temp1);
578  __ Fsub(double_temp3, double_temp3, double_temp1);
579  __ Fmul(double_temp3, double_temp3, double_temp2);
580 
581  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
582 
583  __ Ldr(double_temp2, ExpConstant(constants, 7));
584  // TODO(jbramley): Consider using Fnmsub here.
585  __ Fmul(double_temp3, double_temp3, double_temp2);
586  __ Fsub(double_temp3, double_temp3, double_temp1);
587 
588  // The 8th constant is 1.0, so use an immediate move rather than a load.
589  // We can't generate a runtime assertion here as we would need to call Abort
590  // in the runtime and we don't have an Isolate when we generate this code.
591  __ Fmov(double_temp2, 1.0);
592  __ Fadd(double_temp3, double_temp3, double_temp2);
593 
594  __ And(temp2, temp2, 0x7ff);
595  __ Add(temp1, temp1, 0x3ff);
596 
597  // Do the final table lookup.
598  __ Mov(temp3, ExternalReference::math_exp_log_table());
599 
600  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
601  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
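  // temp2.W() and temp3.W() now hold the low and high halves of the table
  // entry. The Orr/Bfi/Fmov sequence below folds the biased exponent from
  // temp1 into the high word and reassembles a 64-bit double representing
  // roughly 2^n * T[m]; the final Fmul combines it with the polynomial part.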
602  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
603  __ Bfi(temp2, temp1, 32, 32);
604  __ Fmov(double_temp1, temp2);
605 
606  __ Fmul(result, double_temp3, double_temp1);
607 
608  __ Bind(&done);
609 }
610 
611 #undef __
612 
613 } } // namespace v8::internal
614 
615 #endif // V8_TARGET_ARCH_ARM64