v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
codegen-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM
31 
32 #include "codegen.h"
33 #include "macro-assembler.h"
34 #include "simulator-arm.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 #define __ masm.
41 
42 
43 #if defined(USE_SIMULATOR)
44 byte* fast_exp_arm_machine_code = NULL;
45 double fast_exp_simulator(double x) {
46  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
47  fast_exp_arm_machine_code, x, 0);
48 }
49 #endif
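// Annotation (not part of the original source): when V8 runs on the ARM
// simulator, the stub generated below is ARM machine code that the host CPU
// cannot execute directly, so CreateExpFunction() returns fast_exp_simulator()
// instead, which routes each call through Simulator::CallFPReturnsDouble()
// with the stub's address.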
50 
51 
52 UnaryMathFunction CreateExpFunction() {
53  if (!FLAG_fast_math) return &std::exp;
54  size_t actual_size;
55  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
56  if (buffer == NULL) return &std::exp;
57  ExternalReference::InitializeMathExpData();
58 
59  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
60 
61  {
62  DwVfpRegister input = d0;
63  DwVfpRegister result = d1;
64  DwVfpRegister double_scratch1 = d2;
65  DwVfpRegister double_scratch2 = d3;
66  Register temp1 = r4;
67  Register temp2 = r5;
68  Register temp3 = r6;
69 
70  if (masm.use_eabi_hardfloat()) {
71  // Input value is in d0 anyway, nothing to do.
72  } else {
73  __ vmov(input, r0, r1);
74  }
75  __ Push(temp3, temp2, temp1);
76  MathExpGenerator::EmitMathExp(
77  &masm, input, result, double_scratch1, double_scratch2,
78  temp1, temp2, temp3);
79  __ Pop(temp3, temp2, temp1);
80  if (masm.use_eabi_hardfloat()) {
81  __ vmov(d0, result);
82  } else {
83  __ vmov(r0, r1, result);
84  }
85  __ Ret();
86  }
87 
88  CodeDesc desc;
89  masm.GetCode(&desc);
90  ASSERT(!RelocInfo::RequiresRelocation(desc));
91 
92  CPU::FlushICache(buffer, actual_size);
93  OS::ProtectCode(buffer, actual_size);
94 
95 #if !defined(USE_SIMULATOR)
96  return FUNCTION_CAST<UnaryMathFunction>(buffer);
97 #else
98  fast_exp_arm_machine_code = buffer;
99  return &fast_exp_simulator;
100 #endif
101 }
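// Annotation (not part of the original source): a minimal usage sketch of the
// returned UnaryMathFunction pointer; the variable names are illustrative only.
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // &std::exp is returned when FLAG_fast_math is off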
102 
103 #if defined(V8_HOST_ARCH_ARM)
104 OS::MemCopyUint8Function CreateMemCopyUint8Function(
105  OS::MemCopyUint8Function stub) {
106 #if defined(USE_SIMULATOR)
107  return stub;
108 #else
109  if (Serializer::enabled()) {
110  return stub;
111  }
112  size_t actual_size;
113  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
114  if (buffer == NULL) return stub;
115 
116  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
117 
118  Register dest = r0;
119  Register src = r1;
120  Register chars = r2;
121  Register temp1 = r3;
122  Label less_4;
123 
124  if (CpuFeatures::IsSupported(NEON)) {
125  Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
126  Label size_less_than_8;
127  __ pld(MemOperand(src, 0));
128 
129  __ cmp(chars, Operand(8));
130  __ b(lt, &size_less_than_8);
131  __ cmp(chars, Operand(32));
132  __ b(lt, &less_32);
133  if (CpuFeatures::cache_line_size() == 32) {
134  __ pld(MemOperand(src, 32));
135  }
136  __ cmp(chars, Operand(64));
137  __ b(lt, &less_64);
138  __ pld(MemOperand(src, 64));
139  if (CpuFeatures::cache_line_size() == 32) {
140  __ pld(MemOperand(src, 96));
141  }
142  __ cmp(chars, Operand(128));
143  __ b(lt, &less_128);
144  __ pld(MemOperand(src, 128));
145  if (CpuFeatures::cache_line_size() == 32) {
146  __ pld(MemOperand(src, 160));
147  }
148  __ pld(MemOperand(src, 192));
149  if (CpuFeatures::cache_line_size() == 32) {
150  __ pld(MemOperand(src, 224));
151  }
152  __ cmp(chars, Operand(256));
153  __ b(lt, &less_256);
154  __ sub(chars, chars, Operand(256));
155 
156  __ bind(&loop);
157  __ pld(MemOperand(src, 256));
158  __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
159  if (CpuFeatures::cache_line_size() == 32) {
160  __ pld(MemOperand(src, 256));
161  }
162  __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
163  __ sub(chars, chars, Operand(64), SetCC);
164  __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
165  __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
166  __ b(ge, &loop);
167  __ add(chars, chars, Operand(256));
168 
169  __ bind(&less_256);
170  __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
171  __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
172  __ sub(chars, chars, Operand(128));
173  __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
174  __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
175  __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
176  __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
177  __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
178  __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
179  __ cmp(chars, Operand(64));
180  __ b(lt, &less_64);
181 
182  __ bind(&less_128);
183  __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
184  __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
185  __ sub(chars, chars, Operand(64));
186  __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
187  __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
188 
189  __ bind(&less_64);
190  __ cmp(chars, Operand(32));
191  __ b(lt, &less_32);
192  __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
193  __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
194  __ sub(chars, chars, Operand(32));
195 
196  __ bind(&less_32);
197  __ cmp(chars, Operand(16));
198  __ b(le, &_16_or_less);
199  __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
200  __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
201  __ sub(chars, chars, Operand(16));
202 
203  __ bind(&_16_or_less);
204  __ cmp(chars, Operand(8));
205  __ b(le, &_8_or_less);
206  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
207  __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
208  __ sub(chars, chars, Operand(8));
209 
210  // Do a last copy which may overlap with the previous copy (up to 8 bytes).
211  __ bind(&_8_or_less);
212  __ rsb(chars, chars, Operand(8));
213  __ sub(src, src, Operand(chars));
214  __ sub(dest, dest, Operand(chars));
215  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
216  __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
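// Annotation (not part of the original source): at this point chars holds the
// remaining byte count (at most 8). rsb computed 8 - chars, and src/dest were
// moved back by that amount, so the final 8-byte vector copy re-writes a few
// already-copied bytes and ends exactly at the end of the buffer. For example,
// with 5 bytes left the pointers are rewound by 3 and the 8-byte copy covers
// those 3 old bytes plus the 5 new ones.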
217 
218  __ Ret();
219 
220  __ bind(&size_less_than_8);
221 
222  __ bic(temp1, chars, Operand(0x3), SetCC);
223  __ b(&less_4, eq);
224  __ ldr(temp1, MemOperand(src, 4, PostIndex));
225  __ str(temp1, MemOperand(dest, 4, PostIndex));
226  } else {
227  Register temp2 = ip;
228  Label loop;
229 
230  __ bic(temp2, chars, Operand(0x3), SetCC);
231  __ b(&less_4, eq);
232  __ add(temp2, dest, temp2);
233 
234  __ bind(&loop);
235  __ ldr(temp1, MemOperand(src, 4, PostIndex));
236  __ str(temp1, MemOperand(dest, 4, PostIndex));
237  __ cmp(dest, temp2);
238  __ b(&loop, ne);
239  }
240 
241  __ bind(&less_4);
242  __ mov(chars, Operand(chars, LSL, 31), SetCC);
243  // bit0 => Z (ne), bit1 => C (cs)
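// Annotation (not part of the original source): only the two low bits of the
// leftover count in chars matter here. Shifting chars left by 31 with SetCC
// puts bit 1 into the carry flag and leaves Z set only if bit 0 was clear, so
// the conditional ldrh/strh (cs) copy a halfword when 2 or 3 bytes remain and
// the conditional ldrb/strb (ne) copy the final byte when the count is odd.
// Example: a leftover count of 3 copies one halfword followed by one byte.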
244  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
245  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
246  __ ldrb(temp1, MemOperand(src), ne);
247  __ strb(temp1, MemOperand(dest), ne);
248  __ Ret();
249 
250  CodeDesc desc;
251  masm.GetCode(&desc);
252  ASSERT(!RelocInfo::RequiresRelocation(desc));
253 
254  CPU::FlushICache(buffer, actual_size);
255  OS::ProtectCode(buffer, actual_size);
256  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
257 #endif
258 }
259 
260 
261 // Convert 8 to 16. The number of characters to copy must be at least 8.
262 OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
263  OS::MemCopyUint16Uint8Function stub) {
264 #if defined(USE_SIMULATOR)
265  return stub;
266 #else
267  if (Serializer::enabled()) {
268  return stub;
269  }
270  size_t actual_size;
271  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
272  if (buffer == NULL) return stub;
273 
274  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
275 
276  Register dest = r0;
277  Register src = r1;
278  Register chars = r2;
279  if (CpuFeatures::IsSupported(NEON)) {
280  Register temp = r3;
281  Label loop;
282 
283  __ bic(temp, chars, Operand(0x7));
284  __ sub(chars, chars, Operand(temp));
285  __ add(temp, dest, Operand(temp, LSL, 1));
286 
287  __ bind(&loop);
288  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
289  __ vmovl(NeonU8, q0, d0);
290  __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
291  __ cmp(dest, temp);
292  __ b(&loop, ne);
293 
294  // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
295  __ rsb(chars, chars, Operand(8));
296  __ sub(src, src, Operand(chars));
297  __ sub(dest, dest, Operand(chars, LSL, 1));
298  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
299  __ vmovl(NeonU8, q0, d0);
300  __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
301  __ Ret();
302  } else {
303  Register temp1 = r3;
304  Register temp2 = ip;
305  Register temp3 = lr;
306  Register temp4 = r4;
307  Label loop;
308  Label not_two;
309 
310  __ Push(lr, r4);
311  __ bic(temp2, chars, Operand(0x3));
312  __ add(temp2, dest, Operand(temp2, LSL, 1));
313 
314  __ bind(&loop);
315  __ ldr(temp1, MemOperand(src, 4, PostIndex));
316  __ uxtb16(temp3, Operand(temp1, ROR, 0));
317  __ uxtb16(temp4, Operand(temp1, ROR, 8));
318  __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
319  __ str(temp1, MemOperand(dest));
320  __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
321  __ str(temp1, MemOperand(dest, 4));
322  __ add(dest, dest, Operand(8));
323  __ cmp(dest, temp2);
324  __ b(&loop, ne);
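// Annotation (not part of the original source): in the loop above, each 32-bit
// load brings in four Latin-1 bytes b0..b3. uxtb16 extracts the even bytes
// (b0, b2) and the odd bytes (b1, b3) as zero-extended halfword pairs, and
// pkhbt/pkhtb reassemble them into two words holding b0,b1 and b2,b3 as 16-bit
// values, which are then stored to produce four UC16 characters per iteration.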
325 
326  __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
327  __ b(&not_two, cc);
328  __ ldrh(temp1, MemOperand(src, 2, PostIndex));
329  __ uxtb(temp3, Operand(temp1, ROR, 8));
330  __ mov(temp3, Operand(temp3, LSL, 16));
331  __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
332  __ str(temp3, MemOperand(dest, 4, PostIndex));
333  __ bind(&not_two);
334  __ ldrb(temp1, MemOperand(src), ne);
335  __ strh(temp1, MemOperand(dest), ne);
336  __ Pop(pc, r4);
337  }
338 
339  CodeDesc desc;
340  masm.GetCode(&desc);
341 
342  CPU::FlushICache(buffer, actual_size);
343  OS::ProtectCode(buffer, actual_size);
344 
345  return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
346 #endif
347 }
348 #endif
349 
350 UnaryMathFunction CreateSqrtFunction() {
351 #if defined(USE_SIMULATOR)
352  return &std::sqrt;
353 #else
354  size_t actual_size;
355  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
356  if (buffer == NULL) return &std::sqrt;
357 
358  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
359 
360  __ MovFromFloatParameter(d0);
361  __ vsqrt(d0, d0);
362  __ MovToFloatResult(d0);
363  __ Ret();
364 
365  CodeDesc desc;
366  masm.GetCode(&desc);
367  ASSERT(!RelocInfo::RequiresRelocation(desc));
368 
369  CPU::FlushICache(buffer, actual_size);
370  OS::ProtectCode(buffer, actual_size);
371  return FUNCTION_CAST<UnaryMathFunction>(buffer);
372 #endif
373 }
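// Annotation (not part of the original source): the sqrt stub moves the double
// argument into d0 (MovFromFloatParameter presumably covers both the hard-float
// and soft-float calling conventions), applies a single vsqrt, and returns the
// result the same way; simulator builds simply fall back to std::sqrt.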
374 
375 #undef __
376 
377 
378 // -------------------------------------------------------------------------
379 // Platform-specific RuntimeCallHelper functions.
380 
381 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
382  masm->EnterFrame(StackFrame::INTERNAL);
383  ASSERT(!masm->has_frame());
384  masm->set_has_frame(true);
385 }
386 
387 
388 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
389  masm->LeaveFrame(StackFrame::INTERNAL);
390  ASSERT(masm->has_frame());
391  masm->set_has_frame(false);
392 }
393 
394 
395 // -------------------------------------------------------------------------
396 // Code generators
397 
398 #define __ ACCESS_MASM(masm)
399 
400 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
401  MacroAssembler* masm, AllocationSiteMode mode,
402  Label* allocation_memento_found) {
403  // ----------- S t a t e -------------
404  // -- r0 : value
405  // -- r1 : key
406  // -- r2 : receiver
407  // -- lr : return address
408  // -- r3 : target map, scratch for subsequent call
409  // -- r4 : scratch (elements)
410  // -----------------------------------
411  if (mode == TRACK_ALLOCATION_SITE) {
412  ASSERT(allocation_memento_found != NULL);
413  __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
414  }
415 
416  // Set transitioned map.
417  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
418  __ RecordWriteField(r2,
419  HeapObject::kMapOffset,
420  r3,
421  r9,
422  kLRHasNotBeenSaved,
423  kDontSaveFPRegs,
424  EMIT_REMEMBERED_SET,
425  OMIT_SMI_CHECK);
426 }
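// Annotation (not part of the original source): this transition only rewrites
// the receiver's map pointer; the RecordWriteField call afterwards notifies the
// incremental/generational GC about the store so the write-barrier invariants
// hold even though no elements are copied.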
427 
428 
429 void ElementsTransitionGenerator::GenerateSmiToDouble(
430  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
431  // ----------- S t a t e -------------
432  // -- r0 : value
433  // -- r1 : key
434  // -- r2 : receiver
435  // -- lr : return address
436  // -- r3 : target map, scratch for subsequent call
437  // -- r4 : scratch (elements)
438  // -----------------------------------
439  Label loop, entry, convert_hole, gc_required, only_change_map, done;
440 
441  if (mode == TRACK_ALLOCATION_SITE) {
442  __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
443  }
444 
445  // Check for empty arrays, which only require a map transition and no changes
446  // to the backing store.
447  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
448  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
449  __ b(eq, &only_change_map);
450 
451  __ push(lr);
452  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
453  // r5: number of elements (smi-tagged)
454 
455  // Allocate new FixedDoubleArray.
456  // Use lr as a temporary register.
457  __ mov(lr, Operand(r5, LSL, 2));
458  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
459  __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
460  // r6: destination FixedDoubleArray, not tagged as heap object.
461  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
462  // r4: source FixedArray.
463 
464  // Set destination FixedDoubleArray's length and map.
465  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
466  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
467  // Update receiver's map.
468  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
469 
470  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
471  __ RecordWriteField(r2,
472  HeapObject::kMapOffset,
473  r3,
474  r9,
475  kLRHasBeenSaved,
476  kDontSaveFPRegs,
477  OMIT_REMEMBERED_SET,
478  OMIT_SMI_CHECK);
479  // Replace receiver's backing store with newly created FixedDoubleArray.
480  __ add(r3, r6, Operand(kHeapObjectTag));
481  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
482  __ RecordWriteField(r2,
483  JSObject::kElementsOffset,
484  r3,
485  r9,
486  kLRHasBeenSaved,
487  kDontSaveFPRegs,
488  EMIT_REMEMBERED_SET,
489  OMIT_SMI_CHECK);
490 
491  // Prepare for conversion loop.
492  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
493  __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
494  __ add(r6, r9, Operand(r5, LSL, 2));
495  __ mov(r4, Operand(kHoleNanLower32));
496  __ mov(r5, Operand(kHoleNanUpper32));
497  // r3: begin of source FixedArray element fields, not tagged
498  // r4: kHoleNanLower32
499  // r5: kHoleNanUpper32
500  // r6: end of destination FixedDoubleArray, not tagged
501  // r9: begin of FixedDoubleArray element fields, not tagged
502 
503  __ b(&entry);
504 
505  __ bind(&only_change_map);
506  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
507  __ RecordWriteField(r2,
508  HeapObject::kMapOffset,
509  r3,
510  r9,
511  kLRHasNotBeenSaved,
512  kDontSaveFPRegs,
513  OMIT_REMEMBERED_SET,
514  OMIT_SMI_CHECK);
515  __ b(&done);
516 
517  // Call into runtime if GC is required.
518  __ bind(&gc_required);
519  __ pop(lr);
520  __ b(fail);
521 
522  // Convert and copy elements.
523  __ bind(&loop);
524  __ ldr(lr, MemOperand(r3, 4, PostIndex));
525  // lr: current element
526  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
527 
528  // Normal smi, convert to double and store.
529  __ vmov(s0, lr);
530  __ vcvt_f64_s32(d0, s0);
531  __ vstr(d0, r9, 0);
532  __ add(r9, r9, Operand(8));
533  __ b(&entry);
534 
535  // Hole found, store the-hole NaN.
536  __ bind(&convert_hole);
537  if (FLAG_debug_code) {
538  // Restore a "smi-untagged" heap object.
539  __ SmiTag(lr);
540  __ orr(lr, lr, Operand(1));
541  __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
542  __ Assert(eq, kObjectFoundInSmiOnlyArray);
543  }
544  __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
545 
546  __ bind(&entry);
547  __ cmp(r9, r6);
548  __ b(lt, &loop);
549 
550  __ pop(lr);
551  __ bind(&done);
552 }
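// Annotation (not part of the original source): holes in the source FixedArray
// are rewritten as the canonical "hole NaN" bit pattern
// (kHoleNanUpper32:kHoleNanLower32) in the new FixedDoubleArray, which is why
// the loop pre-loads those two constants into r4/r5 and stores them with Strd
// whenever UntagAndJumpIfNotSmi finds a non-smi element.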
553 
554 
555 void ElementsTransitionGenerator::GenerateDoubleToObject(
556  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
557  // ----------- S t a t e -------------
558  // -- r0 : value
559  // -- r1 : key
560  // -- r2 : receiver
561  // -- lr : return address
562  // -- r3 : target map, scratch for subsequent call
563  // -- r4 : scratch (elements)
564  // -----------------------------------
565  Label entry, loop, convert_hole, gc_required, only_change_map;
566 
567  if (mode == TRACK_ALLOCATION_SITE) {
568  __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
569  }
570 
571  // Check for empty arrays, which only require a map transition and no changes
572  // to the backing store.
573  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
574  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
575  __ b(eq, &only_change_map);
576 
577  __ push(lr);
578  __ Push(r3, r2, r1, r0);
579  __ ldr(r5, FieldMemOperand(r4, FixedDoubleArray::kLengthOffset));
580  // r4: source FixedDoubleArray
581  // r5: number of elements (smi-tagged)
582 
583  // Allocate new FixedArray.
584  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
585  __ add(r0, r0, Operand(r5, LSL, 1));
586  __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
587  // r6: destination FixedArray, not tagged as heap object
588  // Set destination FixedDoubleArray's length and map.
589  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
590  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
591  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
592 
593  // Prepare for conversion loop.
594  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
595  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
596  __ add(r6, r6, Operand(kHeapObjectTag));
597  __ add(r5, r3, Operand(r5, LSL, 1));
598  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
599  // Using offsetted addresses in r4 to fully take advantage of post-indexing.
600  // r3: begin of destination FixedArray element fields, not tagged
601  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
602  // r5: end of destination FixedArray, not tagged
603  // r6: destination FixedArray
604  // r9: heap number map
605  __ b(&entry);
606 
607  // Call into runtime if GC is required.
608  __ bind(&gc_required);
609  __ Pop(r3, r2, r1, r0);
610  __ pop(lr);
611  __ b(fail);
612 
613  __ bind(&loop);
614  __ ldr(r1, MemOperand(r4, 8, PostIndex));
615  // r1: current element's upper 32 bit
616  // r4: address of next element's upper 32 bit
617  __ cmp(r1, Operand(kHoleNanUpper32));
618  __ b(eq, &convert_hole);
619 
620  // Non-hole double, copy value into a heap number.
621  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
622  // r2: new heap number
623  __ ldr(r0, MemOperand(r4, 12, NegOffset));
624  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
625  __ mov(r0, r3);
626  __ str(r2, MemOperand(r3, 4, PostIndex));
627  __ RecordWrite(r6,
628  r0,
629  r2,
630  kLRHasBeenSaved,
631  kDontSaveFPRegs,
632  EMIT_REMEMBERED_SET,
633  OMIT_SMI_CHECK);
634  __ b(&entry);
635 
636  // Replace the-hole NaN with the-hole pointer.
637  __ bind(&convert_hole);
638  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
639  __ str(r0, MemOperand(r3, 4, PostIndex));
640 
641  __ bind(&entry);
642  __ cmp(r3, r5);
643  __ b(lt, &loop);
644 
645  __ Pop(r3, r2, r1, r0);
646  // Replace receiver's backing store with newly created and filled FixedArray.
647  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
648  __ RecordWriteField(r2,
649  JSObject::kElementsOffset,
650  r6,
651  r9,
652  kLRHasBeenSaved,
653  kDontSaveFPRegs,
654  EMIT_REMEMBERED_SET,
655  OMIT_SMI_CHECK);
656  __ pop(lr);
657 
658  __ bind(&only_change_map);
659  // Update receiver's map.
660  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
661  __ RecordWriteField(r2,
662  HeapObject::kMapOffset,
663  r3,
664  r9,
665  kLRHasNotBeenSaved,
666  kDontSaveFPRegs,
667  OMIT_REMEMBERED_SET,
668  OMIT_SMI_CHECK);
669 }
670 
671 
672 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
673  Register string,
674  Register index,
675  Register result,
676  Label* call_runtime) {
677  // Fetch the instance type of the receiver into result register.
678  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
679  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
680 
681  // We need special handling for indirect strings.
682  Label check_sequential;
683  __ tst(result, Operand(kIsIndirectStringMask));
684  __ b(eq, &check_sequential);
685 
686  // Dispatch on the indirect string shape: slice or cons.
687  Label cons_string;
688  __ tst(result, Operand(kSlicedNotConsMask));
689  __ b(eq, &cons_string);
690 
691  // Handle slices.
692  Label indirect_string_loaded;
693  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
694  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
695  __ add(index, index, Operand::SmiUntag(result));
696  __ jmp(&indirect_string_loaded);
697 
698  // Handle cons strings.
699  // Check whether the right hand side is the empty string (i.e. if
700  // this is really a flat string in a cons string). If that is not
701  // the case we would rather go to the runtime system now to flatten
702  // the string.
703  __ bind(&cons_string);
704  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
705  __ CompareRoot(result, Heap::kempty_stringRootIndex);
706  __ b(ne, call_runtime);
707  // Get the first of the two strings and load its instance type.
708  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
709 
710  __ bind(&indirect_string_loaded);
711  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
712  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
713 
714  // Distinguish sequential and external strings. Only these two string
715  // representations can reach here (slices and flat cons strings have been
716  // reduced to the underlying sequential or external string).
717  Label external_string, check_encoding;
718  __ bind(&check_sequential);
719  STATIC_ASSERT(kSeqStringTag == 0);
720  __ tst(result, Operand(kStringRepresentationMask));
721  __ b(ne, &external_string);
722 
723  // Prepare sequential strings
724  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
725  __ add(string,
726  string,
727  Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
728  __ jmp(&check_encoding);
729 
730  // Handle external strings.
731  __ bind(&external_string);
732  if (FLAG_debug_code) {
733  // Assert that we do not have a cons or slice (indirect strings) here.
734  // Sequential strings have already been ruled out.
735  __ tst(result, Operand(kIsIndirectStringMask));
736  __ Assert(eq, kExternalStringExpectedButNotFound);
737  }
738  // Rule out short external strings.
739  STATIC_CHECK(kShortExternalStringTag != 0);
740  __ tst(result, Operand(kShortExternalStringMask));
741  __ b(ne, call_runtime);
742  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
743 
744  Label ascii, done;
745  __ bind(&check_encoding);
746  STATIC_ASSERT(kTwoByteStringTag == 0);
747  __ tst(result, Operand(kStringEncodingMask));
748  __ b(ne, &ascii);
749  // Two-byte string.
750  __ ldrh(result, MemOperand(string, index, LSL, 1));
751  __ jmp(&done);
752  __ bind(&ascii);
753  // Ascii string.
754  __ ldrb(result, MemOperand(string, index));
755  __ bind(&done);
756 }
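// Annotation (not part of the original source): the generator first unwraps
// indirect strings: for a SlicedString the slice offset is added to index and
// the parent becomes the string; for a ConsString only the flat case (second
// part == empty string) is handled inline, anything else bails to the runtime.
// It then reads the character either from the sequential body or, for external
// strings, from the resource data pointer, using ldrh or ldrb depending on the
// encoding bit.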
757 
758 
759 static MemOperand ExpConstant(int index, Register base) {
760  return MemOperand(base, index * kDoubleSize);
761 }
762 
763 
764 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
765  DwVfpRegister input,
766  DwVfpRegister result,
767  DwVfpRegister double_scratch1,
768  DwVfpRegister double_scratch2,
769  Register temp1,
770  Register temp2,
771  Register temp3) {
772  ASSERT(!input.is(result));
773  ASSERT(!input.is(double_scratch1));
774  ASSERT(!input.is(double_scratch2));
775  ASSERT(!result.is(double_scratch1));
776  ASSERT(!result.is(double_scratch2));
777  ASSERT(!double_scratch1.is(double_scratch2));
778  ASSERT(!temp1.is(temp2));
779  ASSERT(!temp1.is(temp3));
780  ASSERT(!temp2.is(temp3));
781  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
782 
783  Label zero, infinity, done;
784 
785  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
786 
787  __ vldr(double_scratch1, ExpConstant(0, temp3));
788  __ VFPCompareAndSetFlags(double_scratch1, input);
789  __ b(ge, &zero);
790 
791  __ vldr(double_scratch2, ExpConstant(1, temp3));
792  __ VFPCompareAndSetFlags(input, double_scratch2);
793  __ b(ge, &infinity);
794 
795  __ vldr(double_scratch1, ExpConstant(3, temp3));
796  __ vldr(result, ExpConstant(4, temp3));
797  __ vmul(double_scratch1, double_scratch1, input);
798  __ vadd(double_scratch1, double_scratch1, result);
799  __ VmovLow(temp2, double_scratch1);
800  __ vsub(double_scratch1, double_scratch1, result);
801  __ vldr(result, ExpConstant(6, temp3));
802  __ vldr(double_scratch2, ExpConstant(5, temp3));
803  __ vmul(double_scratch1, double_scratch1, double_scratch2);
804  __ vsub(double_scratch1, double_scratch1, input);
805  __ vsub(result, result, double_scratch1);
806  __ vmul(double_scratch2, double_scratch1, double_scratch1);
807  __ vmul(result, result, double_scratch2);
808  __ vldr(double_scratch2, ExpConstant(7, temp3));
809  __ vmul(result, result, double_scratch2);
810  __ vsub(result, result, double_scratch1);
811  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
812  ASSERT(*reinterpret_cast<double*>
813  (ExternalReference::math_exp_constants(8).address()) == 1);
814  __ vmov(double_scratch2, 1);
815  __ vadd(result, result, double_scratch2);
816  __ mov(temp1, Operand(temp2, LSR, 11));
817  __ Ubfx(temp2, temp2, 0, 11);
818  __ add(temp1, temp1, Operand(0x3ff));
819 
820  // Must not call ExpConstant() after overwriting temp3!
821  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
822  __ add(temp3, temp3, Operand(temp2, LSL, 3));
823  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
824  // The first word loaded goes into the lower-numbered register.
825  if (temp2.code() < temp3.code()) {
826  __ orr(temp1, temp3, Operand(temp1, LSL, 20));
827  __ vmov(double_scratch1, temp2, temp1);
828  } else {
829  __ orr(temp1, temp2, Operand(temp1, LSL, 20));
830  __ vmov(double_scratch1, temp3, temp1);
831  }
832  __ vmul(result, result, double_scratch1);
833  __ b(&done);
834 
835  __ bind(&zero);
836  __ vmov(result, kDoubleRegZero);
837  __ b(&done);
838 
839  __ bind(&infinity);
840  __ vldr(result, ExpConstant(2, temp3));
841 
842  __ bind(&done);
843 }
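// Annotation (not part of the original source): a rough reading of the code
// above, not an official description. The input is first range-checked against
// constants 0 and 1 of the math_exp_constants table (underflow to 0, overflow
// to +infinity). Otherwise x is scaled and biased (constants 3 and 4) so its
// integer part can be peeled off the low word; the low 11 bits index
// math_exp_log_table(), while the remaining bits, biased by 0x3ff, are OR-ed
// into the table entry's high word to build the power-of-two factor. A short
// correction term built from constants 5..7 (plus the constant 1 at index 8)
// is multiplied with that value to produce exp(x).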
844 
845 #undef __
846 
847 #ifdef DEBUG
848 // add(r0, pc, Operand(-8))
849 static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
850 #endif
851 
852 static byte* GetNoCodeAgeSequence(uint32_t* length) {
853  // The sequence of instructions that is patched out for aging code is the
854  // following boilerplate stack-building prologue that is found in FUNCTIONS
855  static bool initialized = false;
856  static uint32_t sequence[kNoCodeAgeSequenceLength];
857  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
858  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
859  if (!initialized) {
860  // Since patcher is a large object, allocate it dynamically when needed,
861  // to avoid overloading the stack in stress conditions.
862  SmartPointer<CodePatcher>
863  patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
864  PredictableCodeSizeScope scope(patcher->masm(), *length);
865  patcher->masm()->PushFixedFrame(r1);
866  patcher->masm()->nop(ip.code());
867  patcher->masm()->add(
868  fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
869  initialized = true;
870  }
871  return byte_sequence;
872 }
873 
874 
875 bool Code::IsYoungSequence(byte* sequence) {
876  uint32_t young_length;
877  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
878  bool result = !memcmp(sequence, young_sequence, young_length);
879  ASSERT(result ||
880  Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
881  return result;
882 }
883 
884 
885 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
886  MarkingParity* parity) {
887  if (IsYoungSequence(sequence)) {
888  *age = kNoAgeCodeAge;
889  *parity = NO_MARKING_PARITY;
890  } else {
891  Address target_address = Memory::Address_at(
892  sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
893  Code* stub = GetCodeFromTargetAddress(target_address);
894  GetCodeAgeAndParity(stub, age, parity);
895  }
896 }
897 
898 
899 void Code::PatchPlatformCodeAge(Isolate* isolate,
900  byte* sequence,
901  Code::Age age,
902  MarkingParity parity) {
903  uint32_t young_length;
904  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
905  if (age == kNoAgeCodeAge) {
906  CopyBytes(sequence, young_sequence, young_length);
907  CPU::FlushICache(sequence, young_length);
908  } else {
909  Code* stub = GetCodeAgeStub(isolate, age, parity);
910  CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
911  patcher.masm()->add(r0, pc, Operand(-8));
912  patcher.masm()->ldr(pc, MemOperand(pc, -4));
913  patcher.masm()->emit_code_stub_address(stub);
914  }
915 }
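// Annotation (not part of the original source): "young" code starts with the
// normal three-instruction prologue captured by GetNoCodeAgeSequence(). To age
// a function, that prologue is overwritten with add r0, pc, #-8 (so r0 points
// at the start of the sequence), ldr pc, [pc, #-4] (jump through the literal
// that follows), and the address of the code-age stub emitted by
// emit_code_stub_address(). IsYoungSequence() simply memcmp()s against the
// saved young prologue to tell the two states apart.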
916 
917 
918 } } // namespace v8::internal
919 
920 #endif // V8_TARGET_ARCH_ARM