v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
codegen-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  switch (type) {
    case TranscendentalCache::SIN: return &sin;
    case TranscendentalCache::COS: return &cos;
    case TranscendentalCache::TAN: return &tan;
    case TranscendentalCache::LOG: return &log;
    default: UNIMPLEMENTED();
  }
  return NULL;
}


UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}
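
// Illustrative sketch, not part of the original file: UnaryMathFunction is a
// plain double(*)(double) pointer (see codegen.h), so the factories above can
// be exercised directly from C++. The helper name below is hypothetical.
static inline double ExampleCallTranscendental(TranscendentalCache::Type type,
                                               double x) {
  UnaryMathFunction fn = CreateTranscendentalFunction(type);
  return fn(x);  // e.g. sin(x) when type == TranscendentalCache::SIN
}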

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}
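
// Illustrative note, not part of the original file: the two hooks above are
// meant to bracket a call emitted into stub code, roughly like the
// hypothetical sequence below (the call in the middle is a placeholder).
//
//   StubRuntimeCallHelper helper;
//   helper.BeforeCall(masm);   // enter an INTERNAL frame, mark has_frame
//   /* ...emit the actual runtime/stub call here... */
//   helper.AfterCall(masm);    // leave the INTERNAL frame, clear has_frame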


// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  // Set transitioned map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
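
// Illustrative note, not part of the original file: a map-change-only elements
// transition (e.g. from a smi-only elements kind to an object elements kind)
// can keep the existing backing store, because every smi is already a valid
// tagged value. Only the receiver's map pointer changes, which is why the
// generator above is a single field store plus its write barrier.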


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  bool vfp3_supported = CpuFeatures::IsSupported(VFP3);

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r4: source FixedArray
  // r5: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
  __ add(lr, lr, Operand(r5, LSL, 2));
  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
  // r6: destination FixedDoubleArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
  // Update receiver's map.

  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(r3, r6, Operand(kHeapObjectTag));
  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r6, r7, Operand(r5, LSL, 2));
  __ mov(r4, Operand(kHoleNanLower32));
  __ mov(r5, Operand(kHoleNanUpper32));
  // r3: begin of source FixedArray element fields, not tagged
  // r4: kHoleNanLower32
  // r5: kHoleNanUpper32
  // r6: end of destination FixedDoubleArray, not tagged
  // r7: begin of FixedDoubleArray element fields, not tagged
  if (!vfp3_supported) __ Push(r1, r0);

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(r9, MemOperand(r3, 4, PostIndex));
  // r9: current element
  __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);

  // Normal smi, convert to double and store.
  if (vfp3_supported) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, r9);
    __ vcvt_f64_s32(d0, s0);
    __ vstr(d0, r7, 0);
    __ add(r7, r7, Operand(8));
  } else {
    FloatingPointHelper::ConvertIntToDouble(masm,
                                            r9,
                                            FloatingPointHelper::kCoreRegisters,
                                            d0,
                                            r0,
                                            r1,
                                            lr,
                                            s0);
    __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
  }
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(r9);
    __ orr(r9, r9, Operand(1));
    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, "object found in smi-only array");
  }
  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));

  __ bind(&entry);
  __ cmp(r7, r6);
  __ b(lt, &loop);

  if (!vfp3_supported) __ Pop(r1, r0);
  __ pop(lr);
  __ bind(&done);
}
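
// Illustrative sketch, not part of the original file: the hole is written as a
// NaN whose 64-bit pattern is kHoleNanUpper32 (high word) : kHoleNanLower32
// (low word), which is exactly what the Strd of r4/r5 above stores on a
// little-endian target. A hypothetical helper that reassembles that pattern
// on the C++ side:
static inline uint64_t ExampleHoleNanBits() {
  return (static_cast<uint64_t>(kHoleNanUpper32) << 32) |
         static_cast<uint64_t>(kHoleNanLower32);
}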


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(r3, r2, r1, r0);
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r4: source FixedDoubleArray
  // r5: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r0, r0, Operand(r5, LSL, 1));
  __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
  // r6: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(kHeapObjectTag));
  __ add(r5, r3, Operand(r5, LSL, 1));
  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in r4 to fully take advantage of post-indexing.
  // r3: begin of destination FixedArray element fields, not tagged
  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
  // r5: end of destination FixedArray, not tagged
  // r6: destination FixedArray
  // r7: the-hole pointer
  // r9: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(r3, r2, r1, r0);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  __ ldr(r1, MemOperand(r4, 8, PostIndex));
  // r1: current element's upper 32 bit
  // r4: address of next element's upper 32 bit
  __ cmp(r1, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
  // r2: new heap number
  __ ldr(r0, MemOperand(r4, 12, NegOffset));
  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
  __ mov(r0, r3);
  __ str(r2, MemOperand(r3, 4, PostIndex));
  __ RecordWrite(r6,
                 r0,
                 r2,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ str(r7, MemOperand(r3, 4, PostIndex));

  __ bind(&entry);
  __ cmp(r3, r5);
  __ b(lt, &loop);

  __ Pop(r3, r2, r1, r0);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r6,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
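
// Illustrative note, not part of the original file: unlike the smi-to-double
// path, every non-hole value here must be boxed into a freshly allocated
// HeapNumber, and each pointer store into the new FixedArray goes through
// RecordWrite so the write barrier sees it. Holes are replaced by the
// canonical the-hole object loaded into r7 above, and the loop identifies a
// hole by comparing only the upper 32 bits of each packed double against
// kHoleNanUpper32.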


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand(result, ASR, kSmiTagSize));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kEmptyStringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, "external string expected, but not found");
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &ascii);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}
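
// Illustrative sketch, not part of the original file: for the sequential case
// the generated code above boils down to an index-scaled byte or halfword
// load, roughly equivalent to the hypothetical C++ helper below (indirect and
// external strings omitted; instance_type is assumed to be already loaded).
static inline uint16_t ExampleLoadSeqStringChar(uint32_t instance_type,
                                                const uint8_t* chars,
                                                int index) {
  if ((instance_type & kStringEncodingMask) != 0) {
    // ASCII (one-byte) string: one byte per character.
    return chars[index];
  }
  // Two-byte string: two bytes per character.
  return reinterpret_cast<const uint16_t*>(chars)[index];
}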

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM