v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
codegen-mips.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  switch (type) {
    case TranscendentalCache::SIN: return &sin;
    case TranscendentalCache::COS: return &cos;
    case TranscendentalCache::TAN: return &tan;
    case TranscendentalCache::LOG: return &log;
    default: UNIMPLEMENTED();
  }
  return NULL;
}


UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}
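
// Example (sketch): with no MIPS fast path here, these factories simply hand
// back libm entry points, so the result is an ordinary C function pointer:
//
//   UnaryMathFunction fn =
//       CreateTranscendentalFunction(TranscendentalCache::LOG);
//   double y = fn(1.0);  // 0.0, computed by libm's log()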

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}
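
// Note (sketch): has_frame() is MacroAssembler bookkeeping, not emitted code.
// EnterFrame()/LeaveFrame() emit the actual frame setup and teardown, while
// set_has_frame() keeps the assembler's view in sync so that macros which
// require a frame can ASSERT on it.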

// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  // Set transitioned map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
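
// Example (sketch): this fast path covers transitions that only need a new
// map, e.g. FAST_SMI_ELEMENTS -> FAST_ELEMENTS, where the existing FixedArray
// backing store is already a valid representation of the elements:
//
//   var a = [1, 2, 3];  // smi-only elements
//   a[0] = {};          // same backing store, transitioned map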


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  bool fpu_supported = CpuFeatures::IsSupported(FPU);

  Register scratch = t6;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ push(ra);
  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedDoubleArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
  // Update receiver's map.

  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(a3, t2, Operand(kHeapObjectTag));
  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);


  // Prepare for conversion loop.
  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(t2, t1, 2);
  __ Addu(t2, t2, t3);
  __ li(t0, Operand(kHoleNanLower32));
  __ li(t1, Operand(kHoleNanUpper32));
  // t0: kHoleNanLower32
  // t1: kHoleNanUpper32
  // t2: end of destination FixedDoubleArray, not tagged
  // t3: begin of FixedDoubleArray element fields, not tagged

  if (!fpu_supported) __ Push(a1, a0);

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ra);
  __ Branch(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(t5, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  // t5: current element
  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  // Normal smi, convert to double and store.
  if (fpu_supported) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(t5, f0);
    __ cvt_d_w(f0, f0);
    __ sdc1(f0, MemOperand(t3));
    __ Addu(t3, t3, kDoubleSize);
  } else {
    FloatingPointHelper::ConvertIntToDouble(masm,
                                            t5,
                                            FloatingPointHelper::kCoreRegisters,
                                            f0,
                                            a0,
                                            a1,
                                            t7,
                                            f0);
    __ sw(a0, MemOperand(t3));  // mantissa
    __ sw(a1, MemOperand(t3, kIntSize));  // exponent
    __ Addu(t3, t3, kDoubleSize);
  }
  __ Branch(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(t5);
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, "object found in smi-only array", at, Operand(t5));
  }
  __ sw(t0, MemOperand(t3));  // mantissa
  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
  __ Addu(t3, t3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, t3, Operand(t2));

  if (!fpu_supported) __ Pop(a1, a0);
  __ pop(ra);
  __ bind(&done);
}
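
// Worked example (sketch): t1 holds the smi-tagged length, i.e. 2 * n on this
// 32-bit target. "sll(scratch, t1, 2)" therefore yields (2 * n) << 2 = 8 * n,
// exactly n * kDoubleSize, and adding FixedDoubleArray::kHeaderSize gives the
// allocation size; for n == 3 that is 24 bytes of payload plus the header.
// Holes are stored as a NaN with the fixed bit pattern
// kHoleNanUpper32:kHoleNanLower32, which lets later code tell a hole apart
// from any double an actual computation could produce.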


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
  __ Addu(t2, t2, Operand(kHeapObjectTag));
  __ sll(t1, t1, 1);
  __ Addu(t1, a3, t1);
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // a3: begin of destination FixedArray element fields, not tagged
  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
  // t1: end of destination FixedArray, not tagged
  // t2: destination FixedArray
  // t3: the-hole pointer
  // t5: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  __ lw(a1, MemOperand(t0));
  __ Addu(t0, t0, kDoubleSize);
  // a1: current element's upper 32 bit
  // t0: address of next element's upper 32 bit
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
  // a2: new heap number
  __ lw(a0, MemOperand(t0, -12));
  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
  __ mov(a0, a3);
  __ sw(a2, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  __ RecordWrite(t2,
                 a0,
                 a2,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ sw(t3, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      t2,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
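
// Offset bookkeeping (sketch): t0 starts at the first element's upper word
// (FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4). Each iteration reads
// that upper word and then advances t0 by kDoubleSize, so the element just
// read has its upper word at t0 - 8 and its lower (mantissa) word at t0 - 12,
// which is what the lw(a0, MemOperand(t0, -12)) above fetches.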


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kEmptyStringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, "external string expected, but not found",
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}
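
// Index scaling (sketch): by this point `string` has been advanced past the
// sequential-string header (or redirected to the external resource data), so
// two-byte strings are indexed as string + (index << 1) with a halfword load
// (lhu), while ASCII strings use string + index with a byte load (lbu); in
// both encodings `result` ends up holding the requested character code.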

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS