v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
codegen-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_MIPS
31 
32 #include "codegen.h"
33 #include "macro-assembler.h"
34 #include "simulator-mips.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 #define __ masm.
41 
42 
43 #if defined(USE_SIMULATOR)
44 byte* fast_exp_mips_machine_code = NULL;
45 double fast_exp_simulator(double x) {
46  return Simulator::current(Isolate::Current())->CallFP(
47  fast_exp_mips_machine_code, x, 0);
48 }
49 #endif
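// Under USE_SIMULATOR the generated MIPS code cannot be called directly by
// the host process, so CreateExpFunction() below stores the buffer in
// fast_exp_mips_machine_code and returns fast_exp_simulator(), which runs
// the stub through Simulator::CallFP().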
50 
51 
52 UnaryMathFunction CreateExpFunction() {
53  if (!FLAG_fast_math) return &std::exp;
54  size_t actual_size;
55  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
56  if (buffer == NULL) return &std::exp;
57  ExternalReference::InitializeMathExpData();
58 
59  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
60 
61  {
62  DoubleRegister input = f12;
63  DoubleRegister result = f0;
64  DoubleRegister double_scratch1 = f4;
65  DoubleRegister double_scratch2 = f6;
66  Register temp1 = t0;
67  Register temp2 = t1;
68  Register temp3 = t2;
69 
70  if (!IsMipsSoftFloatABI) {
71  // Input value is in f12 anyway, nothing to do.
72  } else {
73  __ Move(input, a0, a1);
74  }
75  __ Push(temp3, temp2, temp1);
76  MathExpGenerator::EmitMathExp(
77  &masm, input, result, double_scratch1, double_scratch2,
78  temp1, temp2, temp3);
79  __ Pop(temp3, temp2, temp1);
80  if (!IsMipsSoftFloatABI) {
81  // Result is already in f0, nothing to do.
82  } else {
83  __ Move(v0, v1, result);
84  }
85  __ Ret();
86  }
87 
88  CodeDesc desc;
89  masm.GetCode(&desc);
90  ASSERT(!RelocInfo::RequiresRelocation(desc));
91 
92  CPU::FlushICache(buffer, actual_size);
93  OS::ProtectCode(buffer, actual_size);
94 
95 #if !defined(USE_SIMULATOR)
96  return FUNCTION_CAST<UnaryMathFunction>(buffer);
97 #else
98  fast_exp_mips_machine_code = buffer;
99  return &fast_exp_simulator;
100 #endif
101 }
102 
103 
104 #if defined(V8_HOST_ARCH_MIPS)
105 OS::MemCopyUint8Function CreateMemCopyUint8Function(
106  OS::MemCopyUint8Function stub) {
107 #if defined(USE_SIMULATOR)
108  return stub;
109 #else
110  if (Serializer::enabled()) {
111  return stub;
112  }
113 
114  size_t actual_size;
115  byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
116  if (buffer == NULL) return stub;
117 
118  // This code assumes that cache lines are 32 bytes; if the cache line is
119  // larger it will not work correctly.
120  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
121 
122  {
123  Label lastb, unaligned, aligned, chkw,
124  loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
125  leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
126  ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
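 // Register conventions used throughout this stub: a0 = dst, a1 = src,
 // a2 = size in bytes; v0 returns the original dst pointer.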
127 
128  // The size of each prefetch.
129  uint32_t pref_chunk = 32;
130  // The maximum size of a prefetch; it must not be less than pref_chunk.
131  // If the real size of a prefetch is greater than max_pref_size and
132  // the kPrefHintPrepareForStore hint is used, the code will not work
133  // correctly.
134  uint32_t max_pref_size = 128;
135  ASSERT(pref_chunk < max_pref_size);
136 
137  // pref_limit is set based on the fact that we never use an offset
138  // greater than 5 on a store pref and that a single pref can
139  // never be larger than max_pref_size.
140  uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
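 // With the defaults above this works out to 5 * 32 + 128 = 288 bytes.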
141  int32_t pref_hint_load = kPrefHintLoadStreamed;
142  int32_t pref_hint_store = kPrefHintPrepareForStore;
143  uint32_t loadstore_chunk = 4;
144 
145  // The initial prefetches may fetch bytes that are before the buffer being
146  // copied. Start copies with an offset of 4 to avoid this situation when
147  // using kPrefHintPrepareForStore.
148  ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
149  pref_chunk * 4 >= max_pref_size);
150 
151  // If the size is less than 8, go to lastb. Regardless of size,
152  // copy the dst pointer to v0 for the return value.
153  __ slti(t2, a2, 2 * loadstore_chunk);
154  __ bne(t2, zero_reg, &lastb);
155  __ mov(v0, a0); // In delay slot.
156 
157  // If src and dst have different alignments, go to unaligned; if they
158  // have the same alignment (but are not actually aligned) do a partial
159  // load/store to make them aligned. If they are both already aligned
160  // we can start copying at aligned.
161  __ xor_(t8, a1, a0);
162  __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
163  __ bne(t8, zero_reg, &unaligned);
164  __ subu(a3, zero_reg, a0); // In delay slot.
165 
166  __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
167  __ beq(a3, zero_reg, &aligned); // Already aligned.
168  __ subu(a2, a2, a3); // In delay slot. a2 is the remaining byte count.
169 
170  __ lwr(t8, MemOperand(a1));
171  __ addu(a1, a1, a3);
172  __ swr(t8, MemOperand(a0));
173  __ addu(a0, a0, a3);
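 // The lwr/swr pair above copies the 1..3 head bytes needed to word-align
 // a0/a1 with a single unaligned load/store each (which byte lanes they
 // touch depends on the target's endianness).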
174 
175  // Now dst/src are both aligned to word boundaries. Set a2 to
176  // count how many bytes we have to copy after all the 64 byte chunks are
177  // copied and a3 to the dst pointer after all the 64 byte chunks have been
178  // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
179  __ bind(&aligned);
180  __ andi(t8, a2, 0x3f);
181  __ beq(a2, t8, &chkw); // Less than 64?
182  __ subu(a3, a2, t8); // In delay slot.
183  __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
184 
185  // When in the loop we prefetch with kPrefHintPrepareForStore hint,
186  // in this case the a0+x should be past the "t0-32" address. This means:
187  // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
188  // x=64 the last "safe" a0 address is "t0-96". In the current version we
189  // will use "pref hint, 128(a0)", so "t0-160" is the limit.
190  if (pref_hint_store == kPrefHintPrepareForStore) {
191  __ addu(t0, a0, a2); // t0 is the "past the end" address.
192  __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
193  }
194 
195  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
196  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
197  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
198  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
199 
200  if (pref_hint_store != kPrefHintPrepareForStore) {
201  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
202  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
203  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
204  }
205  __ bind(&loop16w);
206  __ lw(t0, MemOperand(a1));
207 
208  if (pref_hint_store == kPrefHintPrepareForStore) {
209  __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
210  __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
211  }
212  __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
213 
214  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
215  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
216 
217  __ bind(&skip_pref);
218  __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
219  __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
220  __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
221  __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
222  __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
223  __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
224  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
225 
226  __ sw(t0, MemOperand(a0));
227  __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
228  __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
229  __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
230  __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
231  __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
232  __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
233  __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
234 
235  __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
236  __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
237  __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
238  __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
239  __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
240  __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
241  __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
242  __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
243  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
244 
245  __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
246  __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
247  __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
248  __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
249  __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
250  __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
251  __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
252  __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
253  __ addiu(a0, a0, 16 * loadstore_chunk);
254  __ bne(a0, a3, &loop16w);
255  __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
256  __ mov(a2, t8);
257 
258  // Here we have src and dst word-aligned but less than 64 bytes to go.
259  // Check for a 32 byte chunk and copy if there is one. Otherwise jump
260  // down to chk1w to handle the tail end of the copy.
261  __ bind(&chkw);
262  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
263  __ andi(t8, a2, 0x1f);
264  __ beq(a2, t8, &chk1w); // Less than 32?
265  __ nop(); // In delay slot.
266  __ lw(t0, MemOperand(a1));
267  __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
268  __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
269  __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
270  __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
271  __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
272  __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
273  __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
274  __ addiu(a1, a1, 8 * loadstore_chunk);
275  __ sw(t0, MemOperand(a0));
276  __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
277  __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
278  __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
279  __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
280  __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
281  __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
282  __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
283  __ addiu(a0, a0, 8 * loadstore_chunk);
284 
285  // Here we have less than 32 bytes to copy. Set up for a loop to copy
286  // one word at a time. Set a2 to count how many bytes we have to copy
287  // after all the word chunks are copied and a3 to the dst pointer after
288  // all the word chunks have been copied. We will loop, incrementing a0
289  // and a1 until a0 equals a3.
290  __ bind(&chk1w);
291  __ andi(a2, t8, loadstore_chunk - 1);
292  __ beq(a2, t8, &lastb);
293  __ subu(a3, t8, a2); // In delay slot.
294  __ addu(a3, a0, a3);
295 
296  __ bind(&wordCopy_loop);
297  __ lw(t3, MemOperand(a1));
298  __ addiu(a0, a0, loadstore_chunk);
299  __ addiu(a1, a1, loadstore_chunk);
300  __ bne(a0, a3, &wordCopy_loop);
301  __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
302 
303  __ bind(&lastb);
304  __ Branch(&leave, le, a2, Operand(zero_reg));
305  __ addu(a3, a0, a2);
306 
307  __ bind(&lastbloop);
308  __ lb(v1, MemOperand(a1));
309  __ addiu(a0, a0, 1);
310  __ addiu(a1, a1, 1);
311  __ bne(a0, a3, &lastbloop);
312  __ sb(v1, MemOperand(a0, -1)); // In delay slot.
313 
314  __ bind(&leave);
315  __ jr(ra);
316  __ nop();
317 
318  // Unaligned case. Only the dst gets aligned so we need to do partial
319  // loads of the source followed by normal stores to the dst (once we
320  // have aligned the destination).
321  __ bind(&unaligned);
322  __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
323  __ beq(a3, zero_reg, &ua_chk16w);
324  __ subu(a2, a2, a3); // In delay slot.
325 
326  __ lwr(v1, MemOperand(a1));
327  __ lwl(v1,
328  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
329  __ addu(a1, a1, a3);
330  __ swr(v1, MemOperand(a0));
331  __ addu(a0, a0, a3);
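 // From here on only the destination is word-aligned; the loops below
 // therefore assemble each source word with an lwr/lwl pair and store it
 // with a plain sw.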
332 
333  // Now the dst (but not the source) is aligned. Set a2 to count how many
334  // bytes we have to copy after all the 64 byte chunks are copied and a3 to
335  // the dst pointer after all the 64 byte chunks have been copied. We will
336  // loop, incrementing a0 and a1 until a0 equals a3.
337  __ bind(&ua_chk16w);
338  __ andi(t8, a2, 0x3f);
339  __ beq(a2, t8, &ua_chkw);
340  __ subu(a3, a2, t8); // In delay slot.
341  __ addu(a3, a0, a3);
342 
343  if (pref_hint_store == kPrefHintPrepareForStore) {
344  __ addu(t0, a0, a2);
345  __ Subu(t9, t0, pref_limit);
346  }
347 
348  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
349  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
350  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
351 
352  if (pref_hint_store != kPrefHintPrepareForStore) {
353  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
354  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
355  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
356  }
357 
358  __ bind(&ua_loop16w);
359  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
360  __ lwr(t0, MemOperand(a1));
361  __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
362  __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
363 
364  if (pref_hint_store == kPrefHintPrepareForStore) {
365  __ sltu(v1, t9, a0);
366  __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
367  }
368  __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
369 
370  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
371  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
372 
373  __ bind(&ua_skip_pref);
374  __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
375  __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
376  __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
377  __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
378  __ lwl(t0,
379  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
380  __ lwl(t1,
381  MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
382  __ lwl(t2,
383  MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
384  __ lwl(t3,
385  MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
386  __ lwl(t4,
387  MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
388  __ lwl(t5,
389  MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
390  __ lwl(t6,
391  MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
392  __ lwl(t7,
393  MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
394  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
395  __ sw(t0, MemOperand(a0));
396  __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
397  __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
398  __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
399  __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
400  __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
401  __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
402  __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
403  __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
404  __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
405  __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
406  __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
407  __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
408  __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
409  __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
410  __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
411  __ lwl(t0,
412  MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
413  __ lwl(t1,
414  MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
415  __ lwl(t2,
416  MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
417  __ lwl(t3,
418  MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
419  __ lwl(t4,
420  MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
421  __ lwl(t5,
422  MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
423  __ lwl(t6,
424  MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
425  __ lwl(t7,
426  MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
427  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
428  __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
429  __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
430  __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
431  __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
432  __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
433  __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
434  __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
435  __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
436  __ addiu(a0, a0, 16 * loadstore_chunk);
437  __ bne(a0, a3, &ua_loop16w);
438  __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
439  __ mov(a2, t8);
440 
441  // Here we have less than 64 bytes to go. Check for
442  // a 32 byte chunk and copy if there is one. Otherwise jump down to
443  // ua_chk1w to handle the tail end of the copy.
444  __ bind(&ua_chkw);
445  __ Pref(pref_hint_load, MemOperand(a1));
446  __ andi(t8, a2, 0x1f);
447 
448  __ beq(a2, t8, &ua_chk1w);
449  __ nop(); // In delay slot.
450  __ lwr(t0, MemOperand(a1));
451  __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
452  __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
453  __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
454  __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
455  __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
456  __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
457  __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
458  __ lwl(t0,
459  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
460  __ lwl(t1,
461  MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
462  __ lwl(t2,
463  MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
464  __ lwl(t3,
465  MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
466  __ lwl(t4,
467  MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
468  __ lwl(t5,
469  MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
470  __ lwl(t6,
471  MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
472  __ lwl(t7,
473  MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
474  __ addiu(a1, a1, 8 * loadstore_chunk);
475  __ sw(t0, MemOperand(a0));
476  __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
477  __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
478  __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
479  __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
480  __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
481  __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
482  __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
483  __ addiu(a0, a0, 8 * loadstore_chunk);
484 
485  // Less than 32 bytes to copy. Set up for a loop to
486  // copy one word at a time.
487  __ bind(&ua_chk1w);
488  __ andi(a2, t8, loadstore_chunk - 1);
489  __ beq(a2, t8, &ua_smallCopy);
490  __ subu(a3, t8, a2); // In delay slot.
491  __ addu(a3, a0, a3);
492 
493  __ bind(&ua_wordCopy_loop);
494  __ lwr(v1, MemOperand(a1));
495  __ lwl(v1,
496  MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
497  __ addiu(a0, a0, loadstore_chunk);
498  __ addiu(a1, a1, loadstore_chunk);
499  __ bne(a0, a3, &ua_wordCopy_loop);
500  __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
501 
502  // Copy the remaining (fewer than 4) bytes, one byte at a time.
503  __ bind(&ua_smallCopy);
504  __ beq(a2, zero_reg, &leave);
505  __ addu(a3, a0, a2); // In delay slot.
506 
507  __ bind(&ua_smallCopy_loop);
508  __ lb(v1, MemOperand(a1));
509  __ addiu(a0, a0, 1);
510  __ addiu(a1, a1, 1);
511  __ bne(a0, a3, &ua_smallCopy_loop);
512  __ sb(v1, MemOperand(a0, -1)); // In delay slot.
513 
514  __ jr(ra);
515  __ nop();
516  }
517  CodeDesc desc;
518  masm.GetCode(&desc);
519  ASSERT(!RelocInfo::RequiresRelocation(desc));
520 
521  CPU::FlushICache(buffer, actual_size);
522  OS::ProtectCode(buffer, actual_size);
523  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
524 #endif
525 }
526 #endif
527 
528 UnaryMathFunction CreateSqrtFunction() {
529 #if defined(USE_SIMULATOR)
530  return &std::sqrt;
531 #else
532  size_t actual_size;
533  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
534  if (buffer == NULL) return &std::sqrt;
535 
536  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
537 
538  __ MovFromFloatParameter(f12);
539  __ sqrt_d(f0, f12);
540  __ MovToFloatResult(f0);
541  __ Ret();
542 
543  CodeDesc desc;
544  masm.GetCode(&desc);
545  ASSERT(!RelocInfo::RequiresRelocation(desc));
546 
547  CPU::FlushICache(buffer, actual_size);
548  OS::ProtectCode(buffer, actual_size);
549  return FUNCTION_CAST<UnaryMathFunction>(buffer);
550 #endif
551 }
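// Usage sketch (illustrative, not part of the original source): the returned
// UnaryMathFunction is a plain C function pointer, e.g.
//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
//   double y = fast_sqrt(2.0);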
552 
553 #undef __
554 
555 
556 // -------------------------------------------------------------------------
557 // Platform-specific RuntimeCallHelper functions.
558 
559 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
560  masm->EnterFrame(StackFrame::INTERNAL);
561  ASSERT(!masm->has_frame());
562  masm->set_has_frame(true);
563 }
564 
565 
566 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
567  masm->LeaveFrame(StackFrame::INTERNAL);
568  ASSERT(masm->has_frame());
569  masm->set_has_frame(false);
570 }
571 
572 
573 // -------------------------------------------------------------------------
574 // Code generators
575 
576 #define __ ACCESS_MASM(masm)
577 
578 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
579  MacroAssembler* masm, AllocationSiteMode mode,
580  Label* allocation_memento_found) {
581  // ----------- S t a t e -------------
582  // -- a0 : value
583  // -- a1 : key
584  // -- a2 : receiver
585  // -- ra : return address
586  // -- a3 : target map, scratch for subsequent call
587  // -- t0 : scratch (elements)
588  // -----------------------------------
589  if (mode == TRACK_ALLOCATION_SITE) {
590  ASSERT(allocation_memento_found != NULL);
591  __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
592  }
593 
594  // Set transitioned map.
595  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
596  __ RecordWriteField(a2,
597  HeapObject::kMapOffset,
598  a3,
599  t5,
600  kRAHasNotBeenSaved,
601  kDontSaveFPRegs,
602  EMIT_REMEMBERED_SET,
603  OMIT_SMI_CHECK);
604 }
605 
606 
607 void ElementsTransitionGenerator::GenerateSmiToDouble(
608  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
609  // ----------- S t a t e -------------
610  // -- a0 : value
611  // -- a1 : key
612  // -- a2 : receiver
613  // -- ra : return address
614  // -- a3 : target map, scratch for subsequent call
615  // -- t0 : scratch (elements)
616  // -----------------------------------
617  Label loop, entry, convert_hole, gc_required, only_change_map, done;
618 
619  Register scratch = t6;
620 
621  if (mode == TRACK_ALLOCATION_SITE) {
622  __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
623  }
624 
625  // Check for empty arrays, which only require a map transition and no changes
626  // to the backing store.
627  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
628  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
629  __ Branch(&only_change_map, eq, at, Operand(t0));
630 
631  __ push(ra);
632  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
633  // t0: source FixedArray
634  // t1: number of elements (smi-tagged)
635 
636  // Allocate new FixedDoubleArray.
637  __ sll(scratch, t1, 2);
638  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
639  __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
640  // t2: destination FixedDoubleArray, not tagged as heap object
641 
642  // Set destination FixedDoubleArray's length and map.
643  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
644  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
645  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
646  // Update receiver's map.
647 
648  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
649  __ RecordWriteField(a2,
650  HeapObject::kMapOffset,
651  a3,
652  t5,
653  kRAHasBeenSaved,
654  kDontSaveFPRegs,
655  OMIT_REMEMBERED_SET,
656  OMIT_SMI_CHECK);
657  // Replace receiver's backing store with newly created FixedDoubleArray.
658  __ Addu(a3, t2, Operand(kHeapObjectTag));
659  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
660  __ RecordWriteField(a2,
661  JSObject::kElementsOffset,
662  a3,
663  t5,
664  kRAHasBeenSaved,
665  kDontSaveFPRegs,
666  EMIT_REMEMBERED_SET,
667  OMIT_SMI_CHECK);
668 
669 
670  // Prepare for conversion loop.
671  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
672  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
673  __ sll(t2, t1, 2);
674  __ Addu(t2, t2, t3);
675  __ li(t0, Operand(kHoleNanLower32));
676  __ li(t1, Operand(kHoleNanUpper32));
677  // t0: kHoleNanLower32
678  // t1: kHoleNanUpper32
679  // t2: end of destination FixedDoubleArray, not tagged
680  // t3: begin of FixedDoubleArray element fields, not tagged
681 
682  __ Branch(&entry);
683 
684  __ bind(&only_change_map);
685  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
686  __ RecordWriteField(a2,
687  HeapObject::kMapOffset,
688  a3,
689  t5,
690  kRAHasNotBeenSaved,
691  kDontSaveFPRegs,
692  OMIT_REMEMBERED_SET,
693  OMIT_SMI_CHECK);
694  __ Branch(&done);
695 
696  // Call into runtime if GC is required.
697  __ bind(&gc_required);
698  __ pop(ra);
699  __ Branch(fail);
700 
701  // Convert and copy elements.
702  __ bind(&loop);
703  __ lw(t5, MemOperand(a3));
704  __ Addu(a3, a3, kIntSize);
705  // t5: current element
706  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
707 
708  // Normal smi, convert to double and store.
709  __ mtc1(t5, f0);
710  __ cvt_d_w(f0, f0);
711  __ sdc1(f0, MemOperand(t3));
712  __ Addu(t3, t3, kDoubleSize);
713 
714  __ Branch(&entry);
715 
716  // Hole found, store the-hole NaN.
717  __ bind(&convert_hole);
718  if (FLAG_debug_code) {
719  // Restore a "smi-untagged" heap object.
720  __ SmiTag(t5);
721  __ Or(t5, t5, Operand(1));
722  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
723  __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
724  }
725  __ sw(t0, MemOperand(t3)); // mantissa
726  __ sw(t1, MemOperand(t3, kIntSize)); // exponent
727  __ Addu(t3, t3, kDoubleSize);
728 
729  __ bind(&entry);
730  __ Branch(&loop, lt, t3, Operand(t2));
731 
732  __ pop(ra);
733  __ bind(&done);
734 }
735 
736 
737 void ElementsTransitionGenerator::GenerateDoubleToObject(
738  MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
739  // ----------- S t a t e -------------
740  // -- a0 : value
741  // -- a1 : key
742  // -- a2 : receiver
743  // -- ra : return address
744  // -- a3 : target map, scratch for subsequent call
745  // -- t0 : scratch (elements)
746  // -----------------------------------
747  Label entry, loop, convert_hole, gc_required, only_change_map;
748 
749  if (mode == TRACK_ALLOCATION_SITE) {
750  __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
751  }
752 
753  // Check for empty arrays, which only require a map transition and no changes
754  // to the backing store.
755  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
756  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
757  __ Branch(&only_change_map, eq, at, Operand(t0));
758 
759  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
760 
761  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
762  // t0: source FixedArray
763  // t1: number of elements (smi-tagged)
764 
765  // Allocate new FixedArray.
766  __ sll(a0, t1, 1);
767  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
768  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
769  // t2: destination FixedArray, not tagged as heap object
770  // Set destination FixedArray's length and map.
771  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
772  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
773  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
774 
775  // Prepare for conversion loop.
776  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
777  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
778  __ Addu(t2, t2, Operand(kHeapObjectTag));
779  __ sll(t1, t1, 1);
780  __ Addu(t1, a3, t1);
781  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
782  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
783  // Using offsetted addresses.
784  // a3: begin of destination FixedArray element fields, not tagged
785  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
786  // t1: end of destination FixedArray, not tagged
787  // t2: destination FixedArray
788  // t3: the-hole pointer
789  // t5: heap number map
790  __ Branch(&entry);
791 
792  // Call into runtime if GC is required.
793  __ bind(&gc_required);
794  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
795 
796  __ Branch(fail);
797 
798  __ bind(&loop);
799  __ lw(a1, MemOperand(t0));
800  __ Addu(t0, t0, kDoubleSize);
801  // a1: current element's upper 32 bit
802  // t0: address of next element's upper 32 bit
803  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
804 
805  // Non-hole double, copy value into a heap number.
806  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
807  // a2: new heap number
808  __ lw(a0, MemOperand(t0, -12));
809  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
810  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
811  __ mov(a0, a3);
812  __ sw(a2, MemOperand(a3));
813  __ Addu(a3, a3, kIntSize);
814  __ RecordWrite(t2,
815  a0,
816  a2,
817  kRAHasBeenSaved,
818  kDontSaveFPRegs,
819  EMIT_REMEMBERED_SET,
820  OMIT_SMI_CHECK);
821  __ Branch(&entry);
822 
823  // Replace the-hole NaN with the-hole pointer.
824  __ bind(&convert_hole);
825  __ sw(t3, MemOperand(a3));
826  __ Addu(a3, a3, kIntSize);
827 
828  __ bind(&entry);
829  __ Branch(&loop, lt, a3, Operand(t1));
830 
831  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
832  // Replace receiver's backing store with newly created and filled FixedArray.
833  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
834  __ RecordWriteField(a2,
835  JSObject::kElementsOffset,
836  t2,
837  t5,
838  kRAHasBeenSaved,
839  kDontSaveFPRegs,
840  EMIT_REMEMBERED_SET,
841  OMIT_SMI_CHECK);
842  __ pop(ra);
843 
844  __ bind(&only_change_map);
845  // Update receiver's map.
846  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
847  __ RecordWriteField(a2,
848  HeapObject::kMapOffset,
849  a3,
850  t5,
851  kRAHasNotBeenSaved,
852  kDontSaveFPRegs,
853  OMIT_REMEMBERED_SET,
854  OMIT_SMI_CHECK);
855 }
856 
857 
858 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
859  Register string,
860  Register index,
861  Register result,
862  Label* call_runtime) {
863  // Fetch the instance type of the receiver into result register.
864  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
865  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
866 
867  // We need special handling for indirect strings.
868  Label check_sequential;
869  __ And(at, result, Operand(kIsIndirectStringMask));
870  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
871 
872  // Dispatch on the indirect string shape: slice or cons.
873  Label cons_string;
874  __ And(at, result, Operand(kSlicedNotConsMask));
875  __ Branch(&cons_string, eq, at, Operand(zero_reg));
876 
877  // Handle slices.
878  Label indirect_string_loaded;
879  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
880  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
881  __ sra(at, result, kSmiTagSize);
882  __ Addu(index, index, at);
883  __ jmp(&indirect_string_loaded);
884 
885  // Handle cons strings.
886  // Check whether the right hand side is the empty string (i.e. if
887  // this is really a flat string in a cons string). If that is not
888  // the case we would rather go to the runtime system now to flatten
889  // the string.
890  __ bind(&cons_string);
891  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
892  __ LoadRoot(at, Heap::kempty_stringRootIndex);
893  __ Branch(call_runtime, ne, result, Operand(at));
894  // Get the first of the two strings and load its instance type.
895  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
896 
897  __ bind(&indirect_string_loaded);
898  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
899  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
900 
901  // Distinguish sequential and external strings. Only these two string
902  // representations can reach here (slices and flat cons strings have been
903  // reduced to the underlying sequential or external string).
904  Label external_string, check_encoding;
905  __ bind(&check_sequential);
906  STATIC_ASSERT(kSeqStringTag == 0);
907  __ And(at, result, Operand(kStringRepresentationMask));
908  __ Branch(&external_string, ne, at, Operand(zero_reg));
909 
910  // Prepare sequential strings.
911  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
912  __ Addu(string,
913  string,
914  SeqTwoByteString::kHeaderSize - kHeapObjectTag);
915  __ jmp(&check_encoding);
916 
917  // Handle external strings.
918  __ bind(&external_string);
919  if (FLAG_debug_code) {
920  // Assert that we do not have a cons or slice (indirect strings) here.
921  // Sequential strings have already been ruled out.
922  __ And(at, result, Operand(kIsIndirectStringMask));
923  __ Assert(eq, kExternalStringExpectedButNotFound,
924  at, Operand(zero_reg));
925  }
926  // Rule out short external strings.
927  STATIC_CHECK(kShortExternalStringTag != 0);
928  __ And(at, result, Operand(kShortExternalStringMask));
929  __ Branch(call_runtime, ne, at, Operand(zero_reg));
930  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
931 
932  Label ascii, done;
933  __ bind(&check_encoding);
934  STATIC_ASSERT(kTwoByteStringTag == 0);
935  __ And(at, result, Operand(kStringEncodingMask));
936  __ Branch(&ascii, ne, at, Operand(zero_reg));
937  // Two-byte string.
938  __ sll(at, index, 1);
939  __ Addu(at, string, at);
940  __ lhu(result, MemOperand(at));
941  __ jmp(&done);
942  __ bind(&ascii);
943  // Ascii string.
944  __ Addu(at, string, index);
945  __ lbu(result, MemOperand(at));
946  __ bind(&done);
947 }
948 
949 
950 static MemOperand ExpConstant(int index, Register base) {
951  return MemOperand(base, index * kDoubleSize);
952 }
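// ExpConstant(i, base) addresses the i-th double in the constant table that
// base points at; EmitMathExp() loads base from
// ExternalReference::math_exp_constants(0).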
953 
954 
955 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
956  DoubleRegister input,
957  DoubleRegister result,
958  DoubleRegister double_scratch1,
959  DoubleRegister double_scratch2,
960  Register temp1,
961  Register temp2,
962  Register temp3) {
963  ASSERT(!input.is(result));
964  ASSERT(!input.is(double_scratch1));
965  ASSERT(!input.is(double_scratch2));
966  ASSERT(!result.is(double_scratch1));
967  ASSERT(!result.is(double_scratch2));
968  ASSERT(!double_scratch1.is(double_scratch2));
969  ASSERT(!temp1.is(temp2));
970  ASSERT(!temp1.is(temp3));
971  ASSERT(!temp2.is(temp3));
972  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
973 
974  Label zero, infinity, done;
975 
976  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
977 
978  __ ldc1(double_scratch1, ExpConstant(0, temp3));
979  __ BranchF(&zero, NULL, ge, double_scratch1, input);
980 
981  __ ldc1(double_scratch2, ExpConstant(1, temp3));
982  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
983 
984  __ ldc1(double_scratch1, ExpConstant(3, temp3));
985  __ ldc1(result, ExpConstant(4, temp3));
986  __ mul_d(double_scratch1, double_scratch1, input);
987  __ add_d(double_scratch1, double_scratch1, result);
988  __ FmoveLow(temp2, double_scratch1);
989  __ sub_d(double_scratch1, double_scratch1, result);
990  __ ldc1(result, ExpConstant(6, temp3));
991  __ ldc1(double_scratch2, ExpConstant(5, temp3));
992  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
993  __ sub_d(double_scratch1, double_scratch1, input);
994  __ sub_d(result, result, double_scratch1);
995  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
996  __ mul_d(result, result, double_scratch2);
997  __ ldc1(double_scratch2, ExpConstant(7, temp3));
998  __ mul_d(result, result, double_scratch2);
999  __ sub_d(result, result, double_scratch1);
1000  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
1001  ASSERT(*reinterpret_cast<double*>
1002  (ExternalReference::math_exp_constants(8).address()) == 1);
1003  __ Move(double_scratch2, 1);
1004  __ add_d(result, result, double_scratch2);
1005  __ srl(temp1, temp2, 11);
1006  __ Ext(temp2, temp2, 0, 11);
1007  __ Addu(temp1, temp1, Operand(0x3ff));
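 // temp2 now holds the low 11 fixed-point fraction bits (the table index
 // used below) and temp1 the integer part; adding 0x3ff (the IEEE-754
 // double exponent bias) prepares temp1 for the exponent field.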
1008 
1009  // Must not call ExpConstant() after overwriting temp3!
1010  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1011  __ sll(at, temp2, 3);
1012  __ Addu(temp3, temp3, Operand(at));
1013  __ lw(temp2, MemOperand(temp3, 0));
1014  __ lw(temp3, MemOperand(temp3, kPointerSize));
1015  // The first word loaded is in the lower-numbered register.
1016  if (temp2.code() < temp3.code()) {
1017  __ sll(at, temp1, 20);
1018  __ Or(temp1, temp3, at);
1019  __ Move(double_scratch1, temp2, temp1);
1020  } else {
1021  __ sll(at, temp1, 20);
1022  __ Or(temp1, temp2, at);
1023  __ Move(double_scratch1, temp3, temp1);
1024  }
1025  __ mul_d(result, result, double_scratch1);
1026  __ BranchShort(&done);
1027 
1028  __ bind(&zero);
1029  __ Move(result, kDoubleRegZero);
1030  __ BranchShort(&done);
1031 
1032  __ bind(&infinity);
1033  __ ldc1(result, ExpConstant(2, temp3));
1034 
1035  __ bind(&done);
1036 }
1037 
1038 #ifdef DEBUG
1039 // nop(CODE_AGE_MARKER_NOP)
1040 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
1041 #endif
1042 
1043 static byte* GetNoCodeAgeSequence(uint32_t* length) {
1044  // The sequence of instructions that is patched out for aging code is the
1045  // following boilerplate stack-building prologue that is found in FUNCTIONS
1046  static bool initialized = false;
1047  static uint32_t sequence[kNoCodeAgeSequenceLength];
1048  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
1049  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
1050  if (!initialized) {
1051  // Since patcher is a large object, allocate it dynamically when needed,
1052  // to avoid overloading the stack in stress conditions.
1053  SmartPointer<CodePatcher>
1054  patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
1055  PredictableCodeSizeScope scope(patcher->masm(), *length);
1056  patcher->masm()->Push(ra, fp, cp, a1);
1057  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1058  patcher->masm()->Addu(
1059  fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1060  initialized = true;
1061  }
1062  return byte_sequence;
1063 }
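// The "young" sequence built above is the standard prologue
// (Push(ra, fp, cp, a1), a marker nop and the fp adjustment);
// IsYoungSequence() below recognizes unaged code by comparing against it
// byte for byte.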
1064 
1065 
1066 bool Code::IsYoungSequence(byte* sequence) {
1067  uint32_t young_length;
1068  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
1069  bool result = !memcmp(sequence, young_sequence, young_length);
1070  ASSERT(result ||
1071  Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
1072  return result;
1073 }
1074 
1075 
1076 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
1077  MarkingParity* parity) {
1078  if (IsYoungSequence(sequence)) {
1079  *age = kNoAgeCodeAge;
1080  *parity = NO_MARKING_PARITY;
1081  } else {
1082  Address target_address = Assembler::target_address_at(
1083  sequence + Assembler::kInstrSize);
1084  Code* stub = GetCodeFromTargetAddress(target_address);
1085  GetCodeAgeAndParity(stub, age, parity);
1086  }
1087 }
1088 
1089 
1090 void Code::PatchPlatformCodeAge(Isolate* isolate,
1091  byte* sequence,
1092  Code::Age age,
1093  MarkingParity parity) {
1094  uint32_t young_length;
1095  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
1096  if (age == kNoAgeCodeAge) {
1097  CopyBytes(sequence, young_sequence, young_length);
1098  CPU::FlushICache(sequence, young_length);
1099  } else {
1100  Code* stub = GetCodeAgeStub(isolate, age, parity);
1101  CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
1102  // Mark this code sequence for FindPlatformCodeAgeSequence().
1103  patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
1104  // Load the stub address to t9 and call it,
1105  // GetCodeAgeAndParity() extracts the stub address from this instruction.
1106  patcher.masm()->li(
1107  t9,
1108  Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
1109  CONSTANT_SIZE);
1110  patcher.masm()->nop(); // Prevent jalr to jal optimization.
1111  patcher.masm()->jalr(t9, a0);
1112  patcher.masm()->nop(); // Branch delay slot nop.
1113  patcher.masm()->nop(); // Pad the empty space.
1114  }
1115 }
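// The aged sequence starts with CODE_AGE_MARKER_NOP followed by li(t9, stub)
// and jalr, so GetCodeAgeAndParity() above can recover the stub address via
// Assembler::target_address_at().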
1116 
1117 
1118 #undef __
1119 
1120 } } // namespace v8::internal
1121 
1122 #endif // V8_TARGET_ARCH_MIPS