v8  3.11.10(node0.8.26)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
code-stubs-arm.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_ARM)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 #define __ ACCESS_MASM(masm)
41 
42 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
43  Label* slow,
44  Condition cond,
45  bool never_nan_nan);
46 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
47  Register lhs,
48  Register rhs,
49  Label* lhs_not_nan,
50  Label* slow,
51  bool strict);
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
54  Register lhs,
55  Register rhs);
56 
57 
58 // Check if the operand is a heap number.
59 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
60  Register scratch1, Register scratch2,
61  Label* not_a_heap_number) {
62  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
63  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
64  __ cmp(scratch1, scratch2);
65  __ b(ne, not_a_heap_number);
66 }
67 
68 
69 void ToNumberStub::Generate(MacroAssembler* masm) {
70  // The ToNumber stub takes one argument in eax.
71  Label check_heap_number, call_builtin;
72  __ JumpIfNotSmi(r0, &check_heap_number);
73  __ Ret();
74 
75  __ bind(&check_heap_number);
76  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
77  __ Ret();
78 
79  __ bind(&call_builtin);
80  __ push(r0);
81  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
82 }
83 
84 
85 void FastNewClosureStub::Generate(MacroAssembler* masm) {
86  // Create a new closure from the given function info in new
87  // space. Set the context to the current context in cp.
88  Label gc;
89 
90  // Pop the function info from the stack.
91  __ pop(r3);
92 
93  // Attempt to allocate new JSFunction in new space.
94  __ AllocateInNewSpace(JSFunction::kSize,
95  r0,
96  r1,
97  r2,
98  &gc,
99  TAG_OBJECT);
100 
101  int map_index = (language_mode_ == CLASSIC_MODE)
104 
105  // Compute the function map in the current global context and set that
106  // as the map of the allocated object.
109  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
111 
112  // Initialize the rest of the function. We don't have to update the
113  // write barrier because the allocated object is in new space.
114  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
115  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
116  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
124 
125  // Initialize the code pointer in the function to be the one
126  // found in the shared function info object.
128  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
130 
131  // Return result. The argument function info has been popped already.
132  __ Ret();
133 
134  // Create a new closure through the slower runtime call.
135  __ bind(&gc);
136  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
137  __ Push(cp, r3, r4);
138  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
139 }
140 
141 
142 void FastNewContextStub::Generate(MacroAssembler* masm) {
143  // Try to allocate the context in new space.
144  Label gc;
145  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
146 
147  // Attempt to allocate the context in new space.
148  __ AllocateInNewSpace(FixedArray::SizeFor(length),
149  r0,
150  r1,
151  r2,
152  &gc,
153  TAG_OBJECT);
154 
155  // Load the function from the stack.
156  __ ldr(r3, MemOperand(sp, 0));
157 
158  // Set up the object header.
159  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
160  __ mov(r2, Operand(Smi::FromInt(length)));
163 
164  // Set up the fixed slots, copy the global object from the previous context.
166  __ mov(r1, Operand(Smi::FromInt(0)));
171 
172  // Initialize the rest of the slots to undefined.
173  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
174  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
176  }
177 
178  // Remove the on-stack argument and return.
179  __ mov(cp, r0);
180  __ pop();
181  __ Ret();
182 
183  // Need to collect. Call into runtime system.
184  __ bind(&gc);
185  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
186 }
187 
188 
189 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
190  // Stack layout on entry:
191  //
192  // [sp]: function.
193  // [sp + kPointerSize]: serialized scope info
194 
195  // Try to allocate the context in new space.
196  Label gc;
197  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
198  __ AllocateInNewSpace(FixedArray::SizeFor(length),
199  r0, r1, r2, &gc, TAG_OBJECT);
200 
201  // Load the function from the stack.
202  __ ldr(r3, MemOperand(sp, 0));
203 
204  // Load the serialized scope info from the stack.
205  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
206 
207  // Set up the object header.
208  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
210  __ mov(r2, Operand(Smi::FromInt(length)));
212 
213  // If this block context is nested in the global context we get a smi
214  // sentinel instead of a function. The block context should get the
215  // canonical empty function of the global context as its closure which
216  // we still have to look up.
217  Label after_sentinel;
218  __ JumpIfNotSmi(r3, &after_sentinel);
219  if (FLAG_debug_code) {
220  const char* message = "Expected 0 as a Smi sentinel";
221  __ cmp(r3, Operand::Zero());
222  __ Assert(eq, message);
223  }
224  __ ldr(r3, GlobalObjectOperand());
227  __ bind(&after_sentinel);
228 
229  // Set up the fixed slots, copy the global object from the previous context.
235 
236  // Initialize the rest of the slots to the hole value.
237  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
238  for (int i = 0; i < slots_; i++) {
239  __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
240  }
241 
242  // Remove the on-stack argument and return.
243  __ mov(cp, r0);
244  __ add(sp, sp, Operand(2 * kPointerSize));
245  __ Ret();
246 
247  // Need to collect. Call into runtime system.
248  __ bind(&gc);
249  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
250 }
251 
252 
253 static void GenerateFastCloneShallowArrayCommon(
254  MacroAssembler* masm,
255  int length,
257  Label* fail) {
258  // Registers on entry:
259  //
260  // r3: boilerplate literal array.
262 
263  // All sizes here are multiples of kPointerSize.
264  int elements_size = 0;
265  if (length > 0) {
267  ? FixedDoubleArray::SizeFor(length)
268  : FixedArray::SizeFor(length);
269  }
270  int size = JSArray::kSize + elements_size;
271 
272  // Allocate both the JS array and the elements array in one big
273  // allocation. This avoids multiple limit checks.
274  __ AllocateInNewSpace(size,
275  r0,
276  r1,
277  r2,
278  fail,
279  TAG_OBJECT);
280 
281  // Copy the JS array part.
282  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
283  if ((i != JSArray::kElementsOffset) || (length == 0)) {
284  __ ldr(r1, FieldMemOperand(r3, i));
285  __ str(r1, FieldMemOperand(r0, i));
286  }
287  }
288 
289  if (length > 0) {
290  // Get hold of the elements array of the boilerplate and setup the
291  // elements pointer in the resulting object.
293  __ add(r2, r0, Operand(JSArray::kSize));
295 
296  // Copy the elements array.
297  ASSERT((elements_size % kPointerSize) == 0);
298  __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
299  }
300 }
301 
302 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
303  // Stack layout on entry:
304  //
305  // [sp]: constant elements.
306  // [sp + kPointerSize]: literal index.
307  // [sp + (2 * kPointerSize)]: literals array.
308 
309  // Load boilerplate object into r3 and check if we need to create a
310  // boilerplate.
311  Label slow_case;
312  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
313  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
314  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
316  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
317  __ b(eq, &slow_case);
318 
319  FastCloneShallowArrayStub::Mode mode = mode_;
320  if (mode == CLONE_ANY_ELEMENTS) {
321  Label double_elements, check_fast_elements;
324  __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
325  __ b(ne, &check_fast_elements);
326  GenerateFastCloneShallowArrayCommon(masm, 0,
327  COPY_ON_WRITE_ELEMENTS, &slow_case);
328  // Return and remove the on-stack parameters.
329  __ add(sp, sp, Operand(3 * kPointerSize));
330  __ Ret();
331 
332  __ bind(&check_fast_elements);
333  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
334  __ b(ne, &double_elements);
335  GenerateFastCloneShallowArrayCommon(masm, length_,
336  CLONE_ELEMENTS, &slow_case);
337  // Return and remove the on-stack parameters.
338  __ add(sp, sp, Operand(3 * kPointerSize));
339  __ Ret();
340 
341  __ bind(&double_elements);
342  mode = CLONE_DOUBLE_ELEMENTS;
343  // Fall through to generate the code to handle double elements.
344  }
345 
346  if (FLAG_debug_code) {
347  const char* message;
348  Heap::RootListIndex expected_map_index;
349  if (mode == CLONE_ELEMENTS) {
350  message = "Expected (writable) fixed array";
351  expected_map_index = Heap::kFixedArrayMapRootIndex;
352  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
353  message = "Expected (writable) fixed double array";
354  expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
355  } else {
357  message = "Expected copy-on-write fixed array";
358  expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
359  }
360  __ push(r3);
363  __ CompareRoot(r3, expected_map_index);
364  __ Assert(eq, message);
365  __ pop(r3);
366  }
367 
368  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
369 
370  // Return and remove the on-stack parameters.
371  __ add(sp, sp, Operand(3 * kPointerSize));
372  __ Ret();
373 
374  __ bind(&slow_case);
375  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
376 }
377 
378 
379 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
380  // Stack layout on entry:
381  //
382  // [sp]: object literal flags.
383  // [sp + kPointerSize]: constant properties.
384  // [sp + (2 * kPointerSize)]: literal index.
385  // [sp + (3 * kPointerSize)]: literals array.
386 
387  // Load boilerplate object into r3 and check if we need to create a
388  // boilerplate.
389  Label slow_case;
390  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
391  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
392  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
394  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
395  __ b(eq, &slow_case);
396 
397  // Check that the boilerplate contains only fast properties and we can
398  // statically determine the instance size.
399  int size = JSObject::kHeaderSize + length_ * kPointerSize;
402  __ cmp(r0, Operand(size >> kPointerSizeLog2));
403  __ b(ne, &slow_case);
404 
405  // Allocate the JS object and copy header together with all in-object
406  // properties from the boilerplate.
407  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
408  for (int i = 0; i < size; i += kPointerSize) {
409  __ ldr(r1, FieldMemOperand(r3, i));
410  __ str(r1, FieldMemOperand(r0, i));
411  }
412 
413  // Return and remove the on-stack parameters.
414  __ add(sp, sp, Operand(4 * kPointerSize));
415  __ Ret();
416 
417  __ bind(&slow_case);
418  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
419 }
420 
421 
422 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
423 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
424 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
425 // scratch register. Destroys the source register. No GC occurs during this
426 // stub so you don't have to set up the frame.
427 class ConvertToDoubleStub : public CodeStub {
428  public:
429  ConvertToDoubleStub(Register result_reg_1,
430  Register result_reg_2,
431  Register source_reg,
432  Register scratch_reg)
433  : result1_(result_reg_1),
434  result2_(result_reg_2),
435  source_(source_reg),
436  zeros_(scratch_reg) { }
437 
438  private:
439  Register result1_;
440  Register result2_;
441  Register source_;
442  Register zeros_;
443 
444  // Minor key encoding in 16 bits.
445  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
446  class OpBits: public BitField<Token::Value, 2, 14> {};
447 
448  Major MajorKey() { return ConvertToDouble; }
449  int MinorKey() {
450  // Encode the parameters in a unique 16 bit value.
451  return result1_.code() +
452  (result2_.code() << 4) +
453  (source_.code() << 8) +
454  (zeros_.code() << 12);
455  }
456 
457  void Generate(MacroAssembler* masm);
458 };
459 
460 
461 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
462  Register exponent = result1_;
463  Register mantissa = result2_;
464 
465  Label not_special;
466  // Convert from Smi to integer.
467  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
468  // Move sign bit from source to destination. This works because the sign bit
469  // in the exponent word of the double has the same position and polarity as
470  // the 2's complement sign bit in a Smi.
471  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
472  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
473  // Subtract from 0 if source was negative.
474  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
475 
476  // We have -1, 0 or 1, which we treat specially. Register source_ contains
477  // absolute value: it is either equal to 1 (special case of -1 and 1),
478  // greater than 1 (not a special case) or less than 1 (special case of 0).
479  __ cmp(source_, Operand(1));
480  __ b(gt, &not_special);
481 
482  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
483  const uint32_t exponent_word_for_1 =
485  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
486  // 1, 0 and -1 all have 0 for the second word.
487  __ mov(mantissa, Operand(0, RelocInfo::NONE));
488  __ Ret();
489 
490  __ bind(&not_special);
491  // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
492  // Gets the wrong answer for 0, but we already checked for that case above.
493  __ CountLeadingZeros(zeros_, source_, mantissa);
494  // Compute exponent and or it into the exponent register.
495  // We use mantissa as a scratch register here. Use a fudge factor to
496  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
497  // that fit in the ARM's constant field.
498  int fudge = 0x400;
499  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
500  __ add(mantissa, mantissa, Operand(fudge));
501  __ orr(exponent,
502  exponent,
503  Operand(mantissa, LSL, HeapNumber::kExponentShift));
504  // Shift up the source chopping the top bit off.
505  __ add(zeros_, zeros_, Operand(1));
506  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
507  __ mov(source_, Operand(source_, LSL, zeros_));
508  // Compute lower part of fraction (last 12 bits).
509  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
510  // And the top (top 20 bits).
511  __ orr(exponent,
512  exponent,
513  Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
514  __ Ret();
515 }
516 
517 
518 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
520  Register scratch1,
521  Register scratch2) {
523  CpuFeatures::Scope scope(VFP3);
524  __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
525  __ vmov(d7.high(), scratch1);
526  __ vcvt_f64_s32(d7, d7.high());
527  __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
528  __ vmov(d6.high(), scratch1);
529  __ vcvt_f64_s32(d6, d6.high());
530  if (destination == kCoreRegisters) {
531  __ vmov(r2, r3, d7);
532  __ vmov(r0, r1, d6);
533  }
534  } else {
535  ASSERT(destination == kCoreRegisters);
536  // Write Smi from r0 to r3 and r2 in double format.
537  __ mov(scratch1, Operand(r0));
538  ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
539  __ push(lr);
540  __ Call(stub1.GetCode());
541  // Write Smi from r1 to r1 and r0 in double format.
542  __ mov(scratch1, Operand(r1));
543  ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
544  __ Call(stub2.GetCode());
545  __ pop(lr);
546  }
547 }
548 
549 
551  MacroAssembler* masm,
553  Register heap_number_map,
554  Register scratch1,
555  Register scratch2,
556  Label* slow) {
557 
558  // Load right operand (r0) to d6 or r2/r3.
559  LoadNumber(masm, destination,
560  r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
561 
562  // Load left operand (r1) to d7 or r0/r1.
563  LoadNumber(masm, destination,
564  r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
565 }
566 
567 
568 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
569  Destination destination,
570  Register object,
571  DwVfpRegister dst,
572  Register dst1,
573  Register dst2,
574  Register heap_number_map,
575  Register scratch1,
576  Register scratch2,
577  Label* not_number) {
578  if (FLAG_debug_code) {
579  __ AbortIfNotRootValue(heap_number_map,
580  Heap::kHeapNumberMapRootIndex,
581  "HeapNumberMap register clobbered.");
582  }
583 
584  Label is_smi, done;
585 
586  // Smi-check
587  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
588  // Heap number check
589  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
590 
591  // Handle loading a double from a heap number.
593  destination == kVFPRegisters) {
594  CpuFeatures::Scope scope(VFP3);
595  // Load the double from tagged HeapNumber to double register.
596  __ sub(scratch1, object, Operand(kHeapObjectTag));
597  __ vldr(dst, scratch1, HeapNumber::kValueOffset);
598  } else {
599  ASSERT(destination == kCoreRegisters);
600  // Load the double from heap number to dst1 and dst2 in double format.
601  __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
602  }
603  __ jmp(&done);
604 
605  // Handle loading a double from a smi.
606  __ bind(&is_smi);
608  CpuFeatures::Scope scope(VFP3);
609  // Convert smi to double using VFP instructions.
610  __ vmov(dst.high(), scratch1);
611  __ vcvt_f64_s32(dst, dst.high());
612  if (destination == kCoreRegisters) {
613  // Load the converted smi to dst1 and dst2 in double format.
614  __ vmov(dst1, dst2, dst);
615  }
616  } else {
617  ASSERT(destination == kCoreRegisters);
618  // Write smi to dst1 and dst2 double format.
619  __ mov(scratch1, Operand(object));
620  ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
621  __ push(lr);
622  __ Call(stub.GetCode());
623  __ pop(lr);
624  }
625 
626  __ bind(&done);
627 }
628 
629 
630 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
631  Register object,
632  Register dst,
633  Register heap_number_map,
634  Register scratch1,
635  Register scratch2,
636  Register scratch3,
637  DwVfpRegister double_scratch,
638  Label* not_number) {
639  if (FLAG_debug_code) {
640  __ AbortIfNotRootValue(heap_number_map,
641  Heap::kHeapNumberMapRootIndex,
642  "HeapNumberMap register clobbered.");
643  }
644  Label done;
645  Label not_in_int32_range;
646 
647  __ UntagAndJumpIfSmi(dst, object, &done);
648  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
649  __ cmp(scratch1, heap_number_map);
650  __ b(ne, not_number);
651  __ ConvertToInt32(object,
652  dst,
653  scratch1,
654  scratch2,
655  double_scratch,
656  &not_in_int32_range);
657  __ jmp(&done);
658 
659  __ bind(&not_in_int32_range);
660  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
661  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
662 
663  __ EmitOutOfInt32RangeTruncate(dst,
664  scratch1,
665  scratch2,
666  scratch3);
667  __ bind(&done);
668 }
669 
670 
671 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
672  Register int_scratch,
673  Destination destination,
674  DwVfpRegister double_dst,
675  Register dst1,
676  Register dst2,
677  Register scratch2,
678  SwVfpRegister single_scratch) {
679  ASSERT(!int_scratch.is(scratch2));
680  ASSERT(!int_scratch.is(dst1));
681  ASSERT(!int_scratch.is(dst2));
682 
683  Label done;
684 
686  CpuFeatures::Scope scope(VFP3);
687  __ vmov(single_scratch, int_scratch);
688  __ vcvt_f64_s32(double_dst, single_scratch);
689  if (destination == kCoreRegisters) {
690  __ vmov(dst1, dst2, double_dst);
691  }
692  } else {
693  Label fewer_than_20_useful_bits;
694  // Expected output:
695  // | dst2 | dst1 |
696  // | s | exp | mantissa |
697 
698  // Check for zero.
699  __ cmp(int_scratch, Operand::Zero());
700  __ mov(dst2, int_scratch);
701  __ mov(dst1, int_scratch);
702  __ b(eq, &done);
703 
704  // Preload the sign of the value.
705  __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
706  // Get the absolute value of the object (as an unsigned integer).
707  __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
708 
709  // Get mantissa[51:20].
710 
711  // Get the position of the first set bit.
712  __ CountLeadingZeros(dst1, int_scratch, scratch2);
713  __ rsb(dst1, dst1, Operand(31));
714 
715  // Set the exponent.
716  __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
717  __ Bfi(dst2, scratch2, scratch2,
718  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
719 
720  // Clear the first non null bit.
721  __ mov(scratch2, Operand(1));
722  __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
723 
724  __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
725  // Get the number of bits to set in the lower part of the mantissa.
726  __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
727  __ b(mi, &fewer_than_20_useful_bits);
728  // Set the higher 20 bits of the mantissa.
729  __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
730  __ rsb(scratch2, scratch2, Operand(32));
731  __ mov(dst1, Operand(int_scratch, LSL, scratch2));
732  __ b(&done);
733 
734  __ bind(&fewer_than_20_useful_bits);
735  __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
736  __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
737  __ orr(dst2, dst2, scratch2);
738  // Set dst1 to 0.
739  __ mov(dst1, Operand::Zero());
740  }
741  __ bind(&done);
742 }
743 
744 
745 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
746  Register object,
747  Destination destination,
748  DwVfpRegister double_dst,
749  Register dst1,
750  Register dst2,
751  Register heap_number_map,
752  Register scratch1,
753  Register scratch2,
754  SwVfpRegister single_scratch,
755  Label* not_int32) {
756  ASSERT(!scratch1.is(object) && !scratch2.is(object));
757  ASSERT(!scratch1.is(scratch2));
758  ASSERT(!heap_number_map.is(object) &&
759  !heap_number_map.is(scratch1) &&
760  !heap_number_map.is(scratch2));
761 
762  Label done, obj_is_not_smi;
763 
764  __ JumpIfNotSmi(object, &obj_is_not_smi);
765  __ SmiUntag(scratch1, object);
766  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
767  scratch2, single_scratch);
768  __ b(&done);
769 
770  __ bind(&obj_is_not_smi);
771  if (FLAG_debug_code) {
772  __ AbortIfNotRootValue(heap_number_map,
773  Heap::kHeapNumberMapRootIndex,
774  "HeapNumberMap register clobbered.");
775  }
776  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
777 
778  // Load the number.
780  CpuFeatures::Scope scope(VFP3);
781  // Load the double value.
782  __ sub(scratch1, object, Operand(kHeapObjectTag));
783  __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
784 
785  __ EmitVFPTruncate(kRoundToZero,
786  single_scratch,
787  double_dst,
788  scratch1,
789  scratch2,
791 
792  // Jump to not_int32 if the operation did not succeed.
793  __ b(ne, not_int32);
794 
795  if (destination == kCoreRegisters) {
796  __ vmov(dst1, dst2, double_dst);
797  }
798 
799  } else {
800  ASSERT(!scratch1.is(object) && !scratch2.is(object));
801  // Load the double value in the destination registers..
802  __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
803 
804  // Check for 0 and -0.
805  __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
806  __ orr(scratch1, scratch1, Operand(dst2));
807  __ cmp(scratch1, Operand::Zero());
808  __ b(eq, &done);
809 
810  // Check that the value can be exactly represented by a 32-bit integer.
811  // Jump to not_int32 if that's not the case.
812  DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
813 
814  // dst1 and dst2 were trashed. Reload the double value.
815  __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
816  }
817 
818  __ bind(&done);
819 }
820 
821 
822 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
823  Register object,
824  Register dst,
825  Register heap_number_map,
826  Register scratch1,
827  Register scratch2,
828  Register scratch3,
829  DwVfpRegister double_scratch,
830  Label* not_int32) {
831  ASSERT(!dst.is(object));
832  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
833  ASSERT(!scratch1.is(scratch2) &&
834  !scratch1.is(scratch3) &&
835  !scratch2.is(scratch3));
836 
837  Label done;
838 
839  __ UntagAndJumpIfSmi(dst, object, &done);
840 
841  if (FLAG_debug_code) {
842  __ AbortIfNotRootValue(heap_number_map,
843  Heap::kHeapNumberMapRootIndex,
844  "HeapNumberMap register clobbered.");
845  }
846  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
847 
848  // Object is a heap number.
849  // Convert the floating point value to a 32-bit integer.
851  CpuFeatures::Scope scope(VFP3);
852  SwVfpRegister single_scratch = double_scratch.low();
853  // Load the double value.
854  __ sub(scratch1, object, Operand(kHeapObjectTag));
855  __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
856 
857  __ EmitVFPTruncate(kRoundToZero,
858  single_scratch,
859  double_scratch,
860  scratch1,
861  scratch2,
863 
864  // Jump to not_int32 if the operation did not succeed.
865  __ b(ne, not_int32);
866  // Get the result in the destination register.
867  __ vmov(dst, single_scratch);
868 
869  } else {
870  // Load the double value in the destination registers.
871  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
872  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
873 
874  // Check for 0 and -0.
875  __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
876  __ orr(dst, scratch2, Operand(dst));
877  __ cmp(dst, Operand::Zero());
878  __ b(eq, &done);
879 
880  DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
881 
882  // Registers state after DoubleIs32BitInteger.
883  // dst: mantissa[51:20].
884  // scratch2: 1
885 
886  // Shift back the higher bits of the mantissa.
887  __ mov(dst, Operand(dst, LSR, scratch3));
888  // Set the implicit first bit.
889  __ rsb(scratch3, scratch3, Operand(32));
890  __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
891  // Set the sign.
892  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
893  __ tst(scratch1, Operand(HeapNumber::kSignMask));
894  __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
895  }
896 
897  __ bind(&done);
898 }
899 
900 
901 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
902  Register src1,
903  Register src2,
904  Register dst,
905  Register scratch,
906  Label* not_int32) {
907  // Get exponent alone in scratch.
908  __ Ubfx(scratch,
909  src1,
910  HeapNumber::kExponentShift,
912 
913  // Substract the bias from the exponent.
914  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
915 
916  // src1: higher (exponent) part of the double value.
917  // src2: lower (mantissa) part of the double value.
918  // scratch: unbiased exponent.
919 
920  // Fast cases. Check for obvious non 32-bit integer values.
921  // Negative exponent cannot yield 32-bit integers.
922  __ b(mi, not_int32);
923  // Exponent greater than 31 cannot yield 32-bit integers.
924  // Also, a positive value with an exponent equal to 31 is outside of the
925  // signed 32-bit integer range.
926  // Another way to put it is that if (exponent - signbit) > 30 then the
927  // number cannot be represented as an int32.
928  Register tmp = dst;
929  __ sub(tmp, scratch, Operand(src1, LSR, 31));
930  __ cmp(tmp, Operand(30));
931  __ b(gt, not_int32);
932  // - Bits [21:0] in the mantissa are not null.
933  __ tst(src2, Operand(0x3fffff));
934  __ b(ne, not_int32);
935 
936  // Otherwise the exponent needs to be big enough to shift left all the
937  // non zero bits left. So we need the (30 - exponent) last bits of the
938  // 31 higher bits of the mantissa to be null.
939  // Because bits [21:0] are null, we can check instead that the
940  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
941 
942  // Get the 32 higher bits of the mantissa in dst.
943  __ Ubfx(dst,
944  src2,
947  __ orr(dst,
948  dst,
950 
951  // Create the mask and test the lower bits (of the higher bits).
952  __ rsb(scratch, scratch, Operand(32));
953  __ mov(src2, Operand(1));
954  __ mov(src1, Operand(src2, LSL, scratch));
955  __ sub(src1, src1, Operand(1));
956  __ tst(dst, src1);
957  __ b(ne, not_int32);
958 }
959 
960 
962  MacroAssembler* masm,
963  Token::Value op,
964  Register heap_number_result,
965  Register scratch) {
966  // Using core registers:
967  // r0: Left value (least significant part of mantissa).
968  // r1: Left value (sign, exponent, top of mantissa).
969  // r2: Right value (least significant part of mantissa).
970  // r3: Right value (sign, exponent, top of mantissa).
971 
972  // Assert that heap_number_result is callee-saved.
973  // We currently always use r5 to pass it.
974  ASSERT(heap_number_result.is(r5));
975 
976  // Push the current return address before the C call. Return will be
977  // through pop(pc) below.
978  __ push(lr);
979  __ PrepareCallCFunction(0, 2, scratch);
980  if (masm->use_eabi_hardfloat()) {
981  CpuFeatures::Scope scope(VFP3);
982  __ vmov(d0, r0, r1);
983  __ vmov(d1, r2, r3);
984  }
985  {
986  AllowExternalCallThatCantCauseGC scope(masm);
987  __ CallCFunction(
988  ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
989  }
990  // Store answer in the overwritable heap number. Double returned in
991  // registers r0 and r1 or in d0.
992  if (masm->use_eabi_hardfloat()) {
993  CpuFeatures::Scope scope(VFP3);
994  __ vstr(d0,
995  FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
996  } else {
997  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
999  }
1000  // Place heap_number_result in r0 and return to the pushed return address.
1001  __ mov(r0, Operand(heap_number_result));
1002  __ pop(pc);
1003 }
1004 
1005 
1007  // These variants are compiled ahead of time. See next method.
1008  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
1009  return true;
1010  }
1011  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
1012  return true;
1013  }
1014  // Other register combinations are generated as and when they are needed,
1015  // so it is unsafe to call them from stubs (we can't generate a stub while
1016  // we are generating a stub).
1017  return false;
1018 }
1019 
1020 
1024  stub1.GetCode()->set_is_pregenerated(true);
1025  stub2.GetCode()->set_is_pregenerated(true);
1026 }
1027 
1028 
1029 // See comment for class.
1030 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1031  Label max_negative_int;
1032  // the_int_ has the answer which is a signed int32 but not a Smi.
1033  // We test for the special value that has a different exponent. This test
1034  // has the neat side effect of setting the flags according to the sign.
1035  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
1036  __ cmp(the_int_, Operand(0x80000000u));
1037  __ b(eq, &max_negative_int);
1038  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1039  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1040  uint32_t non_smi_exponent =
1041  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1042  __ mov(scratch_, Operand(non_smi_exponent));
1043  // Set the sign bit in scratch_ if the value was negative.
1044  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
1045  // Subtract from 0 if the value was negative.
1046  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
1047  // We should be masking the implict first digit of the mantissa away here,
1048  // but it just ends up combining harmlessly with the last digit of the
1049  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1050  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1051  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1052  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1053  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
1054  __ str(scratch_, FieldMemOperand(the_heap_number_,
1056  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
1057  __ str(scratch_, FieldMemOperand(the_heap_number_,
1059  __ Ret();
1060 
1061  __ bind(&max_negative_int);
1062  // The max negative int32 is stored as a positive number in the mantissa of
1063  // a double because it uses a sign bit instead of using two's complement.
1064  // The actual mantissa bits stored are all 0 because the implicit most
1065  // significant 1 bit is not stored.
1066  non_smi_exponent += 1 << HeapNumber::kExponentShift;
1067  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
1068  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1069  __ mov(ip, Operand(0, RelocInfo::NONE));
1070  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1071  __ Ret();
1072 }
1073 
1074 
1075 // Handle the case where the lhs and rhs are the same object.
1076 // Equality is almost reflexive (everything but NaN), so this is a test
1077 // for "identity and not NaN".
1078 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1079  Label* slow,
1080  Condition cond,
1081  bool never_nan_nan) {
1082  Label not_identical;
1083  Label heap_number, return_equal;
1084  __ cmp(r0, r1);
1085  __ b(ne, &not_identical);
1086 
1087  // The two objects are identical. If we know that one of them isn't NaN then
1088  // we now know they test equal.
1089  if (cond != eq || !never_nan_nan) {
1090  // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
1091  // so we do the second best thing - test it ourselves.
1092  // They are both equal and they are not both Smis so both of them are not
1093  // Smis. If it's not a heap number, then return equal.
1094  if (cond == lt || cond == gt) {
1095  __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
1096  __ b(ge, slow);
1097  } else {
1098  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
1099  __ b(eq, &heap_number);
1100  // Comparing JS objects with <=, >= is complicated.
1101  if (cond != eq) {
1102  __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
1103  __ b(ge, slow);
1104  // Normally here we fall through to return_equal, but undefined is
1105  // special: (undefined == undefined) == true, but
1106  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
1107  if (cond == le || cond == ge) {
1108  __ cmp(r4, Operand(ODDBALL_TYPE));
1109  __ b(ne, &return_equal);
1110  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
1111  __ cmp(r0, r2);
1112  __ b(ne, &return_equal);
1113  if (cond == le) {
1114  // undefined <= undefined should fail.
1115  __ mov(r0, Operand(GREATER));
1116  } else {
1117  // undefined >= undefined should fail.
1118  __ mov(r0, Operand(LESS));
1119  }
1120  __ Ret();
1121  }
1122  }
1123  }
1124  }
1125 
1126  __ bind(&return_equal);
1127  if (cond == lt) {
1128  __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
1129  } else if (cond == gt) {
1130  __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
1131  } else {
1132  __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
1133  }
1134  __ Ret();
1135 
1136  if (cond != eq || !never_nan_nan) {
1137  // For less and greater we don't have to check for NaN since the result of
1138  // x < x is false regardless. For the others here is some code to check
1139  // for NaN.
1140  if (cond != lt && cond != gt) {
1141  __ bind(&heap_number);
1142  // It is a heap number, so return non-equal if it's NaN and equal if it's
1143  // not NaN.
1144 
1145  // The representation of NaN values has all exponent bits (52..62) set,
1146  // and not all mantissa bits (0..51) clear.
1147  // Read top bits of double representation (second word of value).
1149  // Test that exponent bits are all set.
1150  __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
1151  // NaNs have all-one exponents so they sign extend to -1.
1152  __ cmp(r3, Operand(-1));
1153  __ b(ne, &return_equal);
1154 
1155  // Shift out flag and all exponent bits, retaining only mantissa.
1157  // Or with all low-bits of mantissa.
1159  __ orr(r0, r3, Operand(r2), SetCC);
1160  // For equal we already have the right value in r0: Return zero (equal)
1161  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1162  // not (it's a NaN). For <= and >= we need to load r0 with the failing
1163  // value if it's a NaN.
1164  if (cond != eq) {
1165  // All-zero means Infinity means equal.
1166  __ Ret(eq);
1167  if (cond == le) {
1168  __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
1169  } else {
1170  __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
1171  }
1172  }
1173  __ Ret();
1174  }
1175  // No fall through here.
1176  }
1177 
1178  __ bind(&not_identical);
1179 }
1180 
1181 
1182 // See comment at call site.
1183 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1184  Register lhs,
1185  Register rhs,
1186  Label* lhs_not_nan,
1187  Label* slow,
1188  bool strict) {
1189  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1190  (lhs.is(r1) && rhs.is(r0)));
1191 
1192  Label rhs_is_smi;
1193  __ JumpIfSmi(rhs, &rhs_is_smi);
1194 
1195  // Lhs is a Smi. Check whether the rhs is a heap number.
1196  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
1197  if (strict) {
1198  // If rhs is not a number and lhs is a Smi then strict equality cannot
1199  // succeed. Return non-equal
1200  // If rhs is r0 then there is already a non zero value in it.
1201  if (!rhs.is(r0)) {
1202  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1203  }
1204  __ Ret(ne);
1205  } else {
1206  // Smi compared non-strictly with a non-Smi non-heap-number. Call
1207  // the runtime.
1208  __ b(ne, slow);
1209  }
1210 
1211  // Lhs is a smi, rhs is a number.
1213  // Convert lhs to a double in d7.
1214  CpuFeatures::Scope scope(VFP3);
1215  __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
1216  // Load the double from rhs, tagged HeapNumber r0, to d6.
1217  __ sub(r7, rhs, Operand(kHeapObjectTag));
1218  __ vldr(d6, r7, HeapNumber::kValueOffset);
1219  } else {
1220  __ push(lr);
1221  // Convert lhs to a double in r2, r3.
1222  __ mov(r7, Operand(lhs));
1223  ConvertToDoubleStub stub1(r3, r2, r7, r6);
1224  __ Call(stub1.GetCode());
1225  // Load rhs to a double in r0, r1.
1227  __ pop(lr);
1228  }
1229 
1230  // We now have both loaded as doubles but we can skip the lhs nan check
1231  // since it's a smi.
1232  __ jmp(lhs_not_nan);
1233 
1234  __ bind(&rhs_is_smi);
1235  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
1236  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
1237  if (strict) {
1238  // If lhs is not a number and rhs is a smi then strict equality cannot
1239  // succeed. Return non-equal.
1240  // If lhs is r0 then there is already a non zero value in it.
1241  if (!lhs.is(r0)) {
1242  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1243  }
1244  __ Ret(ne);
1245  } else {
1246  // Smi compared non-strictly with a non-smi non-heap-number. Call
1247  // the runtime.
1248  __ b(ne, slow);
1249  }
1250 
1251  // Rhs is a smi, lhs is a heap number.
1253  CpuFeatures::Scope scope(VFP3);
1254  // Load the double from lhs, tagged HeapNumber r1, to d7.
1255  __ sub(r7, lhs, Operand(kHeapObjectTag));
1256  __ vldr(d7, r7, HeapNumber::kValueOffset);
1257  // Convert rhs to a double in d6 .
1258  __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
1259  } else {
1260  __ push(lr);
1261  // Load lhs to a double in r2, r3.
1263  // Convert rhs to a double in r0, r1.
1264  __ mov(r7, Operand(rhs));
1265  ConvertToDoubleStub stub2(r1, r0, r7, r6);
1266  __ Call(stub2.GetCode());
1267  __ pop(lr);
1268  }
1269  // Fall through to both_loaded_as_doubles.
1270 }
1271 
1272 
1273 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
1275  Register rhs_exponent = exp_first ? r0 : r1;
1276  Register lhs_exponent = exp_first ? r2 : r3;
1277  Register rhs_mantissa = exp_first ? r1 : r0;
1278  Register lhs_mantissa = exp_first ? r3 : r2;
1279  Label one_is_nan, neither_is_nan;
1280 
1281  __ Sbfx(r4,
1282  lhs_exponent,
1283  HeapNumber::kExponentShift,
1285  // NaNs have all-one exponents so they sign extend to -1.
1286  __ cmp(r4, Operand(-1));
1287  __ b(ne, lhs_not_nan);
1288  __ mov(r4,
1289  Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1290  SetCC);
1291  __ b(ne, &one_is_nan);
1292  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
1293  __ b(ne, &one_is_nan);
1294 
1295  __ bind(lhs_not_nan);
1296  __ Sbfx(r4,
1297  rhs_exponent,
1298  HeapNumber::kExponentShift,
1300  // NaNs have all-one exponents so they sign extend to -1.
1301  __ cmp(r4, Operand(-1));
1302  __ b(ne, &neither_is_nan);
1303  __ mov(r4,
1304  Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1305  SetCC);
1306  __ b(ne, &one_is_nan);
1307  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
1308  __ b(eq, &neither_is_nan);
1309 
1310  __ bind(&one_is_nan);
1311  // NaN comparisons always fail.
1312  // Load whatever we need in r0 to make the comparison fail.
1313  if (cond == lt || cond == le) {
1314  __ mov(r0, Operand(GREATER));
1315  } else {
1316  __ mov(r0, Operand(LESS));
1317  }
1318  __ Ret();
1319 
1320  __ bind(&neither_is_nan);
1321 }
1322 
1323 
1324 // See comment at call site.
1325 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
1326  Condition cond) {
1328  Register rhs_exponent = exp_first ? r0 : r1;
1329  Register lhs_exponent = exp_first ? r2 : r3;
1330  Register rhs_mantissa = exp_first ? r1 : r0;
1331  Register lhs_mantissa = exp_first ? r3 : r2;
1332 
1333  // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
1334  if (cond == eq) {
1335  // Doubles are not equal unless they have the same bit pattern.
1336  // Exception: 0 and -0.
1337  __ cmp(rhs_mantissa, Operand(lhs_mantissa));
1338  __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
1339  // Return non-zero if the numbers are unequal.
1340  __ Ret(ne);
1341 
1342  __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
1343  // If exponents are equal then return 0.
1344  __ Ret(eq);
1345 
1346  // Exponents are unequal. The only way we can return that the numbers
1347  // are equal is if one is -0 and the other is 0. We already dealt
1348  // with the case where both are -0 or both are 0.
1349  // We start by seeing if the mantissas (that are equal) or the bottom
1350  // 31 bits of the rhs exponent are non-zero. If so we return not
1351  // equal.
1352  __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
1353  __ mov(r0, Operand(r4), LeaveCC, ne);
1354  __ Ret(ne);
1355  // Now they are equal if and only if the lhs exponent is zero in its
1356  // low 31 bits.
1357  __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
1358  __ Ret();
1359  } else {
1360  // Call a native function to do a comparison between two non-NaNs.
1361  // Call C routine that may not cause GC or other trouble.
1362  __ push(lr);
1363  __ PrepareCallCFunction(0, 2, r5);
1364  if (masm->use_eabi_hardfloat()) {
1365  CpuFeatures::Scope scope(VFP3);
1366  __ vmov(d0, r0, r1);
1367  __ vmov(d1, r2, r3);
1368  }
1369 
1370  AllowExternalCallThatCantCauseGC scope(masm);
1371  __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1372  0, 2);
1373  __ pop(pc); // Return.
1374  }
1375 }
1376 
1377 
1378 // See comment at call site.
1379 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1380  Register lhs,
1381  Register rhs) {
1382  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1383  (lhs.is(r1) && rhs.is(r0)));
1384 
1385  // If either operand is a JS object or an oddball value, then they are
1386  // not equal since their pointers are different.
1387  // There is no test for undetectability in strict equality.
1389  Label first_non_object;
1390  // Get the type of the first operand into r2 and compare it with
1391  // FIRST_SPEC_OBJECT_TYPE.
1392  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
1393  __ b(lt, &first_non_object);
1394 
1395  // Return non-zero (r0 is not zero)
1396  Label return_not_equal;
1397  __ bind(&return_not_equal);
1398  __ Ret();
1399 
1400  __ bind(&first_non_object);
1401  // Check for oddballs: true, false, null, undefined.
1402  __ cmp(r2, Operand(ODDBALL_TYPE));
1403  __ b(eq, &return_not_equal);
1404 
1405  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
1406  __ b(ge, &return_not_equal);
1407 
1408  // Check for oddballs: true, false, null, undefined.
1409  __ cmp(r3, Operand(ODDBALL_TYPE));
1410  __ b(eq, &return_not_equal);
1411 
1412  // Now that we have the types we might as well check for symbol-symbol.
1413  // Ensure that no non-strings have the symbol bit set.
1415  STATIC_ASSERT(kSymbolTag != 0);
1416  __ and_(r2, r2, Operand(r3));
1417  __ tst(r2, Operand(kIsSymbolMask));
1418  __ b(ne, &return_not_equal);
1419 }
1420 
1421 
1422 // See comment at call site.
1423 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1424  Register lhs,
1425  Register rhs,
1426  Label* both_loaded_as_doubles,
1427  Label* not_heap_numbers,
1428  Label* slow) {
1429  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1430  (lhs.is(r1) && rhs.is(r0)));
1431 
1432  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
1433  __ b(ne, not_heap_numbers);
1435  __ cmp(r2, r3);
1436  __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
1437 
1438  // Both are heap numbers. Load them up then jump to the code we have
1439  // for that.
1441  CpuFeatures::Scope scope(VFP3);
1442  __ sub(r7, rhs, Operand(kHeapObjectTag));
1443  __ vldr(d6, r7, HeapNumber::kValueOffset);
1444  __ sub(r7, lhs, Operand(kHeapObjectTag));
1445  __ vldr(d7, r7, HeapNumber::kValueOffset);
1446  } else {
1449  }
1450  __ jmp(both_loaded_as_doubles);
1451 }
1452 
1453 
1454 // Fast negative check for symbol-to-symbol equality.
1455 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1456  Register lhs,
1457  Register rhs,
1458  Label* possible_strings,
1459  Label* not_both_strings) {
1460  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1461  (lhs.is(r1) && rhs.is(r0)));
1462 
1463  // r2 is object type of rhs.
1464  // Ensure that no non-strings have the symbol bit set.
1465  Label object_test;
1466  STATIC_ASSERT(kSymbolTag != 0);
1467  __ tst(r2, Operand(kIsNotStringMask));
1468  __ b(ne, &object_test);
1469  __ tst(r2, Operand(kIsSymbolMask));
1470  __ b(eq, possible_strings);
1471  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
1472  __ b(ge, not_both_strings);
1473  __ tst(r3, Operand(kIsSymbolMask));
1474  __ b(eq, possible_strings);
1475 
1476  // Both are symbols. We already checked they weren't the same pointer
1477  // so they are not equal.
1478  __ mov(r0, Operand(NOT_EQUAL));
1479  __ Ret();
1480 
1481  __ bind(&object_test);
1482  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
1483  __ b(lt, not_both_strings);
1484  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
1485  __ b(lt, not_both_strings);
1486  // If both objects are undetectable, they are equal. Otherwise, they
1487  // are not equal, since they are different objects and an object is not
1488  // equal to undefined.
1492  __ and_(r0, r2, Operand(r3));
1493  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
1494  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
1495  __ Ret();
1496 }
1497 
1498 
1499 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1500  Register object,
1501  Register result,
1502  Register scratch1,
1503  Register scratch2,
1504  Register scratch3,
1505  bool object_is_smi,
1506  Label* not_found) {
1507  // Use of registers. Register result is used as a temporary.
1508  Register number_string_cache = result;
1509  Register mask = scratch3;
1510 
1511  // Load the number string cache.
1512  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1513 
1514  // Make the hash mask from the length of the number string cache. It
1515  // contains two elements (number and string) for each cache entry.
1516  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1517  // Divide length by two (length is a smi).
1518  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
1519  __ sub(mask, mask, Operand(1)); // Make mask.
1520 
1521  // Calculate the entry in the number string cache. The hash value in the
1522  // number string cache for smis is just the smi value, and the hash for
1523  // doubles is the xor of the upper and lower words. See
1524  // Heap::GetNumberStringCache.
1525  Isolate* isolate = masm->isolate();
1526  Label is_smi;
1527  Label load_result_from_cache;
1528  if (!object_is_smi) {
1529  __ JumpIfSmi(object, &is_smi);
1531  CpuFeatures::Scope scope(VFP3);
1532  __ CheckMap(object,
1533  scratch1,
1534  Heap::kHeapNumberMapRootIndex,
1535  not_found,
1537 
1538  STATIC_ASSERT(8 == kDoubleSize);
1539  __ add(scratch1,
1540  object,
1542  __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
1543  __ eor(scratch1, scratch1, Operand(scratch2));
1544  __ and_(scratch1, scratch1, Operand(mask));
1545 
1546  // Calculate address of entry in string cache: each entry consists
1547  // of two pointer sized fields.
1548  __ add(scratch1,
1549  number_string_cache,
1550  Operand(scratch1, LSL, kPointerSizeLog2 + 1));
1551 
1552  Register probe = mask;
1553  __ ldr(probe,
1555  __ JumpIfSmi(probe, not_found);
1556  __ sub(scratch2, object, Operand(kHeapObjectTag));
1557  __ vldr(d0, scratch2, HeapNumber::kValueOffset);
1558  __ sub(probe, probe, Operand(kHeapObjectTag));
1559  __ vldr(d1, probe, HeapNumber::kValueOffset);
1560  __ VFPCompareAndSetFlags(d0, d1);
1561  __ b(ne, not_found); // The cache did not contain this value.
1562  __ b(&load_result_from_cache);
1563  } else {
1564  __ b(not_found);
1565  }
1566  }
1567 
1568  __ bind(&is_smi);
1569  Register scratch = scratch1;
1570  __ and_(scratch, mask, Operand(object, ASR, 1));
1571  // Calculate address of entry in string cache: each entry consists
1572  // of two pointer sized fields.
1573  __ add(scratch,
1574  number_string_cache,
1575  Operand(scratch, LSL, kPointerSizeLog2 + 1));
1576 
1577  // Check if the entry is the smi we are looking for.
1578  Register probe = mask;
1579  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1580  __ cmp(object, probe);
1581  __ b(ne, not_found);
1582 
1583  // Get the result from the cache.
1584  __ bind(&load_result_from_cache);
1585  __ ldr(result,
1586  FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1587  __ IncrementCounter(isolate->counters()->number_to_string_native(),
1588  1,
1589  scratch1,
1590  scratch2);
1591 }
1592 
1593 
1594 void NumberToStringStub::Generate(MacroAssembler* masm) {
1595  Label runtime;
1596 
1597  __ ldr(r1, MemOperand(sp, 0));
1598 
1599  // Generate code to lookup number in the number string cache.
1600  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
1601  __ add(sp, sp, Operand(1 * kPointerSize));
1602  __ Ret();
1603 
1604  __ bind(&runtime);
1605  // Handle number to string in the runtime system if not found in the cache.
1606  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
1607 }
1608 
1609 
1610 // On entry lhs_ and rhs_ are the values to be compared.
1611 // On exit r0 is 0, positive or negative to indicate the result of
1612 // the comparison.
1613 void CompareStub::Generate(MacroAssembler* masm) {
1614  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
1615  (lhs_.is(r1) && rhs_.is(r0)));
1616 
1617  Label slow; // Call builtin.
1618  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
1619 
1620  if (include_smi_compare_) {
1621  Label not_two_smis, smi_done;
1622  __ orr(r2, r1, r0);
1623  __ JumpIfNotSmi(r2, &not_two_smis);
1624  __ mov(r1, Operand(r1, ASR, 1));
1625  __ sub(r0, r1, Operand(r0, ASR, 1));
1626  __ Ret();
1627  __ bind(&not_two_smis);
1628  } else if (FLAG_debug_code) {
1629  __ orr(r2, r1, r0);
1630  __ tst(r2, Operand(kSmiTagMask));
1631  __ Assert(ne, "CompareStub: unexpected smi operands.");
1632  }
1633 
1634  // NOTICE! This code is only reached after a smi-fast-case check, so
1635  // it is certain that at least one operand isn't a smi.
1636 
1637  // Handle the case where the objects are identical. Either returns the answer
1638  // or goes to slow. Only falls through if the objects were not identical.
1639  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1640 
1641  // If either is a Smi (we know that not both are), then they can only
1642  // be strictly equal if the other is a HeapNumber.
1643  STATIC_ASSERT(kSmiTag == 0);
1644  ASSERT_EQ(0, Smi::FromInt(0));
1645  __ and_(r2, lhs_, Operand(rhs_));
1646  __ JumpIfNotSmi(r2, &not_smis);
1647  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1648  // 1) Return the answer.
1649  // 2) Go to slow.
1650  // 3) Fall through to both_loaded_as_doubles.
1651  // 4) Jump to lhs_not_nan.
1652  // In cases 3 and 4 we have found out we were dealing with a number-number
1653  // comparison. If VFP3 is supported the double values of the numbers have
1654  // been loaded into d7 and d6. Otherwise, the double values have been loaded
1655  // into r0, r1, r2, and r3.
1656  EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
1657 
1658  __ bind(&both_loaded_as_doubles);
1659  // The arguments have been converted to doubles and stored in d6 and d7, if
1660  // VFP3 is supported, or in r0, r1, r2, and r3.
1661  Isolate* isolate = masm->isolate();
1663  __ bind(&lhs_not_nan);
1664  CpuFeatures::Scope scope(VFP3);
1665  Label no_nan;
1666  // ARMv7 VFP3 instructions to implement double precision comparison.
1667  __ VFPCompareAndSetFlags(d7, d6);
1668  Label nan;
1669  __ b(vs, &nan);
1670  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1671  __ mov(r0, Operand(LESS), LeaveCC, lt);
1672  __ mov(r0, Operand(GREATER), LeaveCC, gt);
1673  __ Ret();
1674 
1675  __ bind(&nan);
1676  // If one of the sides was a NaN then the v flag is set. Load r0 with
1677  // whatever it takes to make the comparison fail, since comparisons with NaN
1678  // always fail.
1679  if (cc_ == lt || cc_ == le) {
1680  __ mov(r0, Operand(GREATER));
1681  } else {
1682  __ mov(r0, Operand(LESS));
1683  }
1684  __ Ret();
1685  } else {
1686  // Checks for NaN in the doubles we have loaded. Can return the answer or
1687  // fall through if neither is a NaN. Also binds lhs_not_nan.
1688  EmitNanCheck(masm, &lhs_not_nan, cc_);
1689  // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
1690  // answer. Never falls through.
1691  EmitTwoNonNanDoubleComparison(masm, cc_);
1692  }
1693 
1694  __ bind(&not_smis);
1695  // At this point we know we are dealing with two different objects,
1696  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
1697  if (strict_) {
1698  // This returns non-equal for some object types, or falls through if it
1699  // was not lucky.
1700  EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1701  }
1702 
1703  Label check_for_symbols;
1704  Label flat_string_check;
1705  // Check for heap-number-heap-number comparison. Can jump to slow case,
1706  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1707  // that case. If the inputs are not doubles then jumps to check_for_symbols.
1708  // In this case r2 will contain the type of rhs_. Never falls through.
1709  EmitCheckForTwoHeapNumbers(masm,
1710  lhs_,
1711  rhs_,
1712  &both_loaded_as_doubles,
1713  &check_for_symbols,
1714  &flat_string_check);
1715 
1716  __ bind(&check_for_symbols);
1717  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1718  // symbols.
1719  if (cc_ == eq && !strict_) {
1720  // Returns an answer for two symbols or two detectable objects.
1721  // Otherwise jumps to string case or not both strings case.
1722  // Assumes that r2 is the type of rhs_ on entry.
1723  EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1724  }
1725 
1726  // Check for both being sequential ASCII strings, and inline if that is the
1727  // case.
1728  __ bind(&flat_string_check);
1729 
1730  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
1731 
1732  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
1733  if (cc_ == eq) {
1735  lhs_,
1736  rhs_,
1737  r2,
1738  r3,
1739  r4);
1740  } else {
1742  lhs_,
1743  rhs_,
1744  r2,
1745  r3,
1746  r4,
1747  r5);
1748  }
1749  // Never falls through to here.
1750 
1751  __ bind(&slow);
1752 
1753  __ Push(lhs_, rhs_);
1754  // Figure out which native to call and setup the arguments.
1755  Builtins::JavaScript native;
1756  if (cc_ == eq) {
1757  native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1758  } else {
1759  native = Builtins::COMPARE;
1760  int ncr; // NaN compare result
1761  if (cc_ == lt || cc_ == le) {
1762  ncr = GREATER;
1763  } else {
1764  ASSERT(cc_ == gt || cc_ == ge); // remaining cases
1765  ncr = LESS;
1766  }
1767  __ mov(r0, Operand(Smi::FromInt(ncr)));
1768  __ push(r0);
1769  }
1770 
1771  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1772  // tagged as a small integer.
1773  __ InvokeBuiltin(native, JUMP_FUNCTION);
1774 }
1775 
1776 
1777 // The stub expects its argument in the tos_ register and returns its result in
1778 // it, too: zero for false, and a non-zero value for true.
1779 void ToBooleanStub::Generate(MacroAssembler* masm) {
1780  // This stub overrides SometimesSetsUpAFrame() to return false. That means
1781  // we cannot call anything that could cause a GC from this stub.
1782  // This stub uses VFP3 instructions.
1783  CpuFeatures::Scope scope(VFP3);
1784 
1785  Label patch;
1786  const Register map = r9.is(tos_) ? r7 : r9;
1787 
1788  // undefined -> false.
1789  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1790 
1791  // Boolean -> its value.
1792  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1793  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1794 
1795  // 'null' -> false.
1796  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1797 
1798  if (types_.Contains(SMI)) {
1799  // Smis: 0 -> false, all other -> true
1800  __ tst(tos_, Operand(kSmiTagMask));
1801  // tos_ contains the correct return value already
1802  __ Ret(eq);
1803  } else if (types_.NeedsMap()) {
1804  // If we need a map later and have a Smi -> patch.
1805  __ JumpIfSmi(tos_, &patch);
1806  }
1807 
1808  if (types_.NeedsMap()) {
1809  __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1810 
1811  if (types_.CanBeUndetectable()) {
1813  __ tst(ip, Operand(1 << Map::kIsUndetectable));
1814  // Undetectable -> false.
1815  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1816  __ Ret(ne);
1817  }
1818  }
1819 
1820  if (types_.Contains(SPEC_OBJECT)) {
1821  // Spec object -> true.
1822  __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1823  // tos_ contains the correct non-zero return value already.
1824  __ Ret(ge);
1825  }
1826 
1827  if (types_.Contains(STRING)) {
1828  // String value -> false iff empty.
1829  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1830  __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
1831  __ Ret(lt); // the string length is OK as the return value
1832  }
1833 
1834  if (types_.Contains(HEAP_NUMBER)) {
1835  // Heap number -> false iff +0, -0, or NaN.
1836  Label not_heap_number;
1837  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1838  __ b(ne, &not_heap_number);
1840  __ VFPCompareAndSetFlags(d1, 0.0);
1841  // "tos_" is a register, and contains a non zero value by default.
1842  // Hence we only need to overwrite "tos_" with zero to return false for
1843  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1844  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
1845  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
1846  __ Ret();
1847  __ bind(&not_heap_number);
1848  }
1849 
1850  __ bind(&patch);
1851  GenerateTypeTransition(masm);
1852 }
1853 
1854 
1855 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1856  Type type,
1857  Heap::RootListIndex value,
1858  bool result) {
1859  if (types_.Contains(type)) {
1860  // If we see an expected oddball, return its ToBoolean value tos_.
1861  __ LoadRoot(ip, value);
1862  __ cmp(tos_, ip);
1863  // The value of a root is never NULL, so we can avoid loading a non-null
1864  // value into tos_ when we want to return 'true'.
1865  if (!result) {
1866  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1867  }
1868  __ Ret(eq);
1869  }
1870 }
1871 
1872 
1873 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1874  if (!tos_.is(r3)) {
1875  __ mov(r3, Operand(tos_));
1876  }
1877  __ mov(r2, Operand(Smi::FromInt(tos_.code())));
1878  __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
1879  __ Push(r3, r2, r1);
1880  // Patch the caller to an appropriate specialized stub and return the
1881  // operation result to the caller of the stub.
1882  __ TailCallExternalReference(
1883  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1884  3,
1885  1);
1886 }
1887 
1888 
1889 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1890  // We don't allow a GC during a store buffer overflow so there is no need to
1891  // store the registers in any particular way, but we do have to store and
1892  // restore them.
1893  __ stm(db_w, sp, kCallerSaved | lr.bit());
1894  if (save_doubles_ == kSaveFPRegs) {
1895  CpuFeatures::Scope scope(VFP3);
1896  __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1897  for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
1898  DwVfpRegister reg = DwVfpRegister::from_code(i);
1899  __ vstr(reg, MemOperand(sp, i * kDoubleSize));
1900  }
1901  }
1902  const int argument_count = 1;
1903  const int fp_argument_count = 0;
1904  const Register scratch = r1;
1905 
1906  AllowExternalCallThatCantCauseGC scope(masm);
1907  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1908  __ mov(r0, Operand(ExternalReference::isolate_address()));
1909  __ CallCFunction(
1910  ExternalReference::store_buffer_overflow_function(masm->isolate()),
1911  argument_count);
1912  if (save_doubles_ == kSaveFPRegs) {
1913  CpuFeatures::Scope scope(VFP3);
1914  for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
1915  DwVfpRegister reg = DwVfpRegister::from_code(i);
1916  __ vldr(reg, MemOperand(sp, i * kDoubleSize));
1917  }
1918  __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
1919  }
1920  __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1921 }
1922 
1923 
1924 void UnaryOpStub::PrintName(StringStream* stream) {
1925  const char* op_name = Token::Name(op_);
1926  const char* overwrite_name = NULL; // Make g++ happy.
1927  switch (mode_) {
1928  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1929  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1930  }
1931  stream->Add("UnaryOpStub_%s_%s_%s",
1932  op_name,
1933  overwrite_name,
1934  UnaryOpIC::GetName(operand_type_));
1935 }
1936 
1937 
1938 // TODO(svenpanne): Use virtual functions instead of switch.
1939 void UnaryOpStub::Generate(MacroAssembler* masm) {
1940  switch (operand_type_) {
1942  GenerateTypeTransition(masm);
1943  break;
1944  case UnaryOpIC::SMI:
1945  GenerateSmiStub(masm);
1946  break;
1948  GenerateHeapNumberStub(masm);
1949  break;
1950  case UnaryOpIC::GENERIC:
1951  GenerateGenericStub(masm);
1952  break;
1953  }
1954 }
1955 
1956 
1957 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1958  __ mov(r3, Operand(r0)); // the operand
1959  __ mov(r2, Operand(Smi::FromInt(op_)));
1960  __ mov(r1, Operand(Smi::FromInt(mode_)));
1961  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
1962  __ Push(r3, r2, r1, r0);
1963 
1964  __ TailCallExternalReference(
1965  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
1966 }
1967 
1968 
1969 // TODO(svenpanne): Use virtual functions instead of switch.
1970 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1971  switch (op_) {
1972  case Token::SUB:
1973  GenerateSmiStubSub(masm);
1974  break;
1975  case Token::BIT_NOT:
1976  GenerateSmiStubBitNot(masm);
1977  break;
1978  default:
1979  UNREACHABLE();
1980  }
1981 }
1982 
1983 
1984 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
1985  Label non_smi, slow;
1986  GenerateSmiCodeSub(masm, &non_smi, &slow);
1987  __ bind(&non_smi);
1988  __ bind(&slow);
1989  GenerateTypeTransition(masm);
1990 }
1991 
1992 
1993 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
1994  Label non_smi;
1995  GenerateSmiCodeBitNot(masm, &non_smi);
1996  __ bind(&non_smi);
1997  GenerateTypeTransition(masm);
1998 }
1999 
2000 
2001 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2002  Label* non_smi,
2003  Label* slow) {
2004  __ JumpIfNotSmi(r0, non_smi);
2005 
2006  // The result of negating zero or the smallest negative smi is not a smi.
2007  __ bic(ip, r0, Operand(0x80000000), SetCC);
2008  __ b(eq, slow);
2009 
2010  // Return '0 - value'.
2011  __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
2012  __ Ret();
2013 }
2014 
2015 
2016 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2017  Label* non_smi) {
2018  __ JumpIfNotSmi(r0, non_smi);
2019 
2020  // Flip bits and revert inverted smi-tag.
2021  __ mvn(r0, Operand(r0));
2022  __ bic(r0, r0, Operand(kSmiTagMask));
2023  __ Ret();
2024 }
2025 
2026 
2027 // TODO(svenpanne): Use virtual functions instead of switch.
2028 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2029  switch (op_) {
2030  case Token::SUB:
2031  GenerateHeapNumberStubSub(masm);
2032  break;
2033  case Token::BIT_NOT:
2034  GenerateHeapNumberStubBitNot(masm);
2035  break;
2036  default:
2037  UNREACHABLE();
2038  }
2039 }
2040 
2041 
2042 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2043  Label non_smi, slow, call_builtin;
2044  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2045  __ bind(&non_smi);
2046  GenerateHeapNumberCodeSub(masm, &slow);
2047  __ bind(&slow);
2048  GenerateTypeTransition(masm);
2049  __ bind(&call_builtin);
2050  GenerateGenericCodeFallback(masm);
2051 }
2052 
2053 
2054 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2055  Label non_smi, slow;
2056  GenerateSmiCodeBitNot(masm, &non_smi);
2057  __ bind(&non_smi);
2058  GenerateHeapNumberCodeBitNot(masm, &slow);
2059  __ bind(&slow);
2060  GenerateTypeTransition(masm);
2061 }
2062 
2063 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2064  Label* slow) {
2065  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
2066  // r0 is a heap number. Get a new heap number in r1.
2067  if (mode_ == UNARY_OVERWRITE) {
2069  __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2071  } else {
2072  Label slow_allocate_heapnumber, heapnumber_allocated;
2073  __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
2074  __ jmp(&heapnumber_allocated);
2075 
2076  __ bind(&slow_allocate_heapnumber);
2077  {
2078  FrameScope scope(masm, StackFrame::INTERNAL);
2079  __ push(r0);
2080  __ CallRuntime(Runtime::kNumberAlloc, 0);
2081  __ mov(r1, Operand(r0));
2082  __ pop(r0);
2083  }
2084 
2085  __ bind(&heapnumber_allocated);
2089  __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2091  __ mov(r0, Operand(r1));
2092  }
2093  __ Ret();
2094 }
2095 
2096 
2097 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2098  MacroAssembler* masm, Label* slow) {
2099  Label impossible;
2100 
2101  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
2102  // Convert the heap number is r0 to an untagged integer in r1.
2103  __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
2104 
2105  // Do the bitwise operation and check if the result fits in a smi.
2106  Label try_float;
2107  __ mvn(r1, Operand(r1));
2108  __ add(r2, r1, Operand(0x40000000), SetCC);
2109  __ b(mi, &try_float);
2110 
2111  // Tag the result as a smi and we're done.
2112  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
2113  __ Ret();
2114 
2115  // Try to store the result in a heap number.
2116  __ bind(&try_float);
2117  if (mode_ == UNARY_NO_OVERWRITE) {
2118  Label slow_allocate_heapnumber, heapnumber_allocated;
2119  // Allocate a new heap number without zapping r0, which we need if it fails.
2120  __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
2121  __ jmp(&heapnumber_allocated);
2122 
2123  __ bind(&slow_allocate_heapnumber);
2124  {
2125  FrameScope scope(masm, StackFrame::INTERNAL);
2126  __ push(r0); // Push the heap number, not the untagged int32.
2127  __ CallRuntime(Runtime::kNumberAlloc, 0);
2128  __ mov(r2, r0); // Move the new heap number into r2.
2129  // Get the heap number into r0, now that the new heap number is in r2.
2130  __ pop(r0);
2131  }
2132 
2133  // Convert the heap number in r0 to an untagged integer in r1.
2134  // This can't go slow-case because it's the same number we already
2135  // converted once again.
2136  __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
2137  __ mvn(r1, Operand(r1));
2138 
2139  __ bind(&heapnumber_allocated);
2140  __ mov(r0, r2); // Move newly allocated heap number to r0.
2141  }
2142 
2144  // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
2145  CpuFeatures::Scope scope(VFP3);
2146  __ vmov(s0, r1);
2147  __ vcvt_f64_s32(d0, s0);
2148  __ sub(r2, r0, Operand(kHeapObjectTag));
2149  __ vstr(d0, r2, HeapNumber::kValueOffset);
2150  __ Ret();
2151  } else {
2152  // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2153  // have to set up a frame.
2154  WriteInt32ToHeapNumberStub stub(r1, r0, r2);
2155  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2156  }
2157 
2158  __ bind(&impossible);
2159  if (FLAG_debug_code) {
2160  __ stop("Incorrect assumption in bit-not stub");
2161  }
2162 }
2163 
2164 
2165 // TODO(svenpanne): Use virtual functions instead of switch.
2166 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2167  switch (op_) {
2168  case Token::SUB:
2169  GenerateGenericStubSub(masm);
2170  break;
2171  case Token::BIT_NOT:
2172  GenerateGenericStubBitNot(masm);
2173  break;
2174  default:
2175  UNREACHABLE();
2176  }
2177 }
2178 
2179 
2180 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2181  Label non_smi, slow;
2182  GenerateSmiCodeSub(masm, &non_smi, &slow);
2183  __ bind(&non_smi);
2184  GenerateHeapNumberCodeSub(masm, &slow);
2185  __ bind(&slow);
2186  GenerateGenericCodeFallback(masm);
2187 }
2188 
2189 
2190 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2191  Label non_smi, slow;
2192  GenerateSmiCodeBitNot(masm, &non_smi);
2193  __ bind(&non_smi);
2194  GenerateHeapNumberCodeBitNot(masm, &slow);
2195  __ bind(&slow);
2196  GenerateGenericCodeFallback(masm);
2197 }
2198 
2199 
2200 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
2201  // Handle the slow case by jumping to the JavaScript builtin.
2202  __ push(r0);
2203  switch (op_) {
2204  case Token::SUB:
2205  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2206  break;
2207  case Token::BIT_NOT:
2208  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2209  break;
2210  default:
2211  UNREACHABLE();
2212  }
2213 }
2214 
2215 
2216 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2217  Label get_result;
2218 
2219  __ Push(r1, r0);
2220 
2221  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2222  __ mov(r1, Operand(Smi::FromInt(op_)));
2223  __ mov(r0, Operand(Smi::FromInt(operands_type_)));
2224  __ Push(r2, r1, r0);
2225 
2226  __ TailCallExternalReference(
2227  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2228  masm->isolate()),
2229  5,
2230  1);
2231 }
2232 
2233 
2234 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2235  MacroAssembler* masm) {
2236  UNIMPLEMENTED();
2237 }
2238 
2239 
2240 void BinaryOpStub::Generate(MacroAssembler* masm) {
2241  // Explicitly allow generation of nested stubs. It is safe here because
2242  // generation code does not use any raw pointers.
2243  AllowStubCallsScope allow_stub_calls(masm, true);
2244 
2245  switch (operands_type_) {
2247  GenerateTypeTransition(masm);
2248  break;
2249  case BinaryOpIC::SMI:
2250  GenerateSmiStub(masm);
2251  break;
2252  case BinaryOpIC::INT32:
2253  GenerateInt32Stub(masm);
2254  break;
2256  GenerateHeapNumberStub(masm);
2257  break;
2258  case BinaryOpIC::ODDBALL:
2259  GenerateOddballStub(masm);
2260  break;
2262  GenerateBothStringStub(masm);
2263  break;
2264  case BinaryOpIC::STRING:
2265  GenerateStringStub(masm);
2266  break;
2267  case BinaryOpIC::GENERIC:
2268  GenerateGeneric(masm);
2269  break;
2270  default:
2271  UNREACHABLE();
2272  }
2273 }
2274 
2275 
2276 void BinaryOpStub::PrintName(StringStream* stream) {
2277  const char* op_name = Token::Name(op_);
2278  const char* overwrite_name;
2279  switch (mode_) {
2280  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2281  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2282  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2283  default: overwrite_name = "UnknownOverwrite"; break;
2284  }
2285  stream->Add("BinaryOpStub_%s_%s_%s",
2286  op_name,
2287  overwrite_name,
2288  BinaryOpIC::GetName(operands_type_));
2289 }
2290 
2291 
2292 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2293  Register left = r1;
2294  Register right = r0;
2295  Register scratch1 = r7;
2296  Register scratch2 = r9;
2297 
2298  ASSERT(right.is(r0));
2299  STATIC_ASSERT(kSmiTag == 0);
2300 
2301  Label not_smi_result;
2302  switch (op_) {
2303  case Token::ADD:
2304  __ add(right, left, Operand(right), SetCC); // Add optimistically.
2305  __ Ret(vc);
2306  __ sub(right, right, Operand(left)); // Revert optimistic add.
2307  break;
2308  case Token::SUB:
2309  __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
2310  __ Ret(vc);
2311  __ sub(right, left, Operand(right)); // Revert optimistic subtract.
2312  break;
2313  case Token::MUL:
2314  // Remove tag from one of the operands. This way the multiplication result
2315  // will be a smi if it fits the smi range.
2316  __ SmiUntag(ip, right);
2317  // Do multiplication
2318  // scratch1 = lower 32 bits of ip * left.
2319  // scratch2 = higher 32 bits of ip * left.
2320  __ smull(scratch1, scratch2, left, ip);
2321  // Check for overflowing the smi range - no overflow if higher 33 bits of
2322  // the result are identical.
2323  __ mov(ip, Operand(scratch1, ASR, 31));
2324  __ cmp(ip, Operand(scratch2));
2325  __ b(ne, &not_smi_result);
2326  // Go slow on zero result to handle -0.
2327  __ cmp(scratch1, Operand(0));
2328  __ mov(right, Operand(scratch1), LeaveCC, ne);
2329  __ Ret(ne);
2330  // We need -0 if we were multiplying a negative number with 0 to get 0.
2331  // We know one of them was zero.
2332  __ add(scratch2, right, Operand(left), SetCC);
2333  __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2334  __ Ret(pl); // Return smi 0 if the non-zero one was positive.
2335  // We fall through here if we multiplied a negative number with 0, because
2336  // that would mean we should produce -0.
2337  break;
2338  case Token::DIV:
2339  // Check for power of two on the right hand side.
2340  __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2341  // Check for positive and no remainder (scratch1 contains right - 1).
2342  __ orr(scratch2, scratch1, Operand(0x80000000u));
2343  __ tst(left, scratch2);
2344  __ b(ne, &not_smi_result);
2345 
2346  // Perform division by shifting.
2347  __ CountLeadingZeros(scratch1, scratch1, scratch2);
2348  __ rsb(scratch1, scratch1, Operand(31));
2349  __ mov(right, Operand(left, LSR, scratch1));
2350  __ Ret();
2351  break;
2352  case Token::MOD:
2353  // Check for two positive smis.
2354  __ orr(scratch1, left, Operand(right));
2355  __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
2356  __ b(ne, &not_smi_result);
2357 
2358  // Check for power of two on the right hand side.
2359  __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2360 
2361  // Perform modulus by masking.
2362  __ and_(right, left, Operand(scratch1));
2363  __ Ret();
2364  break;
2365  case Token::BIT_OR:
2366  __ orr(right, left, Operand(right));
2367  __ Ret();
2368  break;
2369  case Token::BIT_AND:
2370  __ and_(right, left, Operand(right));
2371  __ Ret();
2372  break;
2373  case Token::BIT_XOR:
2374  __ eor(right, left, Operand(right));
2375  __ Ret();
2376  break;
2377  case Token::SAR:
2378  // Remove tags from right operand.
2379  __ GetLeastBitsFromSmi(scratch1, right, 5);
2380  __ mov(right, Operand(left, ASR, scratch1));
2381  // Smi tag result.
2382  __ bic(right, right, Operand(kSmiTagMask));
2383  __ Ret();
2384  break;
2385  case Token::SHR:
2386  // Remove tags from operands. We can't do this on a 31 bit number
2387  // because then the 0s get shifted into bit 30 instead of bit 31.
2388  __ SmiUntag(scratch1, left);
2389  __ GetLeastBitsFromSmi(scratch2, right, 5);
2390  __ mov(scratch1, Operand(scratch1, LSR, scratch2));
2391  // Unsigned shift is not allowed to produce a negative number, so
2392  // check the sign bit and the sign bit after Smi tagging.
2393  __ tst(scratch1, Operand(0xc0000000));
2394  __ b(ne, &not_smi_result);
2395  // Smi tag result.
2396  __ SmiTag(right, scratch1);
2397  __ Ret();
2398  break;
2399  case Token::SHL:
2400  // Remove tags from operands.
2401  __ SmiUntag(scratch1, left);
2402  __ GetLeastBitsFromSmi(scratch2, right, 5);
2403  __ mov(scratch1, Operand(scratch1, LSL, scratch2));
2404  // Check that the signed result fits in a Smi.
2405  __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2406  __ b(mi, &not_smi_result);
2407  __ SmiTag(right, scratch1);
2408  __ Ret();
2409  break;
2410  default:
2411  UNREACHABLE();
2412  }
2413  __ bind(&not_smi_result);
2414 }
2415 
2416 
2417 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2418  bool smi_operands,
2419  Label* not_numbers,
2420  Label* gc_required) {
2421  Register left = r1;
2422  Register right = r0;
2423  Register scratch1 = r7;
2424  Register scratch2 = r9;
2425  Register scratch3 = r4;
2426 
2427  ASSERT(smi_operands || (not_numbers != NULL));
2428  if (smi_operands && FLAG_debug_code) {
2429  __ AbortIfNotSmi(left);
2430  __ AbortIfNotSmi(right);
2431  }
2432 
2433  Register heap_number_map = r6;
2434  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2435 
2436  switch (op_) {
2437  case Token::ADD:
2438  case Token::SUB:
2439  case Token::MUL:
2440  case Token::DIV:
2441  case Token::MOD: {
2442  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2443  // depending on whether VFP3 is available or not.
2444  FloatingPointHelper::Destination destination =
2446  op_ != Token::MOD ?
2449 
2450  // Allocate new heap number for result.
2451  Register result = r5;
2452  GenerateHeapResultAllocation(
2453  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2454 
2455  // Load the operands.
2456  if (smi_operands) {
2457  FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2458  } else {
2460  destination,
2461  heap_number_map,
2462  scratch1,
2463  scratch2,
2464  not_numbers);
2465  }
2466 
2467  // Calculate the result.
2468  if (destination == FloatingPointHelper::kVFPRegisters) {
2469  // Using VFP registers:
2470  // d6: Left value
2471  // d7: Right value
2472  CpuFeatures::Scope scope(VFP3);
2473  switch (op_) {
2474  case Token::ADD:
2475  __ vadd(d5, d6, d7);
2476  break;
2477  case Token::SUB:
2478  __ vsub(d5, d6, d7);
2479  break;
2480  case Token::MUL:
2481  __ vmul(d5, d6, d7);
2482  break;
2483  case Token::DIV:
2484  __ vdiv(d5, d6, d7);
2485  break;
2486  default:
2487  UNREACHABLE();
2488  }
2489 
2490  __ sub(r0, result, Operand(kHeapObjectTag));
2491  __ vstr(d5, r0, HeapNumber::kValueOffset);
2492  __ add(r0, r0, Operand(kHeapObjectTag));
2493  __ Ret();
2494  } else {
2495  // Call the C function to handle the double operation.
2497  op_,
2498  result,
2499  scratch1);
2500  if (FLAG_debug_code) {
2501  __ stop("Unreachable code.");
2502  }
2503  }
2504  break;
2505  }
2506  case Token::BIT_OR:
2507  case Token::BIT_XOR:
2508  case Token::BIT_AND:
2509  case Token::SAR:
2510  case Token::SHR:
2511  case Token::SHL: {
2512  if (smi_operands) {
2513  __ SmiUntag(r3, left);
2514  __ SmiUntag(r2, right);
2515  } else {
2516  // Convert operands to 32-bit integers. Right in r2 and left in r3.
2518  left,
2519  r3,
2520  heap_number_map,
2521  scratch1,
2522  scratch2,
2523  scratch3,
2524  d0,
2525  not_numbers);
2527  right,
2528  r2,
2529  heap_number_map,
2530  scratch1,
2531  scratch2,
2532  scratch3,
2533  d0,
2534  not_numbers);
2535  }
2536 
2537  Label result_not_a_smi;
2538  switch (op_) {
2539  case Token::BIT_OR:
2540  __ orr(r2, r3, Operand(r2));
2541  break;
2542  case Token::BIT_XOR:
2543  __ eor(r2, r3, Operand(r2));
2544  break;
2545  case Token::BIT_AND:
2546  __ and_(r2, r3, Operand(r2));
2547  break;
2548  case Token::SAR:
2549  // Use only the 5 least significant bits of the shift count.
2550  __ GetLeastBitsFromInt32(r2, r2, 5);
2551  __ mov(r2, Operand(r3, ASR, r2));
2552  break;
2553  case Token::SHR:
2554  // Use only the 5 least significant bits of the shift count.
2555  __ GetLeastBitsFromInt32(r2, r2, 5);
2556  __ mov(r2, Operand(r3, LSR, r2), SetCC);
2557  // SHR is special because it is required to produce a positive answer.
2558  // The code below for writing into heap numbers isn't capable of
2559  // writing the register as an unsigned int so we go to slow case if we
2560  // hit this case.
2562  __ b(mi, &result_not_a_smi);
2563  } else {
2564  __ b(mi, not_numbers);
2565  }
2566  break;
2567  case Token::SHL:
2568  // Use only the 5 least significant bits of the shift count.
2569  __ GetLeastBitsFromInt32(r2, r2, 5);
2570  __ mov(r2, Operand(r3, LSL, r2));
2571  break;
2572  default:
2573  UNREACHABLE();
2574  }
2575 
2576  // Check that the *signed* result fits in a smi.
2577  __ add(r3, r2, Operand(0x40000000), SetCC);
2578  __ b(mi, &result_not_a_smi);
2579  __ SmiTag(r0, r2);
2580  __ Ret();
2581 
2582  // Allocate new heap number for result.
2583  __ bind(&result_not_a_smi);
2584  Register result = r5;
2585  if (smi_operands) {
2586  __ AllocateHeapNumber(
2587  result, scratch1, scratch2, heap_number_map, gc_required);
2588  } else {
2589  GenerateHeapResultAllocation(
2590  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2591  }
2592 
2593  // r2: Answer as signed int32.
2594  // r5: Heap number to write answer into.
2595 
2596  // Nothing can go wrong now, so move the heap number to r0, which is the
2597  // result.
2598  __ mov(r0, Operand(r5));
2599 
2601  // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
2602  // mentioned above SHR needs to always produce a positive result.
2603  CpuFeatures::Scope scope(VFP3);
2604  __ vmov(s0, r2);
2605  if (op_ == Token::SHR) {
2606  __ vcvt_f64_u32(d0, s0);
2607  } else {
2608  __ vcvt_f64_s32(d0, s0);
2609  }
2610  __ sub(r3, r0, Operand(kHeapObjectTag));
2611  __ vstr(d0, r3, HeapNumber::kValueOffset);
2612  __ Ret();
2613  } else {
2614  // Tail call that writes the int32 in r2 to the heap number in r0, using
2615  // r3 as scratch. r0 is preserved and returned.
2616  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2617  __ TailCallStub(&stub);
2618  }
2619  break;
2620  }
2621  default:
2622  UNREACHABLE();
2623  }
2624 }
2625 
2626 
2627 // Generate the smi code. If the operation on smis are successful this return is
2628 // generated. If the result is not a smi and heap number allocation is not
2629 // requested the code falls through. If number allocation is requested but a
2630 // heap number cannot be allocated the code jumps to the lable gc_required.
2631 void BinaryOpStub::GenerateSmiCode(
2632  MacroAssembler* masm,
2633  Label* use_runtime,
2634  Label* gc_required,
2635  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2636  Label not_smis;
2637 
2638  Register left = r1;
2639  Register right = r0;
2640  Register scratch1 = r7;
2641 
2642  // Perform combined smi check on both operands.
2643  __ orr(scratch1, left, Operand(right));
2644  STATIC_ASSERT(kSmiTag == 0);
2645  __ JumpIfNotSmi(scratch1, &not_smis);
2646 
2647  // If the smi-smi operation results in a smi return is generated.
2648  GenerateSmiSmiOperation(masm);
2649 
2650  // If heap number results are possible generate the result in an allocated
2651  // heap number.
2652  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2653  GenerateFPOperation(masm, true, use_runtime, gc_required);
2654  }
2655  __ bind(&not_smis);
2656 }
2657 
2658 
2659 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2660  Label not_smis, call_runtime;
2661 
2662  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2663  result_type_ == BinaryOpIC::SMI) {
2664  // Only allow smi results.
2665  GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2666  } else {
2667  // Allow heap number result and don't make a transition if a heap number
2668  // cannot be allocated.
2669  GenerateSmiCode(masm,
2670  &call_runtime,
2671  &call_runtime,
2672  ALLOW_HEAPNUMBER_RESULTS);
2673  }
2674 
2675  // Code falls through if the result is not returned as either a smi or heap
2676  // number.
2677  GenerateTypeTransition(masm);
2678 
2679  __ bind(&call_runtime);
2680  GenerateCallRuntime(masm);
2681 }
2682 
2683 
2684 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2685  ASSERT(operands_type_ == BinaryOpIC::STRING);
2686  ASSERT(op_ == Token::ADD);
2687  // Try to add arguments as strings, otherwise, transition to the generic
2688  // BinaryOpIC type.
2689  GenerateAddStrings(masm);
2690  GenerateTypeTransition(masm);
2691 }
2692 
2693 
2694 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2695  Label call_runtime;
2696  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2697  ASSERT(op_ == Token::ADD);
2698  // If both arguments are strings, call the string add stub.
2699  // Otherwise, do a transition.
2700 
2701  // Registers containing left and right operands respectively.
2702  Register left = r1;
2703  Register right = r0;
2704 
2705  // Test if left operand is a string.
2706  __ JumpIfSmi(left, &call_runtime);
2707  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
2708  __ b(ge, &call_runtime);
2709 
2710  // Test if right operand is a string.
2711  __ JumpIfSmi(right, &call_runtime);
2712  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
2713  __ b(ge, &call_runtime);
2714 
2715  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2716  GenerateRegisterArgsPush(masm);
2717  __ TailCallStub(&string_add_stub);
2718 
2719  __ bind(&call_runtime);
2720  GenerateTypeTransition(masm);
2721 }
2722 
2723 
2724 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2725  ASSERT(operands_type_ == BinaryOpIC::INT32);
2726 
2727  Register left = r1;
2728  Register right = r0;
2729  Register scratch1 = r7;
2730  Register scratch2 = r9;
2731  DwVfpRegister double_scratch = d0;
2732  SwVfpRegister single_scratch = s3;
2733 
2734  Register heap_number_result = no_reg;
2735  Register heap_number_map = r6;
2736  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2737 
2738  Label call_runtime;
2739  // Labels for type transition, used for wrong input or output types.
2740  // Both label are currently actually bound to the same position. We use two
2741  // different label to differentiate the cause leading to type transition.
2742  Label transition;
2743 
2744  // Smi-smi fast case.
2745  Label skip;
2746  __ orr(scratch1, left, right);
2747  __ JumpIfNotSmi(scratch1, &skip);
2748  GenerateSmiSmiOperation(masm);
2749  // Fall through if the result is not a smi.
2750  __ bind(&skip);
2751 
2752  switch (op_) {
2753  case Token::ADD:
2754  case Token::SUB:
2755  case Token::MUL:
2756  case Token::DIV:
2757  case Token::MOD: {
2758  // Load both operands and check that they are 32-bit integer.
2759  // Jump to type transition if they are not. The registers r0 and r1 (right
2760  // and left) are preserved for the runtime call.
2761  FloatingPointHelper::Destination destination =
2762  (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
2765 
2767  right,
2768  destination,
2769  d7,
2770  r2,
2771  r3,
2772  heap_number_map,
2773  scratch1,
2774  scratch2,
2775  s0,
2776  &transition);
2778  left,
2779  destination,
2780  d6,
2781  r4,
2782  r5,
2783  heap_number_map,
2784  scratch1,
2785  scratch2,
2786  s0,
2787  &transition);
2788 
2789  if (destination == FloatingPointHelper::kVFPRegisters) {
2790  CpuFeatures::Scope scope(VFP3);
2791  Label return_heap_number;
2792  switch (op_) {
2793  case Token::ADD:
2794  __ vadd(d5, d6, d7);
2795  break;
2796  case Token::SUB:
2797  __ vsub(d5, d6, d7);
2798  break;
2799  case Token::MUL:
2800  __ vmul(d5, d6, d7);
2801  break;
2802  case Token::DIV:
2803  __ vdiv(d5, d6, d7);
2804  break;
2805  default:
2806  UNREACHABLE();
2807  }
2808 
2809  if (op_ != Token::DIV) {
2810  // These operations produce an integer result.
2811  // Try to return a smi if we can.
2812  // Otherwise return a heap number if allowed, or jump to type
2813  // transition.
2814 
2815  __ EmitVFPTruncate(kRoundToZero,
2816  single_scratch,
2817  d5,
2818  scratch1,
2819  scratch2);
2820 
2821  if (result_type_ <= BinaryOpIC::INT32) {
2822  // If the ne condition is set, result does
2823  // not fit in a 32-bit integer.
2824  __ b(ne, &transition);
2825  }
2826 
2827  // Check if the result fits in a smi.
2828  __ vmov(scratch1, single_scratch);
2829  __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2830  // If not try to return a heap number.
2831  __ b(mi, &return_heap_number);
2832  // Check for minus zero. Return heap number for minus zero.
2833  Label not_zero;
2834  __ cmp(scratch1, Operand::Zero());
2835  __ b(ne, &not_zero);
2836  __ vmov(scratch2, d5.high());
2837  __ tst(scratch2, Operand(HeapNumber::kSignMask));
2838  __ b(ne, &return_heap_number);
2839  __ bind(&not_zero);
2840 
2841  // Tag the result and return.
2842  __ SmiTag(r0, scratch1);
2843  __ Ret();
2844  } else {
2845  // DIV just falls through to allocating a heap number.
2846  }
2847 
2848  __ bind(&return_heap_number);
2849  // Return a heap number, or fall through to type transition or runtime
2850  // call if we can't.
2851  if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2852  : BinaryOpIC::INT32)) {
2853  // We are using vfp registers so r5 is available.
2854  heap_number_result = r5;
2855  GenerateHeapResultAllocation(masm,
2856  heap_number_result,
2857  heap_number_map,
2858  scratch1,
2859  scratch2,
2860  &call_runtime);
2861  __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
2862  __ vstr(d5, r0, HeapNumber::kValueOffset);
2863  __ mov(r0, heap_number_result);
2864  __ Ret();
2865  }
2866 
2867  // A DIV operation expecting an integer result falls through
2868  // to type transition.
2869 
2870  } else {
2871  // We preserved r0 and r1 to be able to call runtime.
2872  // Save the left value on the stack.
2873  __ Push(r5, r4);
2874 
2875  Label pop_and_call_runtime;
2876 
2877  // Allocate a heap number to store the result.
2878  heap_number_result = r5;
2879  GenerateHeapResultAllocation(masm,
2880  heap_number_result,
2881  heap_number_map,
2882  scratch1,
2883  scratch2,
2884  &pop_and_call_runtime);
2885 
2886  // Load the left value from the value saved on the stack.
2887  __ Pop(r1, r0);
2888 
2889  // Call the C function to handle the double operation.
2891  masm, op_, heap_number_result, scratch1);
2892  if (FLAG_debug_code) {
2893  __ stop("Unreachable code.");
2894  }
2895 
2896  __ bind(&pop_and_call_runtime);
2897  __ Drop(2);
2898  __ b(&call_runtime);
2899  }
2900 
2901  break;
2902  }
2903 
2904  case Token::BIT_OR:
2905  case Token::BIT_XOR:
2906  case Token::BIT_AND:
2907  case Token::SAR:
2908  case Token::SHR:
2909  case Token::SHL: {
2910  Label return_heap_number;
2911  Register scratch3 = r5;
2912  // Convert operands to 32-bit integers. Right in r2 and left in r3. The
2913  // registers r0 and r1 (right and left) are preserved for the runtime
2914  // call.
2916  left,
2917  r3,
2918  heap_number_map,
2919  scratch1,
2920  scratch2,
2921  scratch3,
2922  d0,
2923  &transition);
2925  right,
2926  r2,
2927  heap_number_map,
2928  scratch1,
2929  scratch2,
2930  scratch3,
2931  d0,
2932  &transition);
2933 
2934  // The ECMA-262 standard specifies that, for shift operations, only the
2935  // 5 least significant bits of the shift value should be used.
2936  switch (op_) {
2937  case Token::BIT_OR:
2938  __ orr(r2, r3, Operand(r2));
2939  break;
2940  case Token::BIT_XOR:
2941  __ eor(r2, r3, Operand(r2));
2942  break;
2943  case Token::BIT_AND:
2944  __ and_(r2, r3, Operand(r2));
2945  break;
2946  case Token::SAR:
2947  __ and_(r2, r2, Operand(0x1f));
2948  __ mov(r2, Operand(r3, ASR, r2));
2949  break;
2950  case Token::SHR:
2951  __ and_(r2, r2, Operand(0x1f));
2952  __ mov(r2, Operand(r3, LSR, r2), SetCC);
2953  // SHR is special because it is required to produce a positive answer.
2954  // We only get a negative result if the shift value (r2) is 0.
2955  // This result cannot be respresented as a signed 32-bit integer, try
2956  // to return a heap number if we can.
2957  // The non vfp3 code does not support this special case, so jump to
2958  // runtime if we don't support it.
2960  __ b(mi, (result_type_ <= BinaryOpIC::INT32)
2961  ? &transition
2962  : &return_heap_number);
2963  } else {
2964  __ b(mi, (result_type_ <= BinaryOpIC::INT32)
2965  ? &transition
2966  : &call_runtime);
2967  }
2968  break;
2969  case Token::SHL:
2970  __ and_(r2, r2, Operand(0x1f));
2971  __ mov(r2, Operand(r3, LSL, r2));
2972  break;
2973  default:
2974  UNREACHABLE();
2975  }
2976 
2977  // Check if the result fits in a smi.
2978  __ add(scratch1, r2, Operand(0x40000000), SetCC);
2979  // If not try to return a heap number. (We know the result is an int32.)
2980  __ b(mi, &return_heap_number);
2981  // Tag the result and return.
2982  __ SmiTag(r0, r2);
2983  __ Ret();
2984 
2985  __ bind(&return_heap_number);
2986  heap_number_result = r5;
2987  GenerateHeapResultAllocation(masm,
2988  heap_number_result,
2989  heap_number_map,
2990  scratch1,
2991  scratch2,
2992  &call_runtime);
2993 
2995  CpuFeatures::Scope scope(VFP3);
2996  if (op_ != Token::SHR) {
2997  // Convert the result to a floating point value.
2998  __ vmov(double_scratch.low(), r2);
2999  __ vcvt_f64_s32(double_scratch, double_scratch.low());
3000  } else {
3001  // The result must be interpreted as an unsigned 32-bit integer.
3002  __ vmov(double_scratch.low(), r2);
3003  __ vcvt_f64_u32(double_scratch, double_scratch.low());
3004  }
3005 
3006  // Store the result.
3007  __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3008  __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
3009  __ mov(r0, heap_number_result);
3010  __ Ret();
3011  } else {
3012  // Tail call that writes the int32 in r2 to the heap number in r0, using
3013  // r3 as scratch. r0 is preserved and returned.
3014  __ mov(r0, r5);
3015  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3016  __ TailCallStub(&stub);
3017  }
3018 
3019  break;
3020  }
3021 
3022  default:
3023  UNREACHABLE();
3024  }
3025 
3026  // We never expect DIV to yield an integer result, so we always generate
3027  // type transition code for DIV operations expecting an integer result: the
3028  // code will fall through to this type transition.
3029  if (transition.is_linked() ||
3030  ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3031  __ bind(&transition);
3032  GenerateTypeTransition(masm);
3033  }
3034 
3035  __ bind(&call_runtime);
3036  GenerateCallRuntime(masm);
3037 }
3038 
3039 
3040 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3041  Label call_runtime;
3042 
3043  if (op_ == Token::ADD) {
3044  // Handle string addition here, because it is the only operation
3045  // that does not do a ToNumber conversion on the operands.
3046  GenerateAddStrings(masm);
3047  }
3048 
3049  // Convert oddball arguments to numbers.
3050  Label check, done;
3051  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3052  __ b(ne, &check);
3053  if (Token::IsBitOp(op_)) {
3054  __ mov(r1, Operand(Smi::FromInt(0)));
3055  } else {
3056  __ LoadRoot(r1, Heap::kNanValueRootIndex);
3057  }
3058  __ jmp(&done);
3059  __ bind(&check);
3060  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3061  __ b(ne, &done);
3062  if (Token::IsBitOp(op_)) {
3063  __ mov(r0, Operand(Smi::FromInt(0)));
3064  } else {
3065  __ LoadRoot(r0, Heap::kNanValueRootIndex);
3066  }
3067  __ bind(&done);
3068 
3069  GenerateHeapNumberStub(masm);
3070 }
3071 
3072 
3073 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3074  Label call_runtime;
3075  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3076 
3077  __ bind(&call_runtime);
3078  GenerateCallRuntime(masm);
3079 }
3080 
3081 
3082 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3083  Label call_runtime, call_string_add_or_runtime;
3084 
3085  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3086 
3087  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3088 
3089  __ bind(&call_string_add_or_runtime);
3090  if (op_ == Token::ADD) {
3091  GenerateAddStrings(masm);
3092  }
3093 
3094  __ bind(&call_runtime);
3095  GenerateCallRuntime(masm);
3096 }
3097 
3098 
3099 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3100  ASSERT(op_ == Token::ADD);
3101  Label left_not_string, call_runtime;
3102 
3103  Register left = r1;
3104  Register right = r0;
3105 
3106  // Check if left argument is a string.
3107  __ JumpIfSmi(left, &left_not_string);
3108  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
3109  __ b(ge, &left_not_string);
3110 
3111  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3112  GenerateRegisterArgsPush(masm);
3113  __ TailCallStub(&string_add_left_stub);
3114 
3115  // Left operand is not a string, test right.
3116  __ bind(&left_not_string);
3117  __ JumpIfSmi(right, &call_runtime);
3118  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
3119  __ b(ge, &call_runtime);
3120 
3121  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3122  GenerateRegisterArgsPush(masm);
3123  __ TailCallStub(&string_add_right_stub);
3124 
3125  // At least one argument is not a string.
3126  __ bind(&call_runtime);
3127 }
3128 
3129 
3130 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3131  GenerateRegisterArgsPush(masm);
3132  switch (op_) {
3133  case Token::ADD:
3134  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3135  break;
3136  case Token::SUB:
3137  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3138  break;
3139  case Token::MUL:
3140  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3141  break;
3142  case Token::DIV:
3143  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3144  break;
3145  case Token::MOD:
3146  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3147  break;
3148  case Token::BIT_OR:
3149  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3150  break;
3151  case Token::BIT_AND:
3152  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3153  break;
3154  case Token::BIT_XOR:
3155  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3156  break;
3157  case Token::SAR:
3158  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3159  break;
3160  case Token::SHR:
3161  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3162  break;
3163  case Token::SHL:
3164  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3165  break;
3166  default:
3167  UNREACHABLE();
3168  }
3169 }
3170 
3171 
3172 void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
3173  Register result,
3174  Register heap_number_map,
3175  Register scratch1,
3176  Register scratch2,
3177  Label* gc_required) {
3178  // Code below will scratch result if allocation fails. To keep both arguments
3179  // intact for the runtime call result cannot be one of these.
3180  ASSERT(!result.is(r0) && !result.is(r1));
3181 
3182  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3183  Label skip_allocation, allocated;
3184  Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
3185  // If the overwritable operand is already an object, we skip the
3186  // allocation of a heap number.
3187  __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3188  // Allocate a heap number for the result.
3189  __ AllocateHeapNumber(
3190  result, scratch1, scratch2, heap_number_map, gc_required);
3191  __ b(&allocated);
3192  __ bind(&skip_allocation);
3193  // Use object holding the overwritable operand for result.
3194  __ mov(result, Operand(overwritable_operand));
3195  __ bind(&allocated);
3196  } else {
3197  ASSERT(mode_ == NO_OVERWRITE);
3198  __ AllocateHeapNumber(
3199  result, scratch1, scratch2, heap_number_map, gc_required);
3200  }
3201 }
3202 
3203 
3204 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3205  __ Push(r1, r0);
3206 }
3207 
3208 
3209 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3210  // Untagged case: double input in d2, double result goes
3211  // into d2.
3212  // Tagged case: tagged input on top of stack and in r0,
3213  // tagged result (heap number) goes into r0.
3214 
3215  Label input_not_smi;
3216  Label loaded;
3217  Label calculate;
3218  Label invalid_cache;
3219  const Register scratch0 = r9;
3220  const Register scratch1 = r7;
3221  const Register cache_entry = r0;
3222  const bool tagged = (argument_type_ == TAGGED);
3223 
3225  CpuFeatures::Scope scope(VFP3);
3226  if (tagged) {
3227  // Argument is a number and is on stack and in r0.
3228  // Load argument and check if it is a smi.
3229  __ JumpIfNotSmi(r0, &input_not_smi);
3230 
3231  // Input is a smi. Convert to double and load the low and high words
3232  // of the double into r2, r3.
3233  __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3234  __ b(&loaded);
3235 
3236  __ bind(&input_not_smi);
3237  // Check if input is a HeapNumber.
3238  __ CheckMap(r0,
3239  r1,
3240  Heap::kHeapNumberMapRootIndex,
3241  &calculate,
3243  // Input is a HeapNumber. Load it to a double register and store the
3244  // low and high words into r2, r3.
3246  __ vmov(r2, r3, d0);
3247  } else {
3248  // Input is untagged double in d2. Output goes to d2.
3249  __ vmov(r2, r3, d2);
3250  }
3251  __ bind(&loaded);
3252  // r2 = low 32 bits of double value
3253  // r3 = high 32 bits of double value
3254  // Compute hash (the shifts are arithmetic):
3255  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3256  __ eor(r1, r2, Operand(r3));
3257  __ eor(r1, r1, Operand(r1, ASR, 16));
3258  __ eor(r1, r1, Operand(r1, ASR, 8));
3259  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3260  __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3261 
3262  // r2 = low 32 bits of double value.
3263  // r3 = high 32 bits of double value.
3264  // r1 = TranscendentalCache::hash(double value).
3265  Isolate* isolate = masm->isolate();
3266  ExternalReference cache_array =
3267  ExternalReference::transcendental_cache_array_address(isolate);
3268  __ mov(cache_entry, Operand(cache_array));
3269  // cache_entry points to cache array.
3270  int cache_array_index
3271  = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3272  __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3273  // r0 points to the cache for the type type_.
3274  // If NULL, the cache hasn't been initialized yet, so go through runtime.
3275  __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3276  __ b(eq, &invalid_cache);
3277 
3278 #ifdef DEBUG
3279  // Check that the layout of cache elements match expectations.
3280  { TranscendentalCache::SubCache::Element test_elem[2];
3281  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3282  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3283  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3284  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3285  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3286  CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3287  CHECK_EQ(0, elem_in0 - elem_start);
3288  CHECK_EQ(kIntSize, elem_in1 - elem_start);
3289  CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3290  }
3291 #endif
3292 
3293  // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
3294  __ add(r1, r1, Operand(r1, LSL, 1));
3295  __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
3296  // Check if cache matches: Double value is stored in uint32_t[2] array.
3297  __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
3298  __ cmp(r2, r4);
3299  __ cmp(r3, r5, eq);
3300  __ b(ne, &calculate);
3301  // Cache hit. Load result, cleanup and return.
3302  Counters* counters = masm->isolate()->counters();
3303  __ IncrementCounter(
3304  counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3305  if (tagged) {
3306  // Pop input value from stack and load result into r0.
3307  __ pop();
3308  __ mov(r0, Operand(r6));
3309  } else {
3310  // Load result into d2.
3312  }
3313  __ Ret();
3314  } // if (CpuFeatures::IsSupported(VFP3))
3315 
3316  __ bind(&calculate);
3317  Counters* counters = masm->isolate()->counters();
3318  __ IncrementCounter(
3319  counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3320  if (tagged) {
3321  __ bind(&invalid_cache);
3322  ExternalReference runtime_function =
3323  ExternalReference(RuntimeFunction(), masm->isolate());
3324  __ TailCallExternalReference(runtime_function, 1, 1);
3325  } else {
3327  CpuFeatures::Scope scope(VFP3);
3328 
3329  Label no_update;
3330  Label skip_cache;
3331 
3332  // Call C function to calculate the result and update the cache.
3333  // Register r0 holds precalculated cache entry address; preserve
3334  // it on the stack and pop it into register cache_entry after the
3335  // call.
3336  __ push(cache_entry);
3337  GenerateCallCFunction(masm, scratch0);
3338  __ GetCFunctionDoubleResult(d2);
3339 
3340  // Try to update the cache. If we cannot allocate a
3341  // heap number, we return the result without updating.
3342  __ pop(cache_entry);
3343  __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3344  __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3346  __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
3347  __ Ret();
3348 
3349  __ bind(&invalid_cache);
3350  // The cache is invalid. Call runtime which will recreate the
3351  // cache.
3352  __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3353  __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
3355  {
3356  FrameScope scope(masm, StackFrame::INTERNAL);
3357  __ push(r0);
3358  __ CallRuntime(RuntimeFunction(), 1);
3359  }
3361  __ Ret();
3362 
3363  __ bind(&skip_cache);
3364  // Call C function to calculate the result and answer directly
3365  // without updating the cache.
3366  GenerateCallCFunction(masm, scratch0);
3367  __ GetCFunctionDoubleResult(d2);
3368  __ bind(&no_update);
3369 
3370  // We return the value in d2 without adding it to the cache, but
3371  // we cause a scavenging GC so that future allocations will succeed.
3372  {
3373  FrameScope scope(masm, StackFrame::INTERNAL);
3374 
3375  // Allocate an aligned object larger than a HeapNumber.
3376  ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3377  __ mov(scratch0, Operand(4 * kPointerSize));
3378  __ push(scratch0);
3379  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3380  }
3381  __ Ret();
3382  }
3383 }
3384 
3385 
3386 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3387  Register scratch) {
3388  Isolate* isolate = masm->isolate();
3389 
3390  __ push(lr);
3391  __ PrepareCallCFunction(0, 1, scratch);
3392  if (masm->use_eabi_hardfloat()) {
3393  __ vmov(d0, d2);
3394  } else {
3395  __ vmov(r0, r1, d2);
3396  }
3397  AllowExternalCallThatCantCauseGC scope(masm);
3398  switch (type_) {
3400  __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
3401  0, 1);
3402  break;
3404  __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
3405  0, 1);
3406  break;
3408  __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3409  0, 1);
3410  break;
3412  __ CallCFunction(ExternalReference::math_log_double_function(isolate),
3413  0, 1);
3414  break;
3415  default:
3416  UNIMPLEMENTED();
3417  break;
3418  }
3419  __ pop(lr);
3420 }
3421 
3422 
3423 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3424  switch (type_) {
3425  // Add more cases when necessary.
3426  case TranscendentalCache::SIN: return Runtime::kMath_sin;
3427  case TranscendentalCache::COS: return Runtime::kMath_cos;
3428  case TranscendentalCache::TAN: return Runtime::kMath_tan;
3429  case TranscendentalCache::LOG: return Runtime::kMath_log;
3430  default:
3431  UNIMPLEMENTED();
3432  return Runtime::kAbort;
3433  }
3434 }
3435 
3436 
3437 void StackCheckStub::Generate(MacroAssembler* masm) {
3438  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3439 }
3440 
3441 
3442 void InterruptStub::Generate(MacroAssembler* masm) {
3443  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3444 }
3445 
3446 
3447 void MathPowStub::Generate(MacroAssembler* masm) {
3448  CpuFeatures::Scope vfp3_scope(VFP3);
3449  const Register base = r1;
3450  const Register exponent = r2;
3451  const Register heapnumbermap = r5;
3452  const Register heapnumber = r0;
3453  const DoubleRegister double_base = d1;
3454  const DoubleRegister double_exponent = d2;
3455  const DoubleRegister double_result = d3;
3456  const DoubleRegister double_scratch = d0;
3457  const SwVfpRegister single_scratch = s0;
3458  const Register scratch = r9;
3459  const Register scratch2 = r7;
3460 
3461  Label call_runtime, done, int_exponent;
3462  if (exponent_type_ == ON_STACK) {
3463  Label base_is_smi, unpack_exponent;
3464  // The exponent and base are supplied as arguments on the stack.
3465  // This can only happen if the stub is called from non-optimized code.
3466  // Load input parameters from stack to double registers.
3467  __ ldr(base, MemOperand(sp, 1 * kPointerSize));
3468  __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
3469 
3470  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3471 
3472  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3473  __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3474  __ cmp(scratch, heapnumbermap);
3475  __ b(ne, &call_runtime);
3476 
3477  __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3478  __ jmp(&unpack_exponent);
3479 
3480  __ bind(&base_is_smi);
3481  __ vmov(single_scratch, scratch);
3482  __ vcvt_f64_s32(double_base, single_scratch);
3483  __ bind(&unpack_exponent);
3484 
3485  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3486 
3487  __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3488  __ cmp(scratch, heapnumbermap);
3489  __ b(ne, &call_runtime);
3490  __ vldr(double_exponent,
3492  } else if (exponent_type_ == TAGGED) {
3493  // Base is already in double_base.
3494  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3495 
3496  __ vldr(double_exponent,
3498  }
3499 
3500  if (exponent_type_ != INTEGER) {
3501  Label int_exponent_convert;
3502  // Detect integer exponents stored as double.
3503  __ vcvt_u32_f64(single_scratch, double_exponent);
3504  // We do not check for NaN or Infinity here because comparing numbers on
3505  // ARM correctly distinguishes NaNs. We end up calling the built-in.
3506  __ vcvt_f64_u32(double_scratch, single_scratch);
3507  __ VFPCompareAndSetFlags(double_scratch, double_exponent);
3508  __ b(eq, &int_exponent_convert);
3509 
3510  if (exponent_type_ == ON_STACK) {
3511  // Detect square root case. Crankshaft detects constant +/-0.5 at
3512  // compile time and uses DoMathPowHalf instead. We then skip this check
3513  // for non-constant cases of +/-0.5 as these hardly occur.
3514  Label not_plus_half;
3515 
3516  // Test for 0.5.
3517  __ vmov(double_scratch, 0.5);
3518  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
3519  __ b(ne, &not_plus_half);
3520 
3521  // Calculates square root of base. Check for the special case of
3522  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3523  __ vmov(double_scratch, -V8_INFINITY);
3524  __ VFPCompareAndSetFlags(double_base, double_scratch);
3525  __ vneg(double_result, double_scratch, eq);
3526  __ b(eq, &done);
3527 
3528  // Add +0 to convert -0 to +0.
3529  __ vadd(double_scratch, double_base, kDoubleRegZero);
3530  __ vsqrt(double_result, double_scratch);
3531  __ jmp(&done);
3532 
3533  __ bind(&not_plus_half);
3534  __ vmov(double_scratch, -0.5);
3535  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
3536  __ b(ne, &call_runtime);
3537 
3538  // Calculates square root of base. Check for the special case of
3539  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3540  __ vmov(double_scratch, -V8_INFINITY);
3541  __ VFPCompareAndSetFlags(double_base, double_scratch);
3542  __ vmov(double_result, kDoubleRegZero, eq);
3543  __ b(eq, &done);
3544 
3545  // Add +0 to convert -0 to +0.
3546  __ vadd(double_scratch, double_base, kDoubleRegZero);
3547  __ vmov(double_result, 1);
3548  __ vsqrt(double_scratch, double_scratch);
3549  __ vdiv(double_result, double_result, double_scratch);
3550  __ jmp(&done);
3551  }
3552 
3553  __ push(lr);
3554  {
3555  AllowExternalCallThatCantCauseGC scope(masm);
3556  __ PrepareCallCFunction(0, 2, scratch);
3557  __ SetCallCDoubleArguments(double_base, double_exponent);
3558  __ CallCFunction(
3559  ExternalReference::power_double_double_function(masm->isolate()),
3560  0, 2);
3561  }
3562  __ pop(lr);
3563  __ GetCFunctionDoubleResult(double_result);
3564  __ jmp(&done);
3565 
3566  __ bind(&int_exponent_convert);
3567  __ vcvt_u32_f64(single_scratch, double_exponent);
3568  __ vmov(scratch, single_scratch);
3569  }
3570 
3571  // Calculate power with integer exponent.
3572  __ bind(&int_exponent);
3573 
3574  // Get two copies of exponent in the registers scratch and exponent.
3575  if (exponent_type_ == INTEGER) {
3576  __ mov(scratch, exponent);
3577  } else {
3578  // Exponent has previously been stored into scratch as untagged integer.
3579  __ mov(exponent, scratch);
3580  }
3581  __ vmov(double_scratch, double_base); // Back up base.
3582  __ vmov(double_result, 1.0);
3583 
3584  // Get absolute value of exponent.
3585  __ cmp(scratch, Operand(0));
3586  __ mov(scratch2, Operand(0), LeaveCC, mi);
3587  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
3588 
3589  Label while_true;
3590  __ bind(&while_true);
3591  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
3592  __ vmul(double_result, double_result, double_scratch, cs);
3593  __ vmul(double_scratch, double_scratch, double_scratch, ne);
3594  __ b(ne, &while_true);
3595 
3596  __ cmp(exponent, Operand(0));
3597  __ b(ge, &done);
3598  __ vmov(double_scratch, 1.0);
3599  __ vdiv(double_result, double_scratch, double_result);
3600  // Test whether result is zero. Bail out to check for subnormal result.
3601  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3602  __ VFPCompareAndSetFlags(double_result, 0.0);
3603  __ b(ne, &done);
3604  // double_exponent may not containe the exponent value if the input was a
3605  // smi. We set it with exponent value before bailing out.
3606  __ vmov(single_scratch, exponent);
3607  __ vcvt_f64_s32(double_exponent, single_scratch);
3608 
3609  // Returning or bailing out.
3610  Counters* counters = masm->isolate()->counters();
3611  if (exponent_type_ == ON_STACK) {
3612  // The arguments are still on the stack.
3613  __ bind(&call_runtime);
3614  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3615 
3616  // The stub is called from non-optimized code, which expects the result
3617  // as heap number in exponent.
3618  __ bind(&done);
3619  __ AllocateHeapNumber(
3620  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3621  __ vstr(double_result,
3623  ASSERT(heapnumber.is(r0));
3624  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3625  __ Ret(2);
3626  } else {
3627  __ push(lr);
3628  {
3629  AllowExternalCallThatCantCauseGC scope(masm);
3630  __ PrepareCallCFunction(0, 2, scratch);
3631  __ SetCallCDoubleArguments(double_base, double_exponent);
3632  __ CallCFunction(
3633  ExternalReference::power_double_double_function(masm->isolate()),
3634  0, 2);
3635  }
3636  __ pop(lr);
3637  __ GetCFunctionDoubleResult(double_result);
3638 
3639  __ bind(&done);
3640  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3641  __ Ret();
3642  }
3643 }
3644 
3645 
3646 bool CEntryStub::NeedsImmovableCode() {
3647  return true;
3648 }
3649 
3650 
3652  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3653  result_size_ == 1;
3654 }
3655 
3656 
3657 void CodeStub::GenerateStubsAheadOfTime() {
3662 }
3663 
3664 
3665 void CodeStub::GenerateFPStubs() {
3666  CEntryStub save_doubles(1, kSaveFPRegs);
3667  Handle<Code> code = save_doubles.GetCode();
3668  code->set_is_pregenerated(true);
3669  StoreBufferOverflowStub stub(kSaveFPRegs);
3670  stub.GetCode()->set_is_pregenerated(true);
3671  code->GetIsolate()->set_fp_stubs_generated(true);
3672 }
3673 
3674 
3676  CEntryStub stub(1, kDontSaveFPRegs);
3677  Handle<Code> code = stub.GetCode();
3678  code->set_is_pregenerated(true);
3679 }
3680 
3681 
3682 void CEntryStub::GenerateCore(MacroAssembler* masm,
3683  Label* throw_normal_exception,
3684  Label* throw_termination_exception,
3685  Label* throw_out_of_memory_exception,
3686  bool do_gc,
3687  bool always_allocate) {
3688  // r0: result parameter for PerformGC, if any
3689  // r4: number of arguments including receiver (C callee-saved)
3690  // r5: pointer to builtin function (C callee-saved)
3691  // r6: pointer to the first argument (C callee-saved)
3692  Isolate* isolate = masm->isolate();
3693 
3694  if (do_gc) {
3695  // Passing r0.
3696  __ PrepareCallCFunction(1, 0, r1);
3697  __ CallCFunction(ExternalReference::perform_gc_function(isolate),
3698  1, 0);
3699  }
3700 
3701  ExternalReference scope_depth =
3702  ExternalReference::heap_always_allocate_scope_depth(isolate);
3703  if (always_allocate) {
3704  __ mov(r0, Operand(scope_depth));
3705  __ ldr(r1, MemOperand(r0));
3706  __ add(r1, r1, Operand(1));
3707  __ str(r1, MemOperand(r0));
3708  }
3709 
3710  // Call C built-in.
3711  // r0 = argc, r1 = argv
3712  __ mov(r0, Operand(r4));
3713  __ mov(r1, Operand(r6));
3714 
3715 #if defined(V8_HOST_ARCH_ARM)
3716  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
3717  int frame_alignment_mask = frame_alignment - 1;
3718  if (FLAG_debug_code) {
3719  if (frame_alignment > kPointerSize) {
3720  Label alignment_as_expected;
3721  ASSERT(IsPowerOf2(frame_alignment));
3722  __ tst(sp, Operand(frame_alignment_mask));
3723  __ b(eq, &alignment_as_expected);
3724  // Don't use Check here, as it will call Runtime_Abort re-entering here.
3725  __ stop("Unexpected alignment");
3726  __ bind(&alignment_as_expected);
3727  }
3728  }
3729 #endif
3730 
3731  __ mov(r2, Operand(ExternalReference::isolate_address()));
3732 
3733  // To let the GC traverse the return address of the exit frames, we need to
3734  // know where the return address is. The CEntryStub is unmovable, so
3735  // we can store the address on the stack to be able to find it again and
3736  // we never have to restore it, because it will not change.
3737  // Compute the return address in lr to return to after the jump below. Pc is
3738  // already at '+ 8' from the current instruction but return is after three
3739  // instructions so add another 4 to pc to get the return address.
3740  {
3741  // Prevent literal pool emission before return address.
3742  Assembler::BlockConstPoolScope block_const_pool(masm);
3743  masm->add(lr, pc, Operand(4));
3744  __ str(lr, MemOperand(sp, 0));
3745  masm->Jump(r5);
3746  }
3747 
3748  if (always_allocate) {
3749  // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
3750  // though (contain the result).
3751  __ mov(r2, Operand(scope_depth));
3752  __ ldr(r3, MemOperand(r2));
3753  __ sub(r3, r3, Operand(1));
3754  __ str(r3, MemOperand(r2));
3755  }
3756 
3757  // check for failure result
3758  Label failure_returned;
3759  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3760  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
3761  __ add(r2, r0, Operand(1));
3762  __ tst(r2, Operand(kFailureTagMask));
3763  __ b(eq, &failure_returned);
3764 
3765  // Exit C frame and return.
3766  // r0:r1: result
3767  // sp: stack pointer
3768  // fp: frame pointer
3769  // Callee-saved register r4 still holds argc.
3770  __ LeaveExitFrame(save_doubles_, r4);
3771  __ mov(pc, lr);
3772 
3773  // check if we should retry or throw exception
3774  Label retry;
3775  __ bind(&failure_returned);
3777  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3778  __ b(eq, &retry);
3779 
3780  // Special handling of out of memory exceptions.
3781  Failure* out_of_memory = Failure::OutOfMemoryException();
3782  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3783  __ b(eq, throw_out_of_memory_exception);
3784 
3785  // Retrieve the pending exception and clear the variable.
3786  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
3787  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3788  isolate)));
3789  __ ldr(r0, MemOperand(ip));
3790  __ str(r3, MemOperand(ip));
3791 
3792  // Special handling of termination exceptions which are uncatchable
3793  // by javascript code.
3794  __ cmp(r0, Operand(isolate->factory()->termination_exception()));
3795  __ b(eq, throw_termination_exception);
3796 
3797  // Handle normal exception.
3798  __ jmp(throw_normal_exception);
3799 
3800  __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
3801 }
3802 
3803 
3804 void CEntryStub::Generate(MacroAssembler* masm) {
3805  // Called from JavaScript; parameters are on stack as if calling JS function
3806  // r0: number of arguments including receiver
3807  // r1: pointer to builtin function
3808  // fp: frame pointer (restored after C call)
3809  // sp: stack pointer (restored as callee's sp after C call)
3810  // cp: current context (C callee-saved)
3811 
3812  // Result returned in r0 or r0+r1 by default.
3813 
3814  // NOTE: Invocations of builtins may return failure objects
3815  // instead of a proper result. The builtin entry handles
3816  // this by performing a garbage collection and retrying the
3817  // builtin once.
3818 
3819  // Compute the argv pointer in a callee-saved register.
3820  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
3821  __ sub(r6, r6, Operand(kPointerSize));
3822 
3823  // Enter the exit frame that transitions from JavaScript to C++.
3824  FrameScope scope(masm, StackFrame::MANUAL);
3825  __ EnterExitFrame(save_doubles_);
3826 
3827  // Set up argc and the builtin function in callee-saved registers.
3828  __ mov(r4, Operand(r0));
3829  __ mov(r5, Operand(r1));
3830 
3831  // r4: number of arguments (C callee-saved)
3832  // r5: pointer to builtin function (C callee-saved)
3833  // r6: pointer to first argument (C callee-saved)
3834 
3835  Label throw_normal_exception;
3836  Label throw_termination_exception;
3837  Label throw_out_of_memory_exception;
3838 
3839  // Call into the runtime system.
3840  GenerateCore(masm,
3841  &throw_normal_exception,
3842  &throw_termination_exception,
3843  &throw_out_of_memory_exception,
3844  false,
3845  false);
3846 
3847  // Do space-specific GC and retry runtime call.
3848  GenerateCore(masm,
3849  &throw_normal_exception,
3850  &throw_termination_exception,
3851  &throw_out_of_memory_exception,
3852  true,
3853  false);
3854 
3855  // Do full GC and retry runtime call one final time.
3856  Failure* failure = Failure::InternalError();
3857  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
3858  GenerateCore(masm,
3859  &throw_normal_exception,
3860  &throw_termination_exception,
3861  &throw_out_of_memory_exception,
3862  true,
3863  true);
3864 
3865  __ bind(&throw_out_of_memory_exception);
3866  // Set external caught exception to false.
3867  Isolate* isolate = masm->isolate();
3868  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
3869  isolate);
3870  __ mov(r0, Operand(false, RelocInfo::NONE));
3871  __ mov(r2, Operand(external_caught));
3872  __ str(r0, MemOperand(r2));
3873 
3874  // Set pending exception and r0 to out of memory exception.
3875  Failure* out_of_memory = Failure::OutOfMemoryException();
3876  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3877  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3878  isolate)));
3879  __ str(r0, MemOperand(r2));
3880  // Fall through to the next label.
3881 
3882  __ bind(&throw_termination_exception);
3883  __ ThrowUncatchable(r0);
3884 
3885  __ bind(&throw_normal_exception);
3886  __ Throw(r0);
3887 }
3888 
3889 
3890 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3891  // r0: code entry
3892  // r1: function
3893  // r2: receiver
3894  // r3: argc
3895  // [sp+0]: argv
3896 
3897  Label invoke, handler_entry, exit;
3898 
3899  // Called from C, so do not pop argc and args on exit (preserve sp)
3900  // No need to save register-passed args
3901  // Save callee-saved registers (incl. cp and fp), sp, and lr
3902  __ stm(db_w, sp, kCalleeSaved | lr.bit());
3903 
3905  CpuFeatures::Scope scope(VFP3);
3906  // Save callee-saved vfp registers.
3908  // Set up the reserved register for 0.0.
3909  __ vmov(kDoubleRegZero, 0.0);
3910  }
3911 
3912  // Get address of argv, see stm above.
3913  // r0: code entry
3914  // r1: function
3915  // r2: receiver
3916  // r3: argc
3917 
3918  // Set up argv in r4.
3919  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
3921  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
3922  }
3923  __ ldr(r4, MemOperand(sp, offset_to_argv));
3924 
3925  // Push a frame with special values setup to mark it as an entry frame.
3926  // r0: code entry
3927  // r1: function
3928  // r2: receiver
3929  // r3: argc
3930  // r4: argv
3931  Isolate* isolate = masm->isolate();
3932  __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
3933  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3934  __ mov(r7, Operand(Smi::FromInt(marker)));
3935  __ mov(r6, Operand(Smi::FromInt(marker)));
3936  __ mov(r5,
3937  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
3938  __ ldr(r5, MemOperand(r5));
3939  __ Push(r8, r7, r6, r5);
3940 
3941  // Set up frame pointer for the frame to be pushed.
3942  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
3943 
3944  // If this is the outermost JS call, set js_entry_sp value.
3945  Label non_outermost_js;
3946  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
3947  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
3948  __ ldr(r6, MemOperand(r5));
3949  __ cmp(r6, Operand::Zero());
3950  __ b(ne, &non_outermost_js);
3951  __ str(fp, MemOperand(r5));
3952  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3953  Label cont;
3954  __ b(&cont);
3955  __ bind(&non_outermost_js);
3956  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3957  __ bind(&cont);
3958  __ push(ip);
3959 
3960  // Jump to a faked try block that does the invoke, with a faked catch
3961  // block that sets the pending exception.
3962  __ jmp(&invoke);
3963 
3964  // Block literal pool emission whilst taking the position of the handler
3965  // entry. This avoids making the assumption that literal pools are always
3966  // emitted after an instruction is emitted, rather than before.
3967  {
3968  Assembler::BlockConstPoolScope block_const_pool(masm);
3969  __ bind(&handler_entry);
3970  handler_offset_ = handler_entry.pos();
3971  // Caught exception: Store result (exception) in the pending exception
3972  // field in the JSEnv and return a failure sentinel. Coming in here the
3973  // fp will be invalid because the PushTryHandler below sets it to 0 to
3974  // signal the existence of the JSEntry frame.
3975  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3976  isolate)));
3977  }
3978  __ str(r0, MemOperand(ip));
3979  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3980  __ b(&exit);
3981 
3982  // Invoke: Link this frame into the handler chain. There's only one
3983  // handler block in this code object, so its index is 0.
3984  __ bind(&invoke);
3985  // Must preserve r0-r4, r5-r7 are available.
3986  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
3987  // If an exception not caught by another handler occurs, this handler
3988  // returns control to the code after the bl(&invoke) above, which
3989  // restores all kCalleeSaved registers (including cp and fp) to their
3990  // saved values before returning a failure to C.
3991 
3992  // Clear any pending exceptions.
3993  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
3994  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3995  isolate)));
3996  __ str(r5, MemOperand(ip));
3997 
3998  // Invoke the function by calling through JS entry trampoline builtin.
3999  // Notice that we cannot store a reference to the trampoline code directly in
4000  // this stub, because runtime stubs are not traversed when doing GC.
4001 
4002  // Expected registers by Builtins::JSEntryTrampoline
4003  // r0: code entry
4004  // r1: function
4005  // r2: receiver
4006  // r3: argc
4007  // r4: argv
4008  if (is_construct) {
4009  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4010  isolate);
4011  __ mov(ip, Operand(construct_entry));
4012  } else {
4013  ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4014  __ mov(ip, Operand(entry));
4015  }
4016  __ ldr(ip, MemOperand(ip)); // deref address
4017 
4018  // Branch and link to JSEntryTrampoline. We don't use the double underscore
4019  // macro for the add instruction because we don't want the coverage tool
4020  // inserting instructions here after we read the pc. We block literal pool
4021  // emission for the same reason.
4022  {
4023  Assembler::BlockConstPoolScope block_const_pool(masm);
4024  __ mov(lr, Operand(pc));
4025  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
4026  }
4027 
4028  // Unlink this frame from the handler chain.
4029  __ PopTryHandler();
4030 
4031  __ bind(&exit); // r0 holds result
4032  // Check if the current stack frame is marked as the outermost JS frame.
4033  Label non_outermost_js_2;
4034  __ pop(r5);
4035  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4036  __ b(ne, &non_outermost_js_2);
4037  __ mov(r6, Operand::Zero());
4038  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
4039  __ str(r6, MemOperand(r5));
4040  __ bind(&non_outermost_js_2);
4041 
4042  // Restore the top frame descriptors from the stack.
4043  __ pop(r3);
4044  __ mov(ip,
4045  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
4046  __ str(r3, MemOperand(ip));
4047 
4048  // Reset the stack to the callee saved registers.
4049  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4050 
4051  // Restore callee-saved registers and return.
4052 #ifdef DEBUG
4053  if (FLAG_debug_code) {
4054  __ mov(lr, Operand(pc));
4055  }
4056 #endif
4057 
4059  CpuFeatures::Scope scope(VFP3);
4060  // Restore callee-saved vfp registers.
4062  }
4063 
4064  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
4065 }
4066 
4067 
4068 // Uses registers r0 to r4.
4069 // Expected input (depending on whether args are in registers or on the stack):
4070 // * object: r0 or at sp + 1 * kPointerSize.
4071 // * function: r1 or at sp.
4072 //
4073 // An inlined call site may have been generated before calling this stub.
4074 // In this case the offset to the inline site to patch is passed on the stack,
4075 // in the safepoint slot for register r4.
4076 // (See LCodeGen::DoInstanceOfKnownGlobal)
4077 void InstanceofStub::Generate(MacroAssembler* masm) {
4078  // Call site inlining and patching implies arguments in registers.
4079  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4080  // ReturnTrueFalse is only implemented for inlined call sites.
4081  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4082 
4083  // Fixed register usage throughout the stub:
4084  const Register object = r0; // Object (lhs).
4085  Register map = r3; // Map of the object.
4086  const Register function = r1; // Function (rhs).
4087  const Register prototype = r4; // Prototype of the function.
4088  const Register inline_site = r9;
4089  const Register scratch = r2;
4090 
4091  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
4092 
4093  Label slow, loop, is_instance, is_not_instance, not_js_object;
4094 
4095  if (!HasArgsInRegisters()) {
4096  __ ldr(object, MemOperand(sp, 1 * kPointerSize));
4097  __ ldr(function, MemOperand(sp, 0));
4098  }
4099 
4100  // Check that the left hand is a JS object and load map.
4101  __ JumpIfSmi(object, &not_js_object);
4102  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4103 
4104  // If there is a call site cache don't look in the global cache, but do the
4105  // real lookup and update the call site cache.
4106  if (!HasCallSiteInlineCheck()) {
4107  Label miss;
4108  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4109  __ b(ne, &miss);
4110  __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
4111  __ b(ne, &miss);
4112  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4113  __ Ret(HasArgsInRegisters() ? 0 : 2);
4114 
4115  __ bind(&miss);
4116  }
4117 
4118  // Get the prototype of the function.
4119  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4120 
4121  // Check that the function prototype is a JS object.
4122  __ JumpIfSmi(prototype, &slow);
4123  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4124 
4125  // Update the global instanceof or call site inlined cache with the current
4126  // map and function. The cached answer will be set when it is known below.
4127  if (!HasCallSiteInlineCheck()) {
4128  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4129  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4130  } else {
4131  ASSERT(HasArgsInRegisters());
4132  // Patch the (relocated) inlined map check.
4133 
4134  // The offset was stored in r4 safepoint slot.
4135  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
4136  __ LoadFromSafepointRegisterSlot(scratch, r4);
4137  __ sub(inline_site, lr, scratch);
4138  // Get the map location in scratch and patch it.
4139  __ GetRelocatedValueLocation(inline_site, scratch);
4140  __ ldr(scratch, MemOperand(scratch));
4142  }
4143 
4144  // Register mapping: r3 is object map and r4 is function prototype.
4145  // Get prototype of object into r2.
4146  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4147 
4148  // We don't need map any more. Use it as a scratch register.
4149  Register scratch2 = map;
4150  map = no_reg;
4151 
4152  // Loop through the prototype chain looking for the function prototype.
4153  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4154  __ bind(&loop);
4155  __ cmp(scratch, Operand(prototype));
4156  __ b(eq, &is_instance);
4157  __ cmp(scratch, scratch2);
4158  __ b(eq, &is_not_instance);
4159  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4160  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4161  __ jmp(&loop);
4162 
4163  __ bind(&is_instance);
4164  if (!HasCallSiteInlineCheck()) {
4165  __ mov(r0, Operand(Smi::FromInt(0)));
4166  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4167  } else {
4168  // Patch the call site to return true.
4169  __ LoadRoot(r0, Heap::kTrueValueRootIndex);
4170  __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4171  // Get the boolean result location in scratch and patch it.
4172  __ GetRelocatedValueLocation(inline_site, scratch);
4173  __ str(r0, MemOperand(scratch));
4174 
4175  if (!ReturnTrueFalseObject()) {
4176  __ mov(r0, Operand(Smi::FromInt(0)));
4177  }
4178  }
4179  __ Ret(HasArgsInRegisters() ? 0 : 2);
4180 
4181  __ bind(&is_not_instance);
4182  if (!HasCallSiteInlineCheck()) {
4183  __ mov(r0, Operand(Smi::FromInt(1)));
4184  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4185  } else {
4186  // Patch the call site to return false.
4187  __ LoadRoot(r0, Heap::kFalseValueRootIndex);
4188  __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4189  // Get the boolean result location in scratch and patch it.
4190  __ GetRelocatedValueLocation(inline_site, scratch);
4191  __ str(r0, MemOperand(scratch));
4192 
4193  if (!ReturnTrueFalseObject()) {
4194  __ mov(r0, Operand(Smi::FromInt(1)));
4195  }
4196  }
4197  __ Ret(HasArgsInRegisters() ? 0 : 2);
4198 
4199  Label object_not_null, object_not_null_or_smi;
4200  __ bind(&not_js_object);
4201  // Before null, smi and string value checks, check that the rhs is a function
4202  // as for a non-function rhs an exception needs to be thrown.
4203  __ JumpIfSmi(function, &slow);
4204  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
4205  __ b(ne, &slow);
4206 
4207  // Null is not instance of anything.
4208  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
4209  __ b(ne, &object_not_null);
4210  __ mov(r0, Operand(Smi::FromInt(1)));
4211  __ Ret(HasArgsInRegisters() ? 0 : 2);
4212 
4213  __ bind(&object_not_null);
4214  // Smi values are not instances of anything.
4215  __ JumpIfNotSmi(object, &object_not_null_or_smi);
4216  __ mov(r0, Operand(Smi::FromInt(1)));
4217  __ Ret(HasArgsInRegisters() ? 0 : 2);
4218 
4219  __ bind(&object_not_null_or_smi);
4220  // String values are not instances of anything.
4221  __ IsObjectJSStringType(object, scratch, &slow);
4222  __ mov(r0, Operand(Smi::FromInt(1)));
4223  __ Ret(HasArgsInRegisters() ? 0 : 2);
4224 
4225  // Slow-case. Tail call builtin.
4226  __ bind(&slow);
4227  if (!ReturnTrueFalseObject()) {
4228  if (HasArgsInRegisters()) {
4229  __ Push(r0, r1);
4230  }
4231  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4232  } else {
4233  {
4234  FrameScope scope(masm, StackFrame::INTERNAL);
4235  __ Push(r0, r1);
4236  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4237  }
4238  __ cmp(r0, Operand::Zero());
4239  __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
4240  __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
4241  __ Ret(HasArgsInRegisters() ? 0 : 2);
4242  }
4243 }
4244 
4245 
4246 Register InstanceofStub::left() { return r0; }
4247 
4248 
4249 Register InstanceofStub::right() { return r1; }
4250 
4251 
4252 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4253  // The displacement is the offset of the last parameter (if any)
4254  // relative to the frame pointer.
4255  const int kDisplacement =
4257 
4258  // Check that the key is a smi.
4259  Label slow;
4260  __ JumpIfNotSmi(r1, &slow);
4261 
4262  // Check if the calling frame is an arguments adaptor frame.
4263  Label adaptor;
4267  __ b(eq, &adaptor);
4268 
4269  // Check index against formal parameters count limit passed in
4270  // through register r0. Use unsigned comparison to get negative
4271  // check for free.
4272  __ cmp(r1, r0);
4273  __ b(hs, &slow);
4274 
4275  // Read the argument from the stack and return it.
4276  __ sub(r3, r0, r1);
4277  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4278  __ ldr(r0, MemOperand(r3, kDisplacement));
4279  __ Jump(lr);
4280 
4281  // Arguments adaptor case: Check index against actual arguments
4282  // limit found in the arguments adaptor frame. Use unsigned
4283  // comparison to get negative check for free.
4284  __ bind(&adaptor);
4286  __ cmp(r1, r0);
4287  __ b(cs, &slow);
4288 
4289  // Read the argument from the adaptor frame and return it.
4290  __ sub(r3, r0, r1);
4291  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4292  __ ldr(r0, MemOperand(r3, kDisplacement));
4293  __ Jump(lr);
4294 
4295  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4296  // by calling the runtime system.
4297  __ bind(&slow);
4298  __ push(r1);
4299  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4300 }
4301 
4302 
4303 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4304  // sp[0] : number of parameters
4305  // sp[4] : receiver displacement
4306  // sp[8] : function
4307 
4308  // Check if the calling frame is an arguments adaptor frame.
4309  Label runtime;
4313  __ b(ne, &runtime);
4314 
4315  // Patch the arguments.length and the parameters pointer in the current frame.
4317  __ str(r2, MemOperand(sp, 0 * kPointerSize));
4318  __ add(r3, r3, Operand(r2, LSL, 1));
4320  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4321 
4322  __ bind(&runtime);
4323  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4324 }
4325 
4326 
4327 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4328  // Stack layout:
4329  // sp[0] : number of parameters (tagged)
4330  // sp[4] : address of receiver argument
4331  // sp[8] : function
4332  // Registers used over whole function:
4333  // r6 : allocated object (tagged)
4334  // r9 : mapped parameter count (tagged)
4335 
4336  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4337  // r1 = parameter count (tagged)
4338 
4339  // Check if the calling frame is an arguments adaptor frame.
4340  Label runtime;
4341  Label adaptor_frame, try_allocate;
4345  __ b(eq, &adaptor_frame);
4346 
4347  // No adaptor, parameter count = argument count.
4348  __ mov(r2, r1);
4349  __ b(&try_allocate);
4350 
4351  // We have an adaptor frame. Patch the parameters pointer.
4352  __ bind(&adaptor_frame);
4354  __ add(r3, r3, Operand(r2, LSL, 1));
4356  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4357 
4358  // r1 = parameter count (tagged)
4359  // r2 = argument count (tagged)
4360  // Compute the mapped parameter count = min(r1, r2) in r1.
4361  __ cmp(r1, Operand(r2));
4362  __ mov(r1, Operand(r2), LeaveCC, gt);
4363 
4364  __ bind(&try_allocate);
4365 
4366  // Compute the sizes of backing store, parameter map, and arguments object.
4367  // 1. Parameter map, has 2 extra words containing context and backing store.
4368  const int kParameterMapHeaderSize =
4370  // If there are no mapped parameters, we do not need the parameter_map.
4371  __ cmp(r1, Operand(Smi::FromInt(0)));
4372  __ mov(r9, Operand::Zero(), LeaveCC, eq);
4373  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
4374  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
4375 
4376  // 2. Backing store.
4377  __ add(r9, r9, Operand(r2, LSL, 1));
4378  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
4379 
4380  // 3. Arguments object.
4381  __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
4382 
4383  // Do the allocation of all three objects in one go.
4384  __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
4385 
4386  // r0 = address of new object(s) (tagged)
4387  // r2 = argument count (tagged)
4388  // Get the arguments boilerplate from the current (global) context into r4.
4389  const int kNormalOffset =
4391  const int kAliasedOffset =
4393 
4396  __ cmp(r1, Operand::Zero());
4397  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
4398  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
4399 
4400  // r0 = address of new object (tagged)
4401  // r1 = mapped parameter count (tagged)
4402  // r2 = argument count (tagged)
4403  // r4 = address of boilerplate object (tagged)
4404  // Copy the JS object part.
4405  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4406  __ ldr(r3, FieldMemOperand(r4, i));
4407  __ str(r3, FieldMemOperand(r0, i));
4408  }
4409 
4410  // Set up the callee in-object property.
4412  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
4413  const int kCalleeOffset = JSObject::kHeaderSize +
4415  __ str(r3, FieldMemOperand(r0, kCalleeOffset));
4416 
4417  // Use the length (smi tagged) and set that as an in-object property too.
4419  const int kLengthOffset = JSObject::kHeaderSize +
4421  __ str(r2, FieldMemOperand(r0, kLengthOffset));
4422 
4423  // Set up the elements pointer in the allocated arguments object.
4424  // If we allocated a parameter map, r4 will point there, otherwise
4425  // it will point to the backing store.
4426  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
4428 
4429  // r0 = address of new object (tagged)
4430  // r1 = mapped parameter count (tagged)
4431  // r2 = argument count (tagged)
4432  // r4 = address of parameter map or backing store (tagged)
4433  // Initialize parameter map. If there are no mapped arguments, we're done.
4434  Label skip_parameter_map;
4435  __ cmp(r1, Operand(Smi::FromInt(0)));
4436  // Move backing store address to r3, because it is
4437  // expected there when filling in the unmapped arguments.
4438  __ mov(r3, r4, LeaveCC, eq);
4439  __ b(eq, &skip_parameter_map);
4440 
4441  __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
4443  __ add(r6, r1, Operand(Smi::FromInt(2)));
4445  __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
4446  __ add(r6, r4, Operand(r1, LSL, 1));
4447  __ add(r6, r6, Operand(kParameterMapHeaderSize));
4448  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
4449 
4450  // Copy the parameter slots and the holes in the arguments.
4451  // We need to fill in mapped_parameter_count slots. They index the context,
4452  // where parameters are stored in reverse order, at
4453  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4454  // The mapped parameter thus need to get indices
4455  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4456  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4457  // We loop from right to left.
4458  Label parameters_loop, parameters_test;
4459  __ mov(r6, r1);
4460  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
4461  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4462  __ sub(r9, r9, Operand(r1));
4463  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
4464  __ add(r3, r4, Operand(r6, LSL, 1));
4465  __ add(r3, r3, Operand(kParameterMapHeaderSize));
4466 
4467  // r6 = loop variable (tagged)
4468  // r1 = mapping index (tagged)
4469  // r3 = address of backing store (tagged)
4470  // r4 = address of parameter map (tagged)
4471  // r5 = temporary scratch (a.o., for address calculation)
4472  // r7 = the hole value
4473  __ jmp(&parameters_test);
4474 
4475  __ bind(&parameters_loop);
4476  __ sub(r6, r6, Operand(Smi::FromInt(1)));
4477  __ mov(r5, Operand(r6, LSL, 1));
4478  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4479  __ str(r9, MemOperand(r4, r5));
4480  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4481  __ str(r7, MemOperand(r3, r5));
4482  __ add(r9, r9, Operand(Smi::FromInt(1)));
4483  __ bind(&parameters_test);
4484  __ cmp(r6, Operand(Smi::FromInt(0)));
4485  __ b(ne, &parameters_loop);
4486 
4487  __ bind(&skip_parameter_map);
4488  // r2 = argument count (tagged)
4489  // r3 = address of backing store (tagged)
4490  // r5 = scratch
4491  // Copy arguments header and remaining slots (if there are any).
4492  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
4495 
4496  Label arguments_loop, arguments_test;
4497  __ mov(r9, r1);
4498  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
4499  __ sub(r4, r4, Operand(r9, LSL, 1));
4500  __ jmp(&arguments_test);
4501 
4502  __ bind(&arguments_loop);
4503  __ sub(r4, r4, Operand(kPointerSize));
4504  __ ldr(r6, MemOperand(r4, 0));
4505  __ add(r5, r3, Operand(r9, LSL, 1));
4507  __ add(r9, r9, Operand(Smi::FromInt(1)));
4508 
4509  __ bind(&arguments_test);
4510  __ cmp(r9, Operand(r2));
4511  __ b(lt, &arguments_loop);
4512 
4513  // Return and remove the on-stack parameters.
4514  __ add(sp, sp, Operand(3 * kPointerSize));
4515  __ Ret();
4516 
4517  // Do the runtime call to allocate the arguments object.
4518  // r2 = argument count (tagged)
4519  __ bind(&runtime);
4520  __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4521  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4522 }
4523 
4524 
4525 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4526  // sp[0] : number of parameters
4527  // sp[4] : receiver displacement
4528  // sp[8] : function
4529  // Check if the calling frame is an arguments adaptor frame.
4530  Label adaptor_frame, try_allocate, runtime;
4534  __ b(eq, &adaptor_frame);
4535 
4536  // Get the length from the frame.
4537  __ ldr(r1, MemOperand(sp, 0));
4538  __ b(&try_allocate);
4539 
4540  // Patch the arguments.length and the parameters pointer.
4541  __ bind(&adaptor_frame);
4543  __ str(r1, MemOperand(sp, 0));
4544  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4546  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4547 
4548  // Try the new space allocation. Start out with computing the size
4549  // of the arguments object and the elements array in words.
4550  Label add_arguments_object;
4551  __ bind(&try_allocate);
4552  __ cmp(r1, Operand(0, RelocInfo::NONE));
4553  __ b(eq, &add_arguments_object);
4554  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4555  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4556  __ bind(&add_arguments_object);
4557  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4558 
4559  // Do the allocation of both objects in one go.
4560  __ AllocateInNewSpace(r1,
4561  r0,
4562  r2,
4563  r3,
4564  &runtime,
4565  static_cast<AllocationFlags>(TAG_OBJECT |
4566  SIZE_IN_WORDS));
4567 
4568  // Get the arguments boilerplate from the current (global) context.
4573 
4574  // Copy the JS object part.
4575  __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4576 
4577  // Get the length (smi tagged) and set that as an in-object property too.
4579  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4580  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4581  Heap::kArgumentsLengthIndex * kPointerSize));
4582 
4583  // If there are no actual arguments, we're done.
4584  Label done;
4585  __ cmp(r1, Operand(0, RelocInfo::NONE));
4586  __ b(eq, &done);
4587 
4588  // Get the parameters pointer from the stack.
4589  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4590 
4591  // Set up the elements pointer in the allocated arguments object and
4592  // initialize the header in the elements fixed array.
4593  __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
4595  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4598  // Untag the length for the loop.
4599  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4600 
4601  // Copy the fixed array slots.
4602  Label loop;
4603  // Set up r4 to point to the first array slot.
4604  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4605  __ bind(&loop);
4606  // Pre-decrement r2 with kPointerSize on each iteration.
4607  // Pre-decrement in order to skip receiver.
4608  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4609  // Post-increment r4 with kPointerSize on each iteration.
4610  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4611  __ sub(r1, r1, Operand(1));
4612  __ cmp(r1, Operand(0, RelocInfo::NONE));
4613  __ b(ne, &loop);
4614 
4615  // Return and remove the on-stack parameters.
4616  __ bind(&done);
4617  __ add(sp, sp, Operand(3 * kPointerSize));
4618  __ Ret();
4619 
4620  // Do the runtime call to allocate the arguments object.
4621  __ bind(&runtime);
4622  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4623 }
4624 
4625 
4626 void RegExpExecStub::Generate(MacroAssembler* masm) {
4627  // Just jump directly to runtime if native RegExp is not selected at compile
4628  // time or if regexp entry in generated code is turned off runtime switch or
4629  // at compilation.
4630 #ifdef V8_INTERPRETED_REGEXP
4631  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4632 #else // V8_INTERPRETED_REGEXP
4633 
4634  // Stack frame on entry.
4635  // sp[0]: last_match_info (expected JSArray)
4636  // sp[4]: previous index
4637  // sp[8]: subject string
4638  // sp[12]: JSRegExp object
4639 
4640  const int kLastMatchInfoOffset = 0 * kPointerSize;
4641  const int kPreviousIndexOffset = 1 * kPointerSize;
4642  const int kSubjectOffset = 2 * kPointerSize;
4643  const int kJSRegExpOffset = 3 * kPointerSize;
4644 
4645  Label runtime, invoke_regexp;
4646 
4647  // Allocation of registers for this function. These are in callee save
4648  // registers and will be preserved by the call to the native RegExp code, as
4649  // this code is called using the normal C calling convention. When calling
4650  // directly from generated code the native RegExp code will not do a GC and
4651  // therefore the content of these registers are safe to use after the call.
4652  Register subject = r4;
4653  Register regexp_data = r5;
4654  Register last_match_info_elements = r6;
4655 
4656  // Ensure that a RegExp stack is allocated.
4657  Isolate* isolate = masm->isolate();
4658  ExternalReference address_of_regexp_stack_memory_address =
4659  ExternalReference::address_of_regexp_stack_memory_address(isolate);
4660  ExternalReference address_of_regexp_stack_memory_size =
4661  ExternalReference::address_of_regexp_stack_memory_size(isolate);
4662  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
4663  __ ldr(r0, MemOperand(r0, 0));
4664  __ cmp(r0, Operand(0));
4665  __ b(eq, &runtime);
4666 
4667  // Check that the first argument is a JSRegExp object.
4668  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
4669  STATIC_ASSERT(kSmiTag == 0);
4670  __ JumpIfSmi(r0, &runtime);
4671  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4672  __ b(ne, &runtime);
4673 
4674  // Check that the RegExp has been compiled (data contains a fixed array).
4675  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
4676  if (FLAG_debug_code) {
4677  __ tst(regexp_data, Operand(kSmiTagMask));
4678  __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
4679  __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
4680  __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
4681  }
4682 
4683  // regexp_data: RegExp data (FixedArray)
4684  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4685  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4686  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4687  __ b(ne, &runtime);
4688 
4689  // regexp_data: RegExp data (FixedArray)
4690  // Check that the number of captures fit in the static offsets vector buffer.
4691  __ ldr(r2,
4693  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4694  // uses the asumption that smis are 2 * their untagged value.
4695  STATIC_ASSERT(kSmiTag == 0);
4697  __ add(r2, r2, Operand(2)); // r2 was a smi.
4698  // Check that the static offsets vector buffer is large enough.
4700  __ b(hi, &runtime);
4701 
4702  // r2: Number of capture registers
4703  // regexp_data: RegExp data (FixedArray)
4704  // Check that the second argument is a string.
4705  __ ldr(subject, MemOperand(sp, kSubjectOffset));
4706  __ JumpIfSmi(subject, &runtime);
4707  Condition is_string = masm->IsObjectStringType(subject, r0);
4708  __ b(NegateCondition(is_string), &runtime);
4709  // Get the length of the string to r3.
4710  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
4711 
4712  // r2: Number of capture registers
4713  // r3: Length of subject string as a smi
4714  // subject: Subject string
4715  // regexp_data: RegExp data (FixedArray)
4716  // Check that the third argument is a positive smi less than the subject
4717  // string length. A negative value will be greater (unsigned comparison).
4718  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
4719  __ JumpIfNotSmi(r0, &runtime);
4720  __ cmp(r3, Operand(r0));
4721  __ b(ls, &runtime);
4722 
4723  // r2: Number of capture registers
4724  // subject: Subject string
4725  // regexp_data: RegExp data (FixedArray)
4726  // Check that the fourth object is a JSArray object.
4727  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
4728  __ JumpIfSmi(r0, &runtime);
4729  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
4730  __ b(ne, &runtime);
4731  // Check that the JSArray is in fast case.
4732  __ ldr(last_match_info_elements,
4734  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4735  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
4736  __ b(ne, &runtime);
4737  // Check that the last match info has space for the capture registers and the
4738  // additional information.
4739  __ ldr(r0,
4740  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4741  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
4742  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
4743  __ b(gt, &runtime);
4744 
4745  // Reset offset for possibly sliced string.
4746  __ mov(r9, Operand(0));
4747  // subject: Subject string
4748  // regexp_data: RegExp data (FixedArray)
4749  // Check the representation and encoding of the subject string.
4750  Label seq_string;
4751  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4753  // First check for flat string. None of the following string type tests will
4754  // succeed if subject is not a string or a short external string.
4755  __ and_(r1,
4756  r0,
4757  Operand(kIsNotStringMask |
4760  SetCC);
4762  __ b(eq, &seq_string);
4763 
4764  // subject: Subject string
4765  // regexp_data: RegExp data (FixedArray)
4766  // r1: whether subject is a string and if yes, its string representation
4767  // Check for flat cons string or sliced string.
4768  // A flat cons string is a cons string where the second part is the empty
4769  // string. In that case the subject string is just the first part of the cons
4770  // string. Also in this case the first part of the cons string is known to be
4771  // a sequential string or an external string.
4772  // In the case of a sliced string its offset has to be taken into account.
4773  Label cons_string, external_string, check_encoding;
4778  __ cmp(r1, Operand(kExternalStringTag));
4779  __ b(lt, &cons_string);
4780  __ b(eq, &external_string);
4781 
4782  // Catch non-string subject or short external string.
4785  __ b(ne, &runtime);
4786 
4787  // String is sliced.
4789  __ mov(r9, Operand(r9, ASR, kSmiTagSize));
4790  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4791  // r9: offset of sliced string, smi-tagged.
4792  __ jmp(&check_encoding);
4793  // String is a cons string, check whether it is flat.
4794  __ bind(&cons_string);
4796  __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
4797  __ b(ne, &runtime);
4798  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4799  // Is first part of cons or parent of slice a flat string?
4800  __ bind(&check_encoding);
4801  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4804  __ tst(r0, Operand(kStringRepresentationMask));
4805  __ b(ne, &external_string);
4806 
4807  __ bind(&seq_string);
4808  // subject: Subject string
4809  // regexp_data: RegExp data (FixedArray)
4810  // r0: Instance type of subject string
4813  // Find the code object based on the assumptions above.
4814  __ and_(r0, r0, Operand(kStringEncodingMask));
4815  __ mov(r3, Operand(r0, ASR, 2), SetCC);
4816  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
4817  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
4818 
4819  // Check that the irregexp code has been generated for the actual string
4820  // encoding. If it has, the field contains a code object otherwise it contains
4821  // a smi (code flushing support).
4822  __ JumpIfSmi(r7, &runtime);
4823 
4824  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4825  // r7: code
4826  // subject: Subject string
4827  // regexp_data: RegExp data (FixedArray)
4828  // Load used arguments before starting to push arguments for call to native
4829  // RegExp code to avoid handling changing stack height.
4830  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
4831  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
4832 
4833  // r1: previous index
4834  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4835  // r7: code
4836  // subject: Subject string
4837  // regexp_data: RegExp data (FixedArray)
4838  // All checks done. Now push arguments for native regexp code.
4839  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
4840 
4841  // Isolates: note we add an additional parameter here (isolate pointer).
4842  const int kRegExpExecuteArguments = 9;
4843  const int kParameterRegisters = 4;
4844  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4845 
4846  // Stack pointer now points to cell where return address is to be written.
4847  // Arguments are before that on the stack or in registers.
4848 
4849  // Argument 9 (sp[20]): Pass current isolate address.
4850  __ mov(r0, Operand(ExternalReference::isolate_address()));
4851  __ str(r0, MemOperand(sp, 5 * kPointerSize));
4852 
4853  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
4854  __ mov(r0, Operand(1));
4855  __ str(r0, MemOperand(sp, 4 * kPointerSize));
4856 
4857  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
4858  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
4859  __ ldr(r0, MemOperand(r0, 0));
4860  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
4861  __ ldr(r2, MemOperand(r2, 0));
4862  __ add(r0, r0, Operand(r2));
4863  __ str(r0, MemOperand(sp, 3 * kPointerSize));
4864 
4865  // Argument 6: Set the number of capture registers to zero to force global
4866  // regexps to behave as non-global. This does not affect non-global regexps.
4867  __ mov(r0, Operand(0));
4868  __ str(r0, MemOperand(sp, 2 * kPointerSize));
4869 
4870  // Argument 5 (sp[4]): static offsets vector buffer.
4871  __ mov(r0,
4872  Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
4873  __ str(r0, MemOperand(sp, 1 * kPointerSize));
4874 
4875  // For arguments 4 and 3 get string length, calculate start of string data and
4876  // calculate the shift of the index (0 for ASCII and 1 for two byte).
4877  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
4878  __ eor(r3, r3, Operand(1));
4879  // Load the length from the original subject string from the previous stack
4880  // frame. Therefore we have to use fp, which points exactly to two pointer
4881  // sizes below the previous sp. (Because creating a new stack frame pushes
4882  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
4883  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
4884  // If slice offset is not 0, load the length from the original sliced string.
4885  // Argument 4, r3: End of string data
4886  // Argument 3, r2: Start of string data
4887  // Prepare start and end index of the input.
4888  __ add(r9, r8, Operand(r9, LSL, r3));
4889  __ add(r2, r9, Operand(r1, LSL, r3));
4890 
4891  __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
4892  __ mov(r8, Operand(r8, ASR, kSmiTagSize));
4893  __ add(r3, r9, Operand(r8, LSL, r3));
4894 
4895  // Argument 2 (r1): Previous index.
4896  // Already there
4897 
4898  // Argument 1 (r0): Subject string.
4899  __ mov(r0, subject);
4900 
4901  // Locate the code entry and call it.
4902  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
4903  DirectCEntryStub stub;
4904  stub.GenerateCall(masm, r7);
4905 
4906  __ LeaveExitFrame(false, no_reg);
4907 
4908  // r0: result
4909  // subject: subject string (callee saved)
4910  // regexp_data: RegExp data (callee saved)
4911  // last_match_info_elements: Last match info elements (callee saved)
4912 
4913  // Check the result.
4914  Label success;
4915 
4916  __ cmp(r0, Operand(1));
4917  // We expect exactly one result since we force the called regexp to behave
4918  // as non-global.
4919  __ b(eq, &success);
4920  Label failure;
4921  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
4922  __ b(eq, &failure);
4924  // If not exception it can only be retry. Handle that in the runtime system.
4925  __ b(ne, &runtime);
4926  // Result must now be exception. If there is no pending exception already a
4927  // stack overflow (on the backtrack stack) was detected in RegExp code but
4928  // haven't created the exception yet. Handle that in the runtime system.
4929  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4930  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
4931  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4932  isolate)));
4933  __ ldr(r0, MemOperand(r2, 0));
4934  __ cmp(r0, r1);
4935  __ b(eq, &runtime);
4936 
4937  __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
4938 
4939  // Check if the exception is a termination. If so, throw as uncatchable.
4940  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
4941 
4942  Label termination_exception;
4943  __ b(eq, &termination_exception);
4944 
4945  __ Throw(r0);
4946 
4947  __ bind(&termination_exception);
4948  __ ThrowUncatchable(r0);
4949 
4950  __ bind(&failure);
4951  // For failure and exception return null.
4952  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
4953  __ add(sp, sp, Operand(4 * kPointerSize));
4954  __ Ret();
4955 
4956  // Process the result from the native regexp code.
4957  __ bind(&success);
4958  __ ldr(r1,
4960  // Calculate number of capture registers (number_of_captures + 1) * 2.
4961  STATIC_ASSERT(kSmiTag == 0);
4963  __ add(r1, r1, Operand(2)); // r1 was a smi.
4964 
4965  // r1: number of capture registers
4966  // r4: subject string
4967  // Store the capture count.
4968  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
4969  __ str(r2, FieldMemOperand(last_match_info_elements,
4971  // Store last subject and last input.
4972  __ str(subject,
4973  FieldMemOperand(last_match_info_elements,
4975  __ mov(r2, subject);
4976  __ RecordWriteField(last_match_info_elements,
4978  r2,
4979  r7,
4981  kDontSaveFPRegs);
4982  __ str(subject,
4983  FieldMemOperand(last_match_info_elements,
4985  __ RecordWriteField(last_match_info_elements,
4987  subject,
4988  r7,
4990  kDontSaveFPRegs);
4991 
4992  // Get the static offsets vector filled by the native regexp code.
4993  ExternalReference address_of_static_offsets_vector =
4994  ExternalReference::address_of_static_offsets_vector(isolate);
4995  __ mov(r2, Operand(address_of_static_offsets_vector));
4996 
4997  // r1: number of capture registers
4998  // r2: offsets vector
4999  Label next_capture, done;
5000  // Capture register counter starts from number of capture registers and
5001  // counts down until wraping after zero.
5002  __ add(r0,
5003  last_match_info_elements,
5005  __ bind(&next_capture);
5006  __ sub(r1, r1, Operand(1), SetCC);
5007  __ b(mi, &done);
5008  // Read the value from the static offsets vector buffer.
5009  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
5010  // Store the smi value in the last match info.
5011  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
5012  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
5013  __ jmp(&next_capture);
5014  __ bind(&done);
5015 
5016  // Return last match info.
5017  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
5018  __ add(sp, sp, Operand(4 * kPointerSize));
5019  __ Ret();
5020 
5021  // External string. Short external strings have already been ruled out.
5022  // r0: scratch
5023  __ bind(&external_string);
5024  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
5026  if (FLAG_debug_code) {
5027  // Assert that we do not have a cons or slice (indirect strings) here.
5028  // Sequential strings have already been ruled out.
5029  __ tst(r0, Operand(kIsIndirectStringMask));
5030  __ Assert(eq, "external string expected, but not found");
5031  }
5032  __ ldr(subject,
5034  // Move the pointer so that offset-wise, it looks like a sequential string.
5036  __ sub(subject,
5037  subject,
5039  __ jmp(&seq_string);
5040 
5041  // Do the runtime call to execute the regexp.
5042  __ bind(&runtime);
5043  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5044 #endif // V8_INTERPRETED_REGEXP
5045 }
5046 
5047 
5048 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5049  const int kMaxInlineLength = 100;
5050  Label slowcase;
5051  Label done;
5052  Factory* factory = masm->isolate()->factory();
5053 
5054  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
5055  STATIC_ASSERT(kSmiTag == 0);
5056  STATIC_ASSERT(kSmiTagSize == 1);
5057  __ JumpIfNotSmi(r1, &slowcase);
5058  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
5059  __ b(hi, &slowcase);
5060  // Smi-tagging is equivalent to multiplying by 2.
5061  // Allocate RegExpResult followed by FixedArray with size in ebx.
5062  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5063  // Elements: [Map][Length][..elements..]
5064  // Size of JSArray with two in-object properties and the header of a
5065  // FixedArray.
5066  int objects_size =
5068  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
5069  __ add(r2, r5, Operand(objects_size));
5070  __ AllocateInNewSpace(
5071  r2, // In: Size, in words.
5072  r0, // Out: Start of allocation (tagged).
5073  r3, // Scratch register.
5074  r4, // Scratch register.
5075  &slowcase,
5076  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5077  // r0: Start of allocated area, object-tagged.
5078  // r1: Number of elements in array, as smi.
5079  // r5: Number of elements, untagged.
5080 
5081  // Set JSArray map to global.regexp_result_map().
5082  // Set empty properties FixedArray.
5083  // Set elements to point to FixedArray allocated right after the JSArray.
5084  // Interleave operations for better latency.
5086  __ add(r3, r0, Operand(JSRegExpResult::kSize));
5087  __ mov(r4, Operand(factory->empty_fixed_array()));
5093 
5094  // Set input, index and length fields from arguments.
5095  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
5096  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
5097  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
5101 
5102  // Fill out the elements FixedArray.
5103  // r0: JSArray, tagged.
5104  // r3: FixedArray, tagged.
5105  // r5: Number of elements in array, untagged.
5106 
5107  // Set map.
5108  __ mov(r2, Operand(factory->fixed_array_map()));
5110  // Set FixedArray length.
5111  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
5113  // Fill contents of fixed-array with the-hole.
5114  __ mov(r2, Operand(factory->the_hole_value()));
5115  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5116  // Fill fixed array elements with hole.
5117  // r0: JSArray, tagged.
5118  // r2: the hole.
5119  // r3: Start of elements in FixedArray.
5120  // r5: Number of elements to fill.
5121  Label loop;
5122  __ cmp(r5, Operand(0));
5123  __ bind(&loop);
5124  __ b(le, &done); // Jump if r5 is negative or zero.
5125  __ sub(r5, r5, Operand(1), SetCC);
5126  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
5127  __ jmp(&loop);
5128 
5129  __ bind(&done);
5130  __ add(sp, sp, Operand(3 * kPointerSize));
5131  __ Ret();
5132 
5133  __ bind(&slowcase);
5134  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5135 }
5136 
5137 
5138 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5139  // Cache the called function in a global property cell. Cache states
5140  // are uninitialized, monomorphic (indicated by a JSFunction), and
5141  // megamorphic.
5142  // r1 : the function to call
5143  // r2 : cache cell for call target
5144  Label done;
5145 
5147  masm->isolate()->heap()->undefined_value());
5149  masm->isolate()->heap()->the_hole_value());
5150 
5151  // Load the cache state into r3.
5153 
5154  // A monomorphic cache hit or an already megamorphic state: invoke the
5155  // function without changing the state.
5156  __ cmp(r3, r1);
5157  __ b(eq, &done);
5158  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
5159  __ b(eq, &done);
5160 
5161  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
5162  // megamorphic.
5163  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
5164  // MegamorphicSentinel is an immortal immovable object (undefined) so no
5165  // write-barrier is needed.
5166  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
5168 
5169  // An uninitialized cache is patched with the function.
5171  // No need for a write barrier here - cells are rescanned.
5172 
5173  __ bind(&done);
5174 }
5175 
5176 
5177 void CallFunctionStub::Generate(MacroAssembler* masm) {
5178  // r1 : the function to call
5179  // r2 : cache cell for call target
5180  Label slow, non_function;
5181 
5182  // The receiver might implicitly be the global object. This is
5183  // indicated by passing the hole as the receiver to the call
5184  // function stub.
5185  if (ReceiverMightBeImplicit()) {
5186  Label call;
5187  // Get the receiver from the stack.
5188  // function, receiver [, arguments]
5189  __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
5190  // Call as function is indicated with the hole.
5191  __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
5192  __ b(ne, &call);
5193  // Patch the receiver on the stack with the global receiver object.
5196  __ str(r3, MemOperand(sp, argc_ * kPointerSize));
5197  __ bind(&call);
5198  }
5199 
5200  // Check that the function is really a JavaScript function.
5201  // r1: pushed function (to be verified)
5202  __ JumpIfSmi(r1, &non_function);
5203  // Get the map of the function object.
5204  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
5205  __ b(ne, &slow);
5206 
5207  if (RecordCallTarget()) {
5208  GenerateRecordCallTarget(masm);
5209  }
5210 
5211  // Fast-case: Invoke the function now.
5212  // r1: pushed function
5213  ParameterCount actual(argc_);
5214 
5215  if (ReceiverMightBeImplicit()) {
5216  Label call_as_function;
5217  __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
5218  __ b(eq, &call_as_function);
5219  __ InvokeFunction(r1,
5220  actual,
5221  JUMP_FUNCTION,
5222  NullCallWrapper(),
5223  CALL_AS_METHOD);
5224  __ bind(&call_as_function);
5225  }
5226  __ InvokeFunction(r1,
5227  actual,
5228  JUMP_FUNCTION,
5229  NullCallWrapper(),
5231 
5232  // Slow-case: Non-function called.
5233  __ bind(&slow);
5234  if (RecordCallTarget()) {
5235  // If there is a call target cache, mark it megamorphic in the
5236  // non-function case. MegamorphicSentinel is an immortal immovable
5237  // object (undefined) so no write barrier is needed.
5239  masm->isolate()->heap()->undefined_value());
5240  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5242  }
5243  // Check for function proxy.
5244  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5245  __ b(ne, &non_function);
5246  __ push(r1); // put proxy as additional argument
5247  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
5248  __ mov(r2, Operand(0, RelocInfo::NONE));
5249  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
5250  __ SetCallKind(r5, CALL_AS_METHOD);
5251  {
5252  Handle<Code> adaptor =
5253  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5254  __ Jump(adaptor, RelocInfo::CODE_TARGET);
5255  }
5256 
5257  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5258  // of the original receiver from the call site).
5259  __ bind(&non_function);
5260  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
5261  __ mov(r0, Operand(argc_)); // Set up the number of arguments.
5262  __ mov(r2, Operand(0, RelocInfo::NONE));
5263  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5264  __ SetCallKind(r5, CALL_AS_METHOD);
5265  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5266  RelocInfo::CODE_TARGET);
5267 }
5268 
5269 
5270 void CallConstructStub::Generate(MacroAssembler* masm) {
5271  // r0 : number of arguments
5272  // r1 : the function to call
5273  // r2 : cache cell for call target
5274  Label slow, non_function_call;
5275 
5276  // Check that the function is not a smi.
5277  __ JumpIfSmi(r1, &non_function_call);
5278  // Check that the function is a JSFunction.
5279  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
5280  __ b(ne, &slow);
5281 
5282  if (RecordCallTarget()) {
5283  GenerateRecordCallTarget(masm);
5284  }
5285 
5286  // Jump to the function-specific construct stub.
5289  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
5290 
5291  // r0: number of arguments
5292  // r1: called object
5293  // r3: object type
5294  Label do_call;
5295  __ bind(&slow);
5296  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5297  __ b(ne, &non_function_call);
5298  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5299  __ jmp(&do_call);
5300 
5301  __ bind(&non_function_call);
5302  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5303  __ bind(&do_call);
5304  // Set expected number of arguments to zero (not changing r0).
5305  __ mov(r2, Operand(0, RelocInfo::NONE));
5306  __ SetCallKind(r5, CALL_AS_METHOD);
5307  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5308  RelocInfo::CODE_TARGET);
5309 }
5310 
5311 
5312 // Unfortunately you have to run without snapshots to see most of these
5313 // names in the profile since most compare stubs end up in the snapshot.
5314 void CompareStub::PrintName(StringStream* stream) {
5315  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5316  (lhs_.is(r1) && rhs_.is(r0)));
5317  const char* cc_name;
5318  switch (cc_) {
5319  case lt: cc_name = "LT"; break;
5320  case gt: cc_name = "GT"; break;
5321  case le: cc_name = "LE"; break;
5322  case ge: cc_name = "GE"; break;
5323  case eq: cc_name = "EQ"; break;
5324  case ne: cc_name = "NE"; break;
5325  default: cc_name = "UnknownCondition"; break;
5326  }
5327  bool is_equality = cc_ == eq || cc_ == ne;
5328  stream->Add("CompareStub_%s", cc_name);
5329  stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
5330  stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
5331  if (strict_ && is_equality) stream->Add("_STRICT");
5332  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5333  if (!include_number_compare_) stream->Add("_NO_NUMBER");
5334  if (!include_smi_compare_) stream->Add("_NO_SMI");
5335 }
5336 
5337 
5338 int CompareStub::MinorKey() {
5339  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
5340  // stubs the never NaN NaN condition is only taken into account if the
5341  // condition is equals.
5342  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
5343  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5344  (lhs_.is(r1) && rhs_.is(r0)));
5345  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
5346  | RegisterField::encode(lhs_.is(r0))
5347  | StrictField::encode(strict_)
5348  | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5349  | IncludeNumberCompareField::encode(include_number_compare_)
5350  | IncludeSmiCompareField::encode(include_smi_compare_);
5351 }
5352 
5353 
5354 // StringCharCodeAtGenerator
5355 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5356  Label flat_string;
5357  Label ascii_string;
5358  Label got_char_code;
5359  Label sliced_string;
5360 
5361  // If the receiver is a smi trigger the non-string case.
5362  __ JumpIfSmi(object_, receiver_not_string_);
5363 
5364  // Fetch the instance type of the receiver into result register.
5365  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5366  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5367  // If the receiver is not a string trigger the non-string case.
5368  __ tst(result_, Operand(kIsNotStringMask));
5369  __ b(ne, receiver_not_string_);
5370 
5371  // If the index is non-smi trigger the non-smi case.
5372  __ JumpIfNotSmi(index_, &index_not_smi_);
5373  __ bind(&got_smi_index_);
5374 
5375  // Check for index out of range.
5376  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
5377  __ cmp(ip, Operand(index_));
5378  __ b(ls, index_out_of_range_);
5379 
5380  __ mov(index_, Operand(index_, ASR, kSmiTagSize));
5381 
5383  object_,
5384  index_,
5385  result_,
5386  &call_runtime_);
5387 
5388  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
5389  __ bind(&exit_);
5390 }
5391 
5392 
5394  MacroAssembler* masm,
5395  const RuntimeCallHelper& call_helper) {
5396  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5397 
5398  // Index is not a smi.
5399  __ bind(&index_not_smi_);
5400  // If index is a heap number, try converting it to an integer.
5401  __ CheckMap(index_,
5402  result_,
5403  Heap::kHeapNumberMapRootIndex,
5404  index_not_number_,
5406  call_helper.BeforeCall(masm);
5407  __ push(object_);
5408  __ push(index_); // Consumed by runtime conversion function.
5409  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5410  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5411  } else {
5412  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5413  // NumberToSmi discards numbers that are not exact integers.
5414  __ CallRuntime(Runtime::kNumberToSmi, 1);
5415  }
5416  // Save the conversion result before the pop instructions below
5417  // have a chance to overwrite it.
5418  __ Move(index_, r0);
5419  __ pop(object_);
5420  // Reload the instance type.
5421  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5422  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5423  call_helper.AfterCall(masm);
5424  // If index is still not a smi, it must be out of range.
5425  __ JumpIfNotSmi(index_, index_out_of_range_);
5426  // Otherwise, return to the fast path.
5427  __ jmp(&got_smi_index_);
5428 
5429  // Call runtime. We get here when the receiver is a string and the
5430  // index is a number, but the code of getting the actual character
5431  // is too complex (e.g., when the string needs to be flattened).
5432  __ bind(&call_runtime_);
5433  call_helper.BeforeCall(masm);
5434  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
5435  __ Push(object_, index_);
5436  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5437  __ Move(result_, r0);
5438  call_helper.AfterCall(masm);
5439  __ jmp(&exit_);
5440 
5441  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5442 }
5443 
5444 
5445 // -------------------------------------------------------------------------
5446 // StringCharFromCodeGenerator
5447 
5448 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5449  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5450  STATIC_ASSERT(kSmiTag == 0);
5453  __ tst(code_,
5454  Operand(kSmiTagMask |
5456  __ b(ne, &slow_case_);
5457 
5458  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5459  // At this point code register contains smi tagged ASCII char code.
5460  STATIC_ASSERT(kSmiTag == 0);
5461  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
5462  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5463  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
5464  __ b(eq, &slow_case_);
5465  __ bind(&exit_);
5466 }
5467 
5468 
5470  MacroAssembler* masm,
5471  const RuntimeCallHelper& call_helper) {
5472  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5473 
5474  __ bind(&slow_case_);
5475  call_helper.BeforeCall(masm);
5476  __ push(code_);
5477  __ CallRuntime(Runtime::kCharFromCode, 1);
5478  __ Move(result_, r0);
5479  call_helper.AfterCall(masm);
5480  __ jmp(&exit_);
5481 
5482  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5483 }
5484 
5485 
5486 // -------------------------------------------------------------------------
5487 // StringCharAtGenerator
5488 
5489 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5490  char_code_at_generator_.GenerateFast(masm);
5491  char_from_code_generator_.GenerateFast(masm);
5492 }
5493 
5494 
5496  MacroAssembler* masm,
5497  const RuntimeCallHelper& call_helper) {
5498  char_code_at_generator_.GenerateSlow(masm, call_helper);
5499  char_from_code_generator_.GenerateSlow(masm, call_helper);
5500 }
5501 
5502 
5503 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5504  Register dest,
5505  Register src,
5506  Register count,
5507  Register scratch,
5508  bool ascii) {
5509  Label loop;
5510  Label done;
5511  // This loop just copies one character at a time, as it is only used for very
5512  // short strings.
5513  if (!ascii) {
5514  __ add(count, count, Operand(count), SetCC);
5515  } else {
5516  __ cmp(count, Operand(0, RelocInfo::NONE));
5517  }
5518  __ b(eq, &done);
5519 
5520  __ bind(&loop);
5521  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
5522  // Perform sub between load and dependent store to get the load time to
5523  // complete.
5524  __ sub(count, count, Operand(1), SetCC);
5525  __ strb(scratch, MemOperand(dest, 1, PostIndex));
5526  // last iteration.
5527  __ b(gt, &loop);
5528 
5529  __ bind(&done);
5530 }
5531 
5532 
5533 enum CopyCharactersFlags {
5534  COPY_ASCII = 1,
5535  DEST_ALWAYS_ALIGNED = 2
5536 };
5537 
5538 
5539 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5540  Register dest,
5541  Register src,
5542  Register count,
5543  Register scratch1,
5544  Register scratch2,
5545  Register scratch3,
5546  Register scratch4,
5547  Register scratch5,
5548  int flags) {
5549  bool ascii = (flags & COPY_ASCII) != 0;
5550  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5551 
5552  if (dest_always_aligned && FLAG_debug_code) {
5553  // Check that destination is actually word aligned if the flag says
5554  // that it is.
5555  __ tst(dest, Operand(kPointerAlignmentMask));
5556  __ Check(eq, "Destination of copy not aligned.");
5557  }
5558 
5559  const int kReadAlignment = 4;
5560  const int kReadAlignmentMask = kReadAlignment - 1;
5561  // Ensure that reading an entire aligned word containing the last character
5562  // of a string will not read outside the allocated area (because we pad up
5563  // to kObjectAlignment).
5564  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5565  // Assumes word reads and writes are little endian.
5566  // Nothing to do for zero characters.
5567  Label done;
5568  if (!ascii) {
5569  __ add(count, count, Operand(count), SetCC);
5570  } else {
5571  __ cmp(count, Operand(0, RelocInfo::NONE));
5572  }
5573  __ b(eq, &done);
5574 
5575  // Assume that you cannot read (or write) unaligned.
5576  Label byte_loop;
5577  // Must copy at least eight bytes, otherwise just do it one byte at a time.
5578  __ cmp(count, Operand(8));
5579  __ add(count, dest, Operand(count));
5580  Register limit = count; // Read until src equals this.
5581  __ b(lt, &byte_loop);
5582 
5583  if (!dest_always_aligned) {
5584  // Align dest by byte copying. Copies between zero and three bytes.
5585  __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
5586  Label dest_aligned;
5587  __ b(eq, &dest_aligned);
5588  __ cmp(scratch4, Operand(2));
5589  __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
5590  __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
5591  __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
5592  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5593  __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
5594  __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
5595  __ bind(&dest_aligned);
5596  }
5597 
5598  Label simple_loop;
5599 
5600  __ sub(scratch4, dest, Operand(src));
5601  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
5602  __ b(eq, &simple_loop);
5603  // Shift register is number of bits in a source word that
5604  // must be combined with bits in the next source word in order
5605  // to create a destination word.
5606 
5607  // Complex loop for src/dst that are not aligned the same way.
5608  {
5609  Label loop;
5610  __ mov(scratch4, Operand(scratch4, LSL, 3));
5611  Register left_shift = scratch4;
5612  __ and_(src, src, Operand(~3)); // Round down to load previous word.
5613  __ ldr(scratch1, MemOperand(src, 4, PostIndex));
5614  // Store the "shift" most significant bits of scratch in the least
5615  // signficant bits (i.e., shift down by (32-shift)).
5616  __ rsb(scratch2, left_shift, Operand(32));
5617  Register right_shift = scratch2;
5618  __ mov(scratch1, Operand(scratch1, LSR, right_shift));
5619 
5620  __ bind(&loop);
5621  __ ldr(scratch3, MemOperand(src, 4, PostIndex));
5622  __ sub(scratch5, limit, Operand(dest));
5623  __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
5624  __ str(scratch1, MemOperand(dest, 4, PostIndex));
5625  __ mov(scratch1, Operand(scratch3, LSR, right_shift));
5626  // Loop if four or more bytes left to copy.
5627  // Compare to eight, because we did the subtract before increasing dst.
5628  __ sub(scratch5, scratch5, Operand(8), SetCC);
5629  __ b(ge, &loop);
5630  }
5631  // There is now between zero and three bytes left to copy (negative that
5632  // number is in scratch5), and between one and three bytes already read into
5633  // scratch1 (eight times that number in scratch4). We may have read past
5634  // the end of the string, but because objects are aligned, we have not read
5635  // past the end of the object.
5636  // Find the minimum of remaining characters to move and preloaded characters
5637  // and write those as bytes.
5638  __ add(scratch5, scratch5, Operand(4), SetCC);
5639  __ b(eq, &done);
5640  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
5641  // Move minimum of bytes read and bytes left to copy to scratch4.
5642  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
5643  // Between one and three (value in scratch5) characters already read into
5644  // scratch ready to write.
5645  __ cmp(scratch5, Operand(2));
5646  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5647  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
5648  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
5649  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
5650  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
5651  // Copy any remaining bytes.
5652  __ b(&byte_loop);
5653 
5654  // Simple loop.
5655  // Copy words from src to dst, until less than four bytes left.
5656  // Both src and dest are word aligned.
5657  __ bind(&simple_loop);
5658  {
5659  Label loop;
5660  __ bind(&loop);
5661  __ ldr(scratch1, MemOperand(src, 4, PostIndex));
5662  __ sub(scratch3, limit, Operand(dest));
5663  __ str(scratch1, MemOperand(dest, 4, PostIndex));
5664  // Compare to 8, not 4, because we do the substraction before increasing
5665  // dest.
5666  __ cmp(scratch3, Operand(8));
5667  __ b(ge, &loop);
5668  }
5669 
5670  // Copy bytes from src to dst until dst hits limit.
5671  __ bind(&byte_loop);
5672  __ cmp(dest, Operand(limit));
5673  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
5674  __ b(ge, &done);
5675  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5676  __ b(&byte_loop);
5677 
5678  __ bind(&done);
5679 }
5680 
5681 
5682 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5683  Register c1,
5684  Register c2,
5685  Register scratch1,
5686  Register scratch2,
5687  Register scratch3,
5688  Register scratch4,
5689  Register scratch5,
5690  Label* not_found) {
5691  // Register scratch3 is the general scratch register in this function.
5692  Register scratch = scratch3;
5693 
5694  // Make sure that both characters are not digits as such strings has a
5695  // different hash algorithm. Don't try to look for these in the symbol table.
5696  Label not_array_index;
5697  __ sub(scratch, c1, Operand(static_cast<int>('0')));
5698  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5699  __ b(hi, &not_array_index);
5700  __ sub(scratch, c2, Operand(static_cast<int>('0')));
5701  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5702 
5703  // If check failed combine both characters into single halfword.
5704  // This is required by the contract of the method: code at the
5705  // not_found branch expects this combination in c1 register
5706  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
5707  __ b(ls, not_found);
5708 
5709  __ bind(&not_array_index);
5710  // Calculate the two character string hash.
5711  Register hash = scratch1;
5712  StringHelper::GenerateHashInit(masm, hash, c1);
5715 
5716  // Collect the two characters in a register.
5717  Register chars = c1;
5718  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
5719 
5720  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5721  // hash: hash of two character string.
5722 
5723  // Load symbol table
5724  // Load address of first element of the symbol table.
5725  Register symbol_table = c2;
5726  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5727 
5728  Register undefined = scratch4;
5729  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5730 
5731  // Calculate capacity mask from the symbol table capacity.
5732  Register mask = scratch2;
5733  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5734  __ mov(mask, Operand(mask, ASR, 1));
5735  __ sub(mask, mask, Operand(1));
5736 
5737  // Calculate untagged address of the first element of the symbol table.
5738  Register first_symbol_table_element = symbol_table;
5739  __ add(first_symbol_table_element, symbol_table,
5741 
5742  // Registers
5743  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5744  // hash: hash of two character string
5745  // mask: capacity mask
5746  // first_symbol_table_element: address of the first element of
5747  // the symbol table
5748  // undefined: the undefined object
5749  // scratch: -
5750 
5751  // Perform a number of probes in the symbol table.
5752  const int kProbes = 4;
5753  Label found_in_symbol_table;
5754  Label next_probe[kProbes];
5755  Register candidate = scratch5; // Scratch register contains candidate.
5756  for (int i = 0; i < kProbes; i++) {
5757  // Calculate entry in symbol table.
5758  if (i > 0) {
5759  __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5760  } else {
5761  __ mov(candidate, hash);
5762  }
5763 
5764  __ and_(candidate, candidate, Operand(mask));
5765 
5766  // Load the entry from the symble table.
5768  __ ldr(candidate,
5769  MemOperand(first_symbol_table_element,
5770  candidate,
5771  LSL,
5772  kPointerSizeLog2));
5773 
5774  // If entry is undefined no string with this hash can be found.
5775  Label is_string;
5776  __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
5777  __ b(ne, &is_string);
5778 
5779  __ cmp(undefined, candidate);
5780  __ b(eq, not_found);
5781  // Must be the hole (deleted entry).
5782  if (FLAG_debug_code) {
5783  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
5784  __ cmp(ip, candidate);
5785  __ Assert(eq, "oddball in symbol table is not undefined or the hole");
5786  }
5787  __ jmp(&next_probe[i]);
5788 
5789  __ bind(&is_string);
5790 
5791  // Check that the candidate is a non-external ASCII string. The instance
5792  // type is still in the scratch register from the CompareObjectType
5793  // operation.
5794  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5795 
5796  // If length is not 2 the string is not a candidate.
5797  __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5798  __ cmp(scratch, Operand(Smi::FromInt(2)));
5799  __ b(ne, &next_probe[i]);
5800 
5801  // Check if the two characters match.
5802  // Assumes that word load is little endian.
5803  __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5804  __ cmp(chars, scratch);
5805  __ b(eq, &found_in_symbol_table);
5806  __ bind(&next_probe[i]);
5807  }
5808 
5809  // No matching 2 character string found by probing.
5810  __ jmp(not_found);
5811 
5812  // Scratch register contains result when we fall through to here.
5813  Register result = candidate;
5814  __ bind(&found_in_symbol_table);
5815  __ Move(r0, result);
5816 }
5817 
5818 
5819 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5820  Register hash,
5821  Register character) {
5822  // hash = character + (character << 10);
5823  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
5824  // Untag smi seed and add the character.
5825  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
5826  // hash += hash << 10;
5827  __ add(hash, hash, Operand(hash, LSL, 10));
5828  // hash ^= hash >> 6;
5829  __ eor(hash, hash, Operand(hash, LSR, 6));
5830 }
5831 
5832 
5833 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5834  Register hash,
5835  Register character) {
5836  // hash += character;
5837  __ add(hash, hash, Operand(character));
5838  // hash += hash << 10;
5839  __ add(hash, hash, Operand(hash, LSL, 10));
5840  // hash ^= hash >> 6;
5841  __ eor(hash, hash, Operand(hash, LSR, 6));
5842 }
5843 
5844 
5845 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5846  Register hash) {
5847  // hash += hash << 3;
5848  __ add(hash, hash, Operand(hash, LSL, 3));
5849  // hash ^= hash >> 11;
5850  __ eor(hash, hash, Operand(hash, LSR, 11));
5851  // hash += hash << 15;
5852  __ add(hash, hash, Operand(hash, LSL, 15));
5853 
5854  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
5855 
5856  // if (hash == 0) hash = 27;
5857  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
5858 }
5859 
5860 
5861 void SubStringStub::Generate(MacroAssembler* masm) {
5862  Label runtime;
5863 
5864  // Stack frame on entry.
5865  // lr: return address
5866  // sp[0]: to
5867  // sp[4]: from
5868  // sp[8]: string
5869 
5870  // This stub is called from the native-call %_SubString(...), so
5871  // nothing can be assumed about the arguments. It is tested that:
5872  // "string" is a sequential string,
5873  // both "from" and "to" are smis, and
5874  // 0 <= from <= to <= string.length.
5875  // If any of these assumptions fail, we call the runtime system.
5876 
5877  const int kToOffset = 0 * kPointerSize;
5878  const int kFromOffset = 1 * kPointerSize;
5879  const int kStringOffset = 2 * kPointerSize;
5880 
5881  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
5882  STATIC_ASSERT(kFromOffset == kToOffset + 4);
5883  STATIC_ASSERT(kSmiTag == 0);
5885 
5886  // I.e., arithmetic shift right by one un-smi-tags.
5887  __ mov(r2, Operand(r2, ASR, 1), SetCC);
5888  __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
5889  // If either to or from had the smi tag bit set, then carry is set now.
5890  __ b(cs, &runtime); // Either "from" or "to" is not a smi.
5891  // We want to bailout to runtime here if From is negative. In that case, the
5892  // next instruction is not executed and we fall through to bailing out to
5893  // runtime. pl is the opposite of mi.
5894  // Both r2 and r3 are untagged integers.
5895  __ sub(r2, r2, Operand(r3), SetCC, pl);
5896  __ b(mi, &runtime); // Fail if from > to.
5897 
5898  // Make sure first argument is a string.
5899  __ ldr(r0, MemOperand(sp, kStringOffset));
5900  STATIC_ASSERT(kSmiTag == 0);
5901  __ JumpIfSmi(r0, &runtime);
5902  Condition is_string = masm->IsObjectStringType(r0, r1);
5903  __ b(NegateCondition(is_string), &runtime);
5904 
5905  // Short-cut for the case of trivial substring.
5906  Label return_r0;
5907  // r0: original string
5908  // r2: result string length
5910  __ cmp(r2, Operand(r4, ASR, 1));
5911  // Return original string.
5912  __ b(eq, &return_r0);
5913  // Longer than original string's length or negative: unsafe arguments.
5914  __ b(hi, &runtime);
5915  // Shorter than original string's length: an actual substring.
5916 
5917  // Deal with different string types: update the index if necessary
5918  // and put the underlying string into r5.
5919  // r0: original string
5920  // r1: instance type
5921  // r2: length
5922  // r3: from index (untagged)
5923  Label underlying_unpacked, sliced_string, seq_or_external_string;
5924  // If the string is not indirect, it can only be sequential or external.
5927  __ tst(r1, Operand(kIsIndirectStringMask));
5928  __ b(eq, &seq_or_external_string);
5929 
5930  __ tst(r1, Operand(kSlicedNotConsMask));
5931  __ b(ne, &sliced_string);
5932  // Cons string. Check whether it is flat, then fetch first part.
5934  __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
5935  __ b(ne, &runtime);
5937  // Update instance type.
5940  __ jmp(&underlying_unpacked);
5941 
5942  __ bind(&sliced_string);
5943  // Sliced string. Fetch parent and correct start index by offset.
5946  __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
5947  // Update instance type.
5950  __ jmp(&underlying_unpacked);
5951 
5952  __ bind(&seq_or_external_string);
5953  // Sequential or external string. Just move string to the expected register.
5954  __ mov(r5, r0);
5955 
5956  __ bind(&underlying_unpacked);
5957 
5958  if (FLAG_string_slices) {
5959  Label copy_routine;
5960  // r5: underlying subject string
5961  // r1: instance type of underlying subject string
5962  // r2: length
5963  // r3: adjusted start index (untagged)
5964  __ cmp(r2, Operand(SlicedString::kMinLength));
5965  // Short slice. Copy instead of slicing.
5966  __ b(lt, &copy_routine);
5967  // Allocate new sliced string. At this point we do not reload the instance
5968  // type including the string encoding because we simply rely on the info
5969  // provided by the original string. It does not matter if the original
5970  // string's encoding is wrong because we always have to recheck encoding of
5971  // the newly created string's parent anyways due to externalized strings.
5972  Label two_byte_slice, set_slice_header;
5975  __ tst(r1, Operand(kStringEncodingMask));
5976  __ b(eq, &two_byte_slice);
5977  __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
5978  __ jmp(&set_slice_header);
5979  __ bind(&two_byte_slice);
5980  __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
5981  __ bind(&set_slice_header);
5982  __ mov(r3, Operand(r3, LSL, 1));
5985  __ jmp(&return_r0);
5986 
5987  __ bind(&copy_routine);
5988  }
5989 
5990  // r5: underlying subject string
5991  // r1: instance type of underlying subject string
5992  // r2: length
5993  // r3: adjusted start index (untagged)
5994  Label two_byte_sequential, sequential_string, allocate_result;
5997  __ tst(r1, Operand(kExternalStringTag));
5998  __ b(eq, &sequential_string);
5999 
6000  // Handle external string.
6001  // Rule out short external strings.
6003  __ tst(r1, Operand(kShortExternalStringTag));
6004  __ b(ne, &runtime);
6006  // r5 already points to the first character of underlying string.
6007  __ jmp(&allocate_result);
6008 
6009  __ bind(&sequential_string);
6010  // Locate first character of underlying subject string.
6013 
6014  __ bind(&allocate_result);
6015  // Sequential acii string. Allocate the result.
6017  __ tst(r1, Operand(kStringEncodingMask));
6018  __ b(eq, &two_byte_sequential);
6019 
6020  // Allocate and copy the resulting ASCII string.
6021  __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
6022 
6023  // Locate first character of substring to copy.
6024  __ add(r5, r5, r3);
6025  // Locate first character of result.
6027 
6028  // r0: result string
6029  // r1: first character of result string
6030  // r2: result string length
6031  // r5: first character of substring to copy
6034  COPY_ASCII | DEST_ALWAYS_ALIGNED);
6035  __ jmp(&return_r0);
6036 
6037  // Allocate and copy the resulting two-byte string.
6038  __ bind(&two_byte_sequential);
6039  __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
6040 
6041  // Locate first character of substring to copy.
6042  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6043  __ add(r5, r5, Operand(r3, LSL, 1));
6044  // Locate first character of result.
6046 
6047  // r0: result string.
6048  // r1: first character of result.
6049  // r2: result length.
6050  // r5: first character of substring to copy.
6053  masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
6054 
6055  __ bind(&return_r0);
6056  Counters* counters = masm->isolate()->counters();
6057  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
6058  __ add(sp, sp, Operand(3 * kPointerSize));
6059  __ Ret();
6060 
6061  // Just jump to runtime to create the sub string.
6062  __ bind(&runtime);
6063  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6064 }
6065 
6066 
6067 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6068  Register left,
6069  Register right,
6070  Register scratch1,
6071  Register scratch2,
6072  Register scratch3) {
6073  Register length = scratch1;
6074 
6075  // Compare lengths.
6076  Label strings_not_equal, check_zero_length;
6077  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
6078  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6079  __ cmp(length, scratch2);
6080  __ b(eq, &check_zero_length);
6081  __ bind(&strings_not_equal);
6082  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
6083  __ Ret();
6084 
6085  // Check if the length is zero.
6086  Label compare_chars;
6087  __ bind(&check_zero_length);
6088  STATIC_ASSERT(kSmiTag == 0);
6089  __ cmp(length, Operand(0));
6090  __ b(ne, &compare_chars);
6091  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6092  __ Ret();
6093 
6094  // Compare characters.
6095  __ bind(&compare_chars);
6096  GenerateAsciiCharsCompareLoop(masm,
6097  left, right, length, scratch2, scratch3,
6098  &strings_not_equal);
6099 
6100  // Characters are equal.
6101  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6102  __ Ret();
6103 }
6104 
6105 
6106 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6107  Register left,
6108  Register right,
6109  Register scratch1,
6110  Register scratch2,
6111  Register scratch3,
6112  Register scratch4) {
6113  Label result_not_equal, compare_lengths;
6114  // Find minimum length and length difference.
6115  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6116  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6117  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6118  Register length_delta = scratch3;
6119  __ mov(scratch1, scratch2, LeaveCC, gt);
6120  Register min_length = scratch1;
6121  STATIC_ASSERT(kSmiTag == 0);
6122  __ cmp(min_length, Operand(0));
6123  __ b(eq, &compare_lengths);
6124 
6125  // Compare loop.
6126  GenerateAsciiCharsCompareLoop(masm,
6127  left, right, min_length, scratch2, scratch4,
6128  &result_not_equal);
6129 
6130  // Compare lengths - strings up to min-length are equal.
6131  __ bind(&compare_lengths);
6132  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6133  // Use length_delta as result if it's zero.
6134  __ mov(r0, Operand(length_delta), SetCC);
6135  __ bind(&result_not_equal);
6136  // Conditionally update the result based either on length_delta or
6137  // the last comparion performed in the loop above.
6138  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
6139  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
6140  __ Ret();
6141 }
6142 
6143 
6144 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6145  MacroAssembler* masm,
6146  Register left,
6147  Register right,
6148  Register length,
6149  Register scratch1,
6150  Register scratch2,
6151  Label* chars_not_equal) {
6152  // Change index to run from -length to -1 by adding length to string
6153  // start. This means that loop ends when index reaches zero, which
6154  // doesn't need an additional compare.
6155  __ SmiUntag(length);
6156  __ add(scratch1, length,
6158  __ add(left, left, Operand(scratch1));
6159  __ add(right, right, Operand(scratch1));
6160  __ rsb(length, length, Operand::Zero());
6161  Register index = length; // index = -length;
6162 
6163  // Compare loop.
6164  Label loop;
6165  __ bind(&loop);
6166  __ ldrb(scratch1, MemOperand(left, index));
6167  __ ldrb(scratch2, MemOperand(right, index));
6168  __ cmp(scratch1, scratch2);
6169  __ b(ne, chars_not_equal);
6170  __ add(index, index, Operand(1), SetCC);
6171  __ b(ne, &loop);
6172 }
6173 
6174 
6175 void StringCompareStub::Generate(MacroAssembler* masm) {
6176  Label runtime;
6177 
6178  Counters* counters = masm->isolate()->counters();
6179 
6180  // Stack frame on entry.
6181  // sp[0]: right string
6182  // sp[4]: left string
6183  __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
6184 
6185  Label not_same;
6186  __ cmp(r0, r1);
6187  __ b(ne, &not_same);
6188  STATIC_ASSERT(EQUAL == 0);
6189  STATIC_ASSERT(kSmiTag == 0);
6190  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6191  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
6192  __ add(sp, sp, Operand(2 * kPointerSize));
6193  __ Ret();
6194 
6195  __ bind(&not_same);
6196 
6197  // Check that both objects are sequential ASCII strings.
6198  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
6199 
6200  // Compare flat ASCII strings natively. Remove arguments from stack first.
6201  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
6202  __ add(sp, sp, Operand(2 * kPointerSize));
6204 
6205  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6206  // tagged as a small integer.
6207  __ bind(&runtime);
6208  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6209 }
6210 
6211 
6212 void StringAddStub::Generate(MacroAssembler* masm) {
6213  Label call_runtime, call_builtin;
6214  Builtins::JavaScript builtin_id = Builtins::ADD;
6215 
6216  Counters* counters = masm->isolate()->counters();
6217 
6218  // Stack on entry:
6219  // sp[0]: second argument (right).
6220  // sp[4]: first argument (left).
6221 
6222  // Load the two arguments.
6223  __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6224  __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6225 
6226  // Make sure that both arguments are strings if not known in advance.
6227  if (flags_ == NO_STRING_ADD_FLAGS) {
6228  __ JumpIfEitherSmi(r0, r1, &call_runtime);
6229  // Load instance types.
6234  STATIC_ASSERT(kStringTag == 0);
6235  // If either is not a string, go to runtime.
6236  __ tst(r4, Operand(kIsNotStringMask));
6237  __ tst(r5, Operand(kIsNotStringMask), eq);
6238  __ b(ne, &call_runtime);
6239  } else {
6240  // Here at least one of the arguments is definitely a string.
6241  // We convert the one that is not known to be a string.
6242  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6243  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6244  GenerateConvertArgument(
6245  masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
6246  builtin_id = Builtins::STRING_ADD_RIGHT;
6247  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6248  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6249  GenerateConvertArgument(
6250  masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
6251  builtin_id = Builtins::STRING_ADD_LEFT;
6252  }
6253  }
6254 
6255  // Both arguments are strings.
6256  // r0: first string
6257  // r1: second string
6258  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6259  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6260  {
6261  Label strings_not_empty;
6262  // Check if either of the strings are empty. In that case return the other.
6265  STATIC_ASSERT(kSmiTag == 0);
6266  __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
6267  __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
6268  STATIC_ASSERT(kSmiTag == 0);
6269  // Else test if second string is empty.
6270  __ cmp(r3, Operand(Smi::FromInt(0)), ne);
6271  __ b(ne, &strings_not_empty); // If either string was empty, return r0.
6272 
6273  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6274  __ add(sp, sp, Operand(2 * kPointerSize));
6275  __ Ret();
6276 
6277  __ bind(&strings_not_empty);
6278  }
6279 
6280  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
6281  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
6282  // Both strings are non-empty.
6283  // r0: first string
6284  // r1: second string
6285  // r2: length of first string
6286  // r3: length of second string
6287  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6288  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6289  // Look at the length of the result of adding the two strings.
6290  Label string_add_flat_result, longer_than_two;
6291  // Adding two lengths can't overflow.
6293  __ add(r6, r2, Operand(r3));
6294  // Use the symbol table when adding two one character strings, as it
6295  // helps later optimizations to return a symbol here.
6296  __ cmp(r6, Operand(2));
6297  __ b(ne, &longer_than_two);
6298 
6299  // Check that both strings are non-external ASCII strings.
6300  if (flags_ != NO_STRING_ADD_FLAGS) {
6305  }
6306  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
6307  &call_runtime);
6308 
6309  // Get the two characters forming the sub string.
6312 
6313  // Try to lookup two character string in symbol table. If it is not found
6314  // just allocate a new one.
6315  Label make_two_character_string;
6317  masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
6318  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6319  __ add(sp, sp, Operand(2 * kPointerSize));
6320  __ Ret();
6321 
6322  __ bind(&make_two_character_string);
6323  // Resulting string has length 2 and first chars of two strings
6324  // are combined into single halfword in r2 register.
6325  // So we can fill resulting string without two loops by a single
6326  // halfword store instruction (which assumes that processor is
6327  // in a little endian mode)
6328  __ mov(r6, Operand(2));
6329  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
6331  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6332  __ add(sp, sp, Operand(2 * kPointerSize));
6333  __ Ret();
6334 
6335  __ bind(&longer_than_two);
6336  // Check if resulting string will be flat.
6337  __ cmp(r6, Operand(ConsString::kMinLength));
6338  __ b(lt, &string_add_flat_result);
6339  // Handle exceptionally long strings in the runtime system.
6340  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6342  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
6343  __ cmp(r6, Operand(String::kMaxLength + 1));
6344  __ b(hs, &call_runtime);
6345 
6346  // If result is not supposed to be flat, allocate a cons string object.
6347  // If both strings are ASCII the result is an ASCII cons string.
6348  if (flags_ != NO_STRING_ADD_FLAGS) {
6353  }
6354  Label non_ascii, allocated, ascii_data;
6356  __ tst(r4, Operand(kStringEncodingMask));
6357  __ tst(r5, Operand(kStringEncodingMask), ne);
6358  __ b(eq, &non_ascii);
6359 
6360  // Allocate an ASCII cons string.
6361  __ bind(&ascii_data);
6362  __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
6363  __ bind(&allocated);
6364  // Fill the fields of the cons string.
6367  __ mov(r0, Operand(r7));
6368  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6369  __ add(sp, sp, Operand(2 * kPointerSize));
6370  __ Ret();
6371 
6372  __ bind(&non_ascii);
6373  // At least one of the strings is two-byte. Check whether it happens
6374  // to contain only ASCII characters.
6375  // r4: first instance type.
6376  // r5: second instance type.
6377  __ tst(r4, Operand(kAsciiDataHintMask));
6378  __ tst(r5, Operand(kAsciiDataHintMask), ne);
6379  __ b(ne, &ascii_data);
6380  __ eor(r4, r4, Operand(r5));
6382  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6383  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6384  __ b(eq, &ascii_data);
6385 
6386  // Allocate a two byte cons string.
6387  __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
6388  __ jmp(&allocated);
6389 
6390  // We cannot encounter sliced strings or cons strings here since:
6392  // Handle creating a flat result from either external or sequential strings.
6393  // Locate the first characters' locations.
6394  // r0: first string
6395  // r1: second string
6396  // r2: length of first string
6397  // r3: length of second string
6398  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6399  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6400  // r6: sum of lengths.
6401  Label first_prepared, second_prepared;
6402  __ bind(&string_add_flat_result);
6403  if (flags_ != NO_STRING_ADD_FLAGS) {
6408  }
6409 
6410  // Check whether both strings have same encoding
6411  __ eor(r7, r4, Operand(r5));
6412  __ tst(r7, Operand(kStringEncodingMask));
6413  __ b(ne, &call_runtime);
6414 
6416  __ tst(r4, Operand(kStringRepresentationMask));
6418  __ add(r7,
6419  r0,
6421  LeaveCC,
6422  eq);
6423  __ b(eq, &first_prepared);
6424  // External string: rule out short external string and load string resource.
6426  __ tst(r4, Operand(kShortExternalStringMask));
6427  __ b(ne, &call_runtime);
6429  __ bind(&first_prepared);
6430 
6432  __ tst(r5, Operand(kStringRepresentationMask));
6434  __ add(r1,
6435  r1,
6437  LeaveCC,
6438  eq);
6439  __ b(eq, &second_prepared);
6440  // External string: rule out short external string and load string resource.
6442  __ tst(r5, Operand(kShortExternalStringMask));
6443  __ b(ne, &call_runtime);
6445  __ bind(&second_prepared);
6446 
6447  Label non_ascii_string_add_flat_result;
6448  // r7: first character of first string
6449  // r1: first character of second string
6450  // r2: length of first string.
6451  // r3: length of second string.
6452  // r6: sum of lengths.
6453  // Both strings have the same encoding.
6455  __ tst(r5, Operand(kStringEncodingMask));
6456  __ b(eq, &non_ascii_string_add_flat_result);
6457 
6458  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
6460  // r0: result string.
6461  // r7: first character of first string.
6462  // r1: first character of second string.
6463  // r2: length of first string.
6464  // r3: length of second string.
6465  // r6: first character of result.
6467  // r6: next character of result.
6469  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6470  __ add(sp, sp, Operand(2 * kPointerSize));
6471  __ Ret();
6472 
6473  __ bind(&non_ascii_string_add_flat_result);
6474  __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
6476  // r0: result string.
6477  // r7: first character of first string.
6478  // r1: first character of second string.
6479  // r2: length of first string.
6480  // r3: length of second string.
6481  // r6: first character of result.
6482  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
6483  // r6: next character of result.
6484  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
6485  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6486  __ add(sp, sp, Operand(2 * kPointerSize));
6487  __ Ret();
6488 
6489  // Just jump to runtime to add the two strings.
6490  __ bind(&call_runtime);
6491  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6492 
6493  if (call_builtin.is_linked()) {
6494  __ bind(&call_builtin);
6495  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6496  }
6497 }
6498 
6499 
6500 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6501  int stack_offset,
6502  Register arg,
6503  Register scratch1,
6504  Register scratch2,
6505  Register scratch3,
6506  Register scratch4,
6507  Label* slow) {
6508  // First check if the argument is already a string.
6509  Label not_string, done;
6510  __ JumpIfSmi(arg, &not_string);
6511  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
6512  __ b(lt, &done);
6513 
6514  // Check the number to string cache.
6515  Label not_cached;
6516  __ bind(&not_string);
6517  // Puts the cached result into scratch1.
6519  arg,
6520  scratch1,
6521  scratch2,
6522  scratch3,
6523  scratch4,
6524  false,
6525  &not_cached);
6526  __ mov(arg, scratch1);
6527  __ str(arg, MemOperand(sp, stack_offset));
6528  __ jmp(&done);
6529 
6530  // Check if the argument is a safe string wrapper.
6531  __ bind(&not_cached);
6532  __ JumpIfSmi(arg, slow);
6533  __ CompareObjectType(
6534  arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
6535  __ b(ne, slow);
6536  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6537  __ and_(scratch2,
6538  scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6539  __ cmp(scratch2,
6541  __ b(ne, slow);
6542  __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6543  __ str(arg, MemOperand(sp, stack_offset));
6544 
6545  __ bind(&done);
6546 }
6547 
6548 
6549 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6550  ASSERT(state_ == CompareIC::SMIS);
6551  Label miss;
6552  __ orr(r2, r1, r0);
6553  __ JumpIfNotSmi(r2, &miss);
6554 
6555  if (GetCondition() == eq) {
6556  // For equality we do not care about the sign of the result.
6557  __ sub(r0, r0, r1, SetCC);
6558  } else {
6559  // Untag before subtracting to avoid handling overflow.
6560  __ SmiUntag(r1);
6561  __ sub(r0, r1, SmiUntagOperand(r0));
6562  }
6563  __ Ret();
6564 
6565  __ bind(&miss);
6566  GenerateMiss(masm);
6567 }
6568 
6569 
6570 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6571  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6572 
6573  Label generic_stub;
6574  Label unordered, maybe_undefined1, maybe_undefined2;
6575  Label miss;
6576  __ and_(r2, r1, Operand(r0));
6577  __ JumpIfSmi(r2, &generic_stub);
6578 
6579  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
6580  __ b(ne, &maybe_undefined1);
6581  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6582  __ b(ne, &maybe_undefined2);
6583 
6584  // Inlining the double comparison and falling back to the general compare
6585  // stub if NaN is involved or VFP3 is unsupported.
6587  CpuFeatures::Scope scope(VFP3);
6588 
6589  // Load left and right operand
6590  __ sub(r2, r1, Operand(kHeapObjectTag));
6591  __ vldr(d0, r2, HeapNumber::kValueOffset);
6592  __ sub(r2, r0, Operand(kHeapObjectTag));
6593  __ vldr(d1, r2, HeapNumber::kValueOffset);
6594 
6595  // Compare operands
6596  __ VFPCompareAndSetFlags(d0, d1);
6597 
6598  // Don't base result on status bits when a NaN is involved.
6599  __ b(vs, &unordered);
6600 
6601  // Return a result of -1, 0, or 1, based on status bits.
6602  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
6603  __ mov(r0, Operand(LESS), LeaveCC, lt);
6604  __ mov(r0, Operand(GREATER), LeaveCC, gt);
6605  __ Ret();
6606  }
6607 
6608  __ bind(&unordered);
6609  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
6610  __ bind(&generic_stub);
6611  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6612 
6613  __ bind(&maybe_undefined1);
6615  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
6616  __ b(ne, &miss);
6617  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6618  __ b(ne, &maybe_undefined2);
6619  __ jmp(&unordered);
6620  }
6621 
6622  __ bind(&maybe_undefined2);
6624  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
6625  __ b(eq, &unordered);
6626  }
6627 
6628  __ bind(&miss);
6629  GenerateMiss(masm);
6630 }
6631 
6632 
6633 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6634  ASSERT(state_ == CompareIC::SYMBOLS);
6635  Label miss;
6636 
6637  // Registers containing left and right operands respectively.
6638  Register left = r1;
6639  Register right = r0;
6640  Register tmp1 = r2;
6641  Register tmp2 = r3;
6642 
6643  // Check that both operands are heap objects.
6644  __ JumpIfEitherSmi(left, right, &miss);
6645 
6646  // Check that both operands are symbols.
6647  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6648  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6649  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6650  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6651  STATIC_ASSERT(kSymbolTag != 0);
6652  __ and_(tmp1, tmp1, Operand(tmp2));
6653  __ tst(tmp1, Operand(kIsSymbolMask));
6654  __ b(eq, &miss);
6655 
6656  // Symbols are compared by identity.
6657  __ cmp(left, right);
6658  // Make sure r0 is non-zero. At this point input operands are
6659  // guaranteed to be non-zero.
6660  ASSERT(right.is(r0));
6661  STATIC_ASSERT(EQUAL == 0);
6662  STATIC_ASSERT(kSmiTag == 0);
6663  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
6664  __ Ret();
6665 
6666  __ bind(&miss);
6667  GenerateMiss(masm);
6668 }
6669 
6670 
6671 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6672  ASSERT(state_ == CompareIC::STRINGS);
6673  Label miss;
6674 
6675  bool equality = Token::IsEqualityOp(op_);
6676 
6677  // Registers containing left and right operands respectively.
6678  Register left = r1;
6679  Register right = r0;
6680  Register tmp1 = r2;
6681  Register tmp2 = r3;
6682  Register tmp3 = r4;
6683  Register tmp4 = r5;
6684 
6685  // Check that both operands are heap objects.
6686  __ JumpIfEitherSmi(left, right, &miss);
6687 
6688  // Check that both operands are strings. This leaves the instance
6689  // types loaded in tmp1 and tmp2.
6690  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6691  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6692  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6693  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6695  __ orr(tmp3, tmp1, tmp2);
6696  __ tst(tmp3, Operand(kIsNotStringMask));
6697  __ b(ne, &miss);
6698 
6699  // Fast check for identical strings.
6700  __ cmp(left, right);
6701  STATIC_ASSERT(EQUAL == 0);
6702  STATIC_ASSERT(kSmiTag == 0);
6703  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
6704  __ Ret(eq);
6705 
6706  // Handle not identical strings.
6707 
6708  // Check that both strings are symbols. If they are, we're done
6709  // because we already know they are not identical.
6710  if (equality) {
6711  ASSERT(GetCondition() == eq);
6712  STATIC_ASSERT(kSymbolTag != 0);
6713  __ and_(tmp3, tmp1, Operand(tmp2));
6714  __ tst(tmp3, Operand(kIsSymbolMask));
6715  // Make sure r0 is non-zero. At this point input operands are
6716  // guaranteed to be non-zero.
6717  ASSERT(right.is(r0));
6718  __ Ret(ne);
6719  }
6720 
6721  // Check that both strings are sequential ASCII.
6722  Label runtime;
6723  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6724  tmp1, tmp2, tmp3, tmp4, &runtime);
6725 
6726  // Compare flat ASCII strings. Returns when done.
6727  if (equality) {
6729  masm, left, right, tmp1, tmp2, tmp3);
6730  } else {
6732  masm, left, right, tmp1, tmp2, tmp3, tmp4);
6733  }
6734 
6735  // Handle more complex cases in runtime.
6736  __ bind(&runtime);
6737  __ Push(left, right);
6738  if (equality) {
6739  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6740  } else {
6741  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6742  }
6743 
6744  __ bind(&miss);
6745  GenerateMiss(masm);
6746 }
6747 
6748 
6749 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6750  ASSERT(state_ == CompareIC::OBJECTS);
6751  Label miss;
6752  __ and_(r2, r1, Operand(r0));
6753  __ JumpIfSmi(r2, &miss);
6754 
6755  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
6756  __ b(ne, &miss);
6757  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
6758  __ b(ne, &miss);
6759 
6760  ASSERT(GetCondition() == eq);
6761  __ sub(r0, r0, Operand(r1));
6762  __ Ret();
6763 
6764  __ bind(&miss);
6765  GenerateMiss(masm);
6766 }
6767 
6768 
6769 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6770  Label miss;
6771  __ and_(r2, r1, Operand(r0));
6772  __ JumpIfSmi(r2, &miss);
6775  __ cmp(r2, Operand(known_map_));
6776  __ b(ne, &miss);
6777  __ cmp(r3, Operand(known_map_));
6778  __ b(ne, &miss);
6779 
6780  __ sub(r0, r0, Operand(r1));
6781  __ Ret();
6782 
6783  __ bind(&miss);
6784  GenerateMiss(masm);
6785 }
6786 
6787 
6788 
6789 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6790  {
6791  // Call the runtime system in a fresh internal frame.
6792  ExternalReference miss =
6793  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
6794 
6795  FrameScope scope(masm, StackFrame::INTERNAL);
6796  __ Push(r1, r0);
6797  __ push(lr);
6798  __ Push(r1, r0);
6799  __ mov(ip, Operand(Smi::FromInt(op_)));
6800  __ push(ip);
6801  __ CallExternalReference(miss, 3);
6802  // Compute the entry point of the rewritten stub.
6803  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
6804  // Restore registers.
6805  __ pop(lr);
6806  __ pop(r0);
6807  __ pop(r1);
6808  }
6809 
6810  __ Jump(r2);
6811 }
6812 
6813 
6814 void DirectCEntryStub::Generate(MacroAssembler* masm) {
6815  __ ldr(pc, MemOperand(sp, 0));
6816 }
6817 
6818 
6819 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6820  ExternalReference function) {
6821  __ mov(r2, Operand(function));
6822  GenerateCall(masm, r2);
6823 }
6824 
6825 
6826 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6827  Register target) {
6828  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6829  RelocInfo::CODE_TARGET));
6830 
6831  // Prevent literal pool emission during calculation of return address.
6832  Assembler::BlockConstPoolScope block_const_pool(masm);
6833 
6834  // Push return address (accessible to GC through exit frame pc).
6835  // Note that using pc with str is deprecated.
6836  Label start;
6837  __ bind(&start);
6838  __ add(ip, pc, Operand(Assembler::kInstrSize));
6839  __ str(ip, MemOperand(sp, 0));
6840  __ Jump(target); // Call the C++ function.
6842  masm->SizeOfCodeGeneratedSince(&start));
6843 }
6844 
6845 
6846 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6847  Label* miss,
6848  Label* done,
6849  Register receiver,
6850  Register properties,
6851  Handle<String> name,
6852  Register scratch0) {
6853  // If names of slots in range from 1 to kProbes - 1 for the hash value are
6854  // not equal to the name and kProbes-th slot is not used (its name is the
6855  // undefined value), it guarantees the hash table doesn't contain the
6856  // property. It's true even if some slots represent deleted properties
6857  // (their names are the hole value).
6858  for (int i = 0; i < kInlinedProbes; i++) {
6859  // scratch0 points to properties hash.
6860  // Compute the masked index: (hash + i + i * i) & mask.
6861  Register index = scratch0;
6862  // Capacity is smi 2^n.
6863  __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
6864  __ sub(index, index, Operand(1));
6865  __ and_(index, index, Operand(
6866  Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
6867 
6868  // Scale the index by multiplying by the entry size.
6870  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
6871 
6872  Register entity_name = scratch0;
6873  // Having undefined at this place means the name is not contained.
6874  ASSERT_EQ(kSmiTagSize, 1);
6875  Register tmp = properties;
6876  __ add(tmp, properties, Operand(index, LSL, 1));
6877  __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6878 
6879  ASSERT(!tmp.is(entity_name));
6880  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6881  __ cmp(entity_name, tmp);
6882  __ b(eq, done);
6883 
6884  if (i != kInlinedProbes - 1) {
6885  // Load the hole ready for use below:
6886  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
6887 
6888  // Stop if found the property.
6889  __ cmp(entity_name, Operand(Handle<String>(name)));
6890  __ b(eq, miss);
6891 
6892  Label the_hole;
6893  __ cmp(entity_name, tmp);
6894  __ b(eq, &the_hole);
6895 
6896  // Check if the entry name is not a symbol.
6897  __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
6898  __ ldrb(entity_name,
6900  __ tst(entity_name, Operand(kIsSymbolMask));
6901  __ b(eq, miss);
6902 
6903  __ bind(&the_hole);
6904 
6905  // Restore the properties.
6906  __ ldr(properties,
6908  }
6909  }
6910 
6911  const int spill_mask =
6912  (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
6913  r2.bit() | r1.bit() | r0.bit());
6914 
6915  __ stm(db_w, sp, spill_mask);
6917  __ mov(r1, Operand(Handle<String>(name)));
6919  __ CallStub(&stub);
6920  __ cmp(r0, Operand(0));
6921  __ ldm(ia_w, sp, spill_mask);
6922 
6923  __ b(eq, done);
6924  __ b(ne, miss);
6925 }
6926 
6927 
6928 // Probe the string dictionary in the |elements| register. Jump to the
6929 // |done| label if a property with the given name is found. Jump to
6930 // the |miss| label otherwise.
6931 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
6932 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6933  Label* miss,
6934  Label* done,
6935  Register elements,
6936  Register name,
6937  Register scratch1,
6938  Register scratch2) {
6939  ASSERT(!elements.is(scratch1));
6940  ASSERT(!elements.is(scratch2));
6941  ASSERT(!name.is(scratch1));
6942  ASSERT(!name.is(scratch2));
6943 
6944  // Assert that name contains a string.
6945  if (FLAG_debug_code) __ AbortIfNotString(name);
6946 
6947  // Compute the capacity mask.
6948  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
6949  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
6950  __ sub(scratch1, scratch1, Operand(1));
6951 
6952  // Generate an unrolled loop that performs a few probes before
6953  // giving up. Measurements done on Gmail indicate that 2 probes
6954  // cover ~93% of loads from dictionaries.
6955  for (int i = 0; i < kInlinedProbes; i++) {
6956  // Compute the masked index: (hash + i + i * i) & mask.
6957  __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
6958  if (i > 0) {
6959  // Add the probe offset (i + i * i) left shifted to avoid right shifting
6960  // the hash in a separate instruction. The value hash + i + i * i is right
6961  // shifted in the following and instruction.
6962  ASSERT(StringDictionary::GetProbeOffset(i) <
6963  1 << (32 - String::kHashFieldOffset));
6964  __ add(scratch2, scratch2, Operand(
6965  StringDictionary::GetProbeOffset(i) << String::kHashShift));
6966  }
6967  __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
6968 
6969  // Scale the index by multiplying by the element size.
6971  // scratch2 = scratch2 * 3.
6972  __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
6973 
6974  // Check if the key is identical to the name.
6975  __ add(scratch2, elements, Operand(scratch2, LSL, 2));
6976  __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
6977  __ cmp(name, Operand(ip));
6978  __ b(eq, done);
6979  }
6980 
6981  const int spill_mask =
6982  (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
6983  r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
6984  ~(scratch1.bit() | scratch2.bit());
6985 
6986  __ stm(db_w, sp, spill_mask);
6987  if (name.is(r0)) {
6988  ASSERT(!elements.is(r1));
6989  __ Move(r1, name);
6990  __ Move(r0, elements);
6991  } else {
6992  __ Move(r0, elements);
6993  __ Move(r1, name);
6994  }
6996  __ CallStub(&stub);
6997  __ cmp(r0, Operand(0));
6998  __ mov(scratch2, Operand(r2));
6999  __ ldm(ia_w, sp, spill_mask);
7000 
7001  __ b(ne, done);
7002  __ b(eq, miss);
7003 }
7004 
7005 
7006 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7007  // This stub overrides SometimesSetsUpAFrame() to return false. That means
7008  // we cannot call anything that could cause a GC from this stub.
7009  // Registers:
7010  // result: StringDictionary to probe
7011  // r1: key
7012  // : StringDictionary to probe.
7013  // index_: will hold an index of entry if lookup is successful.
7014  // might alias with result_.
7015  // Returns:
7016  // result_ is zero if lookup failed, non zero otherwise.
7017 
7018  Register result = r0;
7019  Register dictionary = r0;
7020  Register key = r1;
7021  Register index = r2;
7022  Register mask = r3;
7023  Register hash = r4;
7024  Register undefined = r5;
7025  Register entry_key = r6;
7026 
7027  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7028 
7029  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
7030  __ mov(mask, Operand(mask, ASR, kSmiTagSize));
7031  __ sub(mask, mask, Operand(1));
7032 
7033  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
7034 
7035  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7036 
7037  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7038  // Compute the masked index: (hash + i + i * i) & mask.
7039  // Capacity is smi 2^n.
7040  if (i > 0) {
7041  // Add the probe offset (i + i * i) left shifted to avoid right shifting
7042  // the hash in a separate instruction. The value hash + i + i * i is right
7043  // shifted in the following and instruction.
7044  ASSERT(StringDictionary::GetProbeOffset(i) <
7045  1 << (32 - String::kHashFieldOffset));
7046  __ add(index, hash, Operand(
7047  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7048  } else {
7049  __ mov(index, Operand(hash));
7050  }
7051  __ and_(index, mask, Operand(index, LSR, String::kHashShift));
7052 
7053  // Scale the index by multiplying by the entry size.
7055  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
7056 
7057  ASSERT_EQ(kSmiTagSize, 1);
7058  __ add(index, dictionary, Operand(index, LSL, 2));
7059  __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
7060 
7061  // Having undefined at this place means the name is not contained.
7062  __ cmp(entry_key, Operand(undefined));
7063  __ b(eq, &not_in_dictionary);
7064 
7065  // Stop if found the property.
7066  __ cmp(entry_key, Operand(key));
7067  __ b(eq, &in_dictionary);
7068 
7069  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7070  // Check if the entry name is not a symbol.
7071  __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7072  __ ldrb(entry_key,
7074  __ tst(entry_key, Operand(kIsSymbolMask));
7075  __ b(eq, &maybe_in_dictionary);
7076  }
7077  }
7078 
7079  __ bind(&maybe_in_dictionary);
7080  // If we are doing negative lookup then probing failure should be
7081  // treated as a lookup success. For positive lookup probing failure
7082  // should be treated as lookup failure.
7083  if (mode_ == POSITIVE_LOOKUP) {
7084  __ mov(result, Operand::Zero());
7085  __ Ret();
7086  }
7087 
7088  __ bind(&in_dictionary);
7089  __ mov(result, Operand(1));
7090  __ Ret();
7091 
7092  __ bind(&not_in_dictionary);
7093  __ mov(result, Operand::Zero());
7094  __ Ret();
7095 }
7096 
7097 
7098 struct AheadOfTimeWriteBarrierStubList {
7099  Register object, value, address;
7100  RememberedSetAction action;
7101 };
7102 
7103 #define REG(Name) { kRegister_ ## Name ## _Code }
7104 
7105 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7106  // Used in RegExpExecStub.
7107  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
7108  { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
7109  // Used in CompileArrayPushCall.
7110  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7111  // Also used in KeyedStoreIC::GenerateGeneric.
7112  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
7113  // Used in CompileStoreGlobal.
7114  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
7115  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7116  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
7117  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
7118  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7119  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
7120  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
7121  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7122  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
7123  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
7124  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
7125  // and ElementsTransitionGenerator::GenerateSmiToDouble
7126  // and ElementsTransitionGenerator::GenerateDoubleToObject
7127  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
7128  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
7129  // ElementsTransitionGenerator::GenerateDoubleToObject
7130  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
7131  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
7132  // StoreArrayLiteralElementStub::Generate
7133  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
7134  // Null termination.
7135  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7136 };
7137 
7138 #undef REG
7139 
7141  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7142  !entry->object.is(no_reg);
7143  entry++) {
7144  if (object_.is(entry->object) &&
7145  value_.is(entry->value) &&
7146  address_.is(entry->address) &&
7147  remembered_set_action_ == entry->action &&
7148  save_fp_regs_mode_ == kDontSaveFPRegs) {
7149  return true;
7150  }
7151  }
7152  return false;
7153 }
7154 
7155 
7157  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7158 }
7159 
7160 
7163  stub1.GetCode()->set_is_pregenerated(true);
7164 }
7165 
7166 
7168  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7169  !entry->object.is(no_reg);
7170  entry++) {
7171  RecordWriteStub stub(entry->object,
7172  entry->value,
7173  entry->address,
7174  entry->action,
7175  kDontSaveFPRegs);
7176  stub.GetCode()->set_is_pregenerated(true);
7177  }
7178 }
7179 
7180 
7181 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
7182 // the value has just been written into the object, now this stub makes sure
7183 // we keep the GC informed. The word in the object where the value has been
7184 // written is in the address register.
7185 void RecordWriteStub::Generate(MacroAssembler* masm) {
7186  Label skip_to_incremental_noncompacting;
7187  Label skip_to_incremental_compacting;
7188 
7189  // The first two instructions are generated with labels so as to get the
7190  // offset fixed up correctly by the bind(Label*) call. We patch it back and
7191  // forth between a compare instructions (a nop in this position) and the
7192  // real branch when we start and stop incremental heap marking.
7193  // See RecordWriteStub::Patch for details.
7194  {
7195  // Block literal pool emission, as the position of these two instructions
7196  // is assumed by the patching code.
7197  Assembler::BlockConstPoolScope block_const_pool(masm);
7198  __ b(&skip_to_incremental_noncompacting);
7199  __ b(&skip_to_incremental_compacting);
7200  }
7201 
7202  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7203  __ RememberedSetHelper(object_,
7204  address_,
7205  value_,
7206  save_fp_regs_mode_,
7208  }
7209  __ Ret();
7210 
7211  __ bind(&skip_to_incremental_noncompacting);
7212  GenerateIncremental(masm, INCREMENTAL);
7213 
7214  __ bind(&skip_to_incremental_compacting);
7215  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7216 
7217  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7218  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7219  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
7220  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
7221  PatchBranchIntoNop(masm, 0);
7223 }
7224 
7225 
7226 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7227  regs_.Save(masm);
7228 
7229  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7230  Label dont_need_remembered_set;
7231 
7232  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
7233  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7234  regs_.scratch0(),
7235  &dont_need_remembered_set);
7236 
7237  __ CheckPageFlag(regs_.object(),
7238  regs_.scratch0(),
7240  ne,
7241  &dont_need_remembered_set);
7242 
7243  // First notify the incremental marker if necessary, then update the
7244  // remembered set.
7245  CheckNeedsToInformIncrementalMarker(
7246  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7247  InformIncrementalMarker(masm, mode);
7248  regs_.Restore(masm);
7249  __ RememberedSetHelper(object_,
7250  address_,
7251  value_,
7252  save_fp_regs_mode_,
7254 
7255  __ bind(&dont_need_remembered_set);
7256  }
7257 
7258  CheckNeedsToInformIncrementalMarker(
7259  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7260  InformIncrementalMarker(masm, mode);
7261  regs_.Restore(masm);
7262  __ Ret();
7263 }
7264 
7265 
7266 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7267  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7268  int argument_count = 3;
7269  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7270  Register address =
7271  r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7272  ASSERT(!address.is(regs_.object()));
7273  ASSERT(!address.is(r0));
7274  __ Move(address, regs_.address());
7275  __ Move(r0, regs_.object());
7276  if (mode == INCREMENTAL_COMPACTION) {
7277  __ Move(r1, address);
7278  } else {
7279  ASSERT(mode == INCREMENTAL);
7280  __ ldr(r1, MemOperand(address, 0));
7281  }
7282  __ mov(r2, Operand(ExternalReference::isolate_address()));
7283 
7284  AllowExternalCallThatCantCauseGC scope(masm);
7285  if (mode == INCREMENTAL_COMPACTION) {
7286  __ CallCFunction(
7287  ExternalReference::incremental_evacuation_record_write_function(
7288  masm->isolate()),
7289  argument_count);
7290  } else {
7291  ASSERT(mode == INCREMENTAL);
7292  __ CallCFunction(
7293  ExternalReference::incremental_marking_record_write_function(
7294  masm->isolate()),
7295  argument_count);
7296  }
7297  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7298 }
7299 
7300 
7301 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7302  MacroAssembler* masm,
7303  OnNoNeedToInformIncrementalMarker on_no_need,
7304  Mode mode) {
7305  Label on_black;
7306  Label need_incremental;
7307  Label need_incremental_pop_scratch;
7308 
7309  // Let's look at the color of the object: If it is not black we don't have
7310  // to inform the incremental marker.
7311  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7312 
7313  regs_.Restore(masm);
7314  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7315  __ RememberedSetHelper(object_,
7316  address_,
7317  value_,
7318  save_fp_regs_mode_,
7320  } else {
7321  __ Ret();
7322  }
7323 
7324  __ bind(&on_black);
7325 
7326  // Get the value from the slot.
7327  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
7328 
7329  if (mode == INCREMENTAL_COMPACTION) {
7330  Label ensure_not_white;
7331 
7332  __ CheckPageFlag(regs_.scratch0(), // Contains value.
7333  regs_.scratch1(), // Scratch.
7335  eq,
7336  &ensure_not_white);
7337 
7338  __ CheckPageFlag(regs_.object(),
7339  regs_.scratch1(), // Scratch.
7341  eq,
7342  &need_incremental);
7343 
7344  __ bind(&ensure_not_white);
7345  }
7346 
7347  // We need extra registers for this, so we push the object and the address
7348  // register temporarily.
7349  __ Push(regs_.object(), regs_.address());
7350  __ EnsureNotWhite(regs_.scratch0(), // The value.
7351  regs_.scratch1(), // Scratch.
7352  regs_.object(), // Scratch.
7353  regs_.address(), // Scratch.
7354  &need_incremental_pop_scratch);
7355  __ Pop(regs_.object(), regs_.address());
7356 
7357  regs_.Restore(masm);
7358  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7359  __ RememberedSetHelper(object_,
7360  address_,
7361  value_,
7362  save_fp_regs_mode_,
7364  } else {
7365  __ Ret();
7366  }
7367 
7368  __ bind(&need_incremental_pop_scratch);
7369  __ Pop(regs_.object(), regs_.address());
7370 
7371  __ bind(&need_incremental);
7372 
7373  // Fall through when we need to inform the incremental marker.
7374 }
7375 
7376 
7377 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7378  // ----------- S t a t e -------------
7379  // -- r0 : element value to store
7380  // -- r1 : array literal
7381  // -- r2 : map of array literal
7382  // -- r3 : element index as smi
7383  // -- r4 : array literal index in function as smi
7384  // -----------------------------------
7385 
7386  Label element_done;
7387  Label double_elements;
7388  Label smi_element;
7389  Label slow_elements;
7390  Label fast_elements;
7391 
7392  __ CheckFastElements(r2, r5, &double_elements);
7393  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
7394  __ JumpIfSmi(r0, &smi_element);
7395  __ CheckFastSmiElements(r2, r5, &fast_elements);
7396 
7397  // Store into the array literal requires a elements transition. Call into
7398  // the runtime.
7399  __ bind(&slow_elements);
7400  // call.
7401  __ Push(r1, r3, r0);
7404  __ Push(r5, r4);
7405  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7406 
7407  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
7408  __ bind(&fast_elements);
7410  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
7411  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7412  __ str(r0, MemOperand(r6, 0));
7413  // Update the write barrier for the array store.
7414  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
7416  __ Ret();
7417 
7418  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
7419  // and value is Smi.
7420  __ bind(&smi_element);
7422  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
7424  __ Ret();
7425 
7426  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7427  __ bind(&double_elements);
7429  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
7430  &slow_elements);
7431  __ Ret();
7432 }
7433 
7434 #undef __
7435 
7436 } } // namespace v8::internal
7437 
7438 #endif // V8_TARGET_ARCH_ARM
static const int kResourceDataOffset
Definition: objects.h:7517
const Register cp
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kBitFieldOffset
Definition: objects.h:4994
void GenerateFast(MacroAssembler *masm)
STATIC_CHECK((kStringRepresentationMask|kStringEncodingMask)==Internals::kFullStringRepresentationMask)
static const int kNumRegisters
const intptr_t kSmiTagMask
Definition: v8.h:3855
static int GetBranchOffset(Instr instr)
static const int kCodeOffset
Definition: objects.h:5606
static const int kEvacuationCandidateMask
Definition: spaces.h:407
#define CHECK_EQ(expected, value)
Definition: checks.h:219
static void LoadNumberAsInt32Double(MacroAssembler *masm, Register object, Destination destination, DwVfpRegister double_dst, Register dst1, Register dst2, Register heap_number_map, Register scratch1, Register scratch2, SwVfpRegister single_scratch, Label *not_int32)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kCodeEntryOffset
Definition: objects.h:5981
static const int kMaxAsciiCharCode
Definition: objects.h:7107
const RegList kCallerSaved
Definition: frames-arm.h:75
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:5982
#define COMPARE(asm_, compare_string)
static int SlotOffset(int index)
Definition: contexts.h:408
RecordWriteStub(Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode)
const Register r3
static const int kStaticOffsetsVectorSize
Definition: jsregexp.h:1649
static const int kArgumentsObjectSize
Definition: heap.h:863
static void GenerateFixedRegStubsAheadOfTime()
const uint32_t kTwoByteStringTag
Definition: objects.h:450
const int kFailureTypeTagSize
Definition: objects.h:1037
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2220
static Failure * InternalError()
Definition: objects-inl.h:1011
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
static const char * Name(Value tok)
Definition: token.h:196
static Smi * FromInt(int value)
Definition: objects-inl.h:973
void Generate(MacroAssembler *masm)
static void DoubleIs32BitInteger(MacroAssembler *masm, Register src1, Register src2, Register dst, Register scratch, Label *not_int32)
const DwVfpRegister d5
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
const DwVfpRegister d0
static const int kDataOffset
Definition: objects.h:6432
static const int kGlobalReceiverOffset
Definition: objects.h:6085
const Register r6
static void GenerateCopyCharactersLong(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, int flags)
void Generate(MacroAssembler *masm)
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1021
static void LoadNumberAsInt32(MacroAssembler *masm, Register object, Register dst, Register heap_number_map, Register scratch1, Register scratch2, Register scratch3, DwVfpRegister double_scratch, Label *not_int32)
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
Flag flags[]
Definition: flags.cc:1467
static const int kExponentBias
Definition: objects.h:1321
int int32_t
Definition: unicode.cc:47
static Handle< Object > UninitializedSentinel(Isolate *isolate)
Definition: objects-inl.h:5052
SwVfpRegister high() const
static bool IsSupported(CpuFeature f)
static Failure * Exception()
Definition: objects-inl.h:1016
void Generate(MacroAssembler *masm)
virtual bool IsPregenerated()
void Generate(MacroAssembler *masm)
const DwVfpRegister d6
#define ASSERT(condition)
Definition: checks.h:270
static void LoadOperands(MacroAssembler *masm, FloatingPointHelper::Destination destination, Register heap_number_map, Register scratch1, Register scratch2, Label *not_number)
friend class BlockConstPoolScope
WriteInt32ToHeapNumberStub(Register the_int, Register the_heap_number, Register scratch)
const int kPointerSizeLog2
Definition: globals.h:246
static const int kInstanceSizeOffset
Definition: objects.h:4981
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
Definition: objects-inl.h:5057
#define kFirstCalleeSavedDoubleReg
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2285
const uint32_t kStringRepresentationMask
Definition: objects.h:455
MemOperand GlobalObjectOperand()
const Register r2
static DwVfpRegister from_code(int code)
static const int kSize
Definition: objects.h:8134
const intptr_t kObjectAlignmentMask
Definition: v8globals.h:45
static const int kGlobalContextOffset
Definition: objects.h:6084
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Definition: objects.h:5986
const uint32_t kAsciiDataHintTag
Definition: objects.h:479
const uint32_t kShortExternalStringMask
Definition: objects.h:483
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
static const int kLastSubjectOffset
Definition: jsregexp.h:152
const int kIntSize
Definition: globals.h:231
static const int kZeroHash
Definition: objects.h:6816
#define V8_INFINITY
Definition: globals.h:32
void Generate(MacroAssembler *masm)
static const int kHashFieldOffset
Definition: objects.h:7099
static const int kSize
Definition: objects.h:8112
static const int kLastCaptureCountOffset
Definition: jsregexp.h:150
static const int kFirstOffset
Definition: objects.h:7420
static const int kMinLength
Definition: objects.h:7433
const int kNumDoubleCalleeSaved
Definition: frames-arm.h:86
const uint32_t kNotStringTag
Definition: objects.h:438
const Register sp
static const int kParentOffset
Definition: objects.h:7473
const SwVfpRegister s3
static const int kNonMantissaBitsInTopWord
Definition: objects.h:1324
static const int kLiteralsOffset
Definition: objects.h:5987
#define UNREACHABLE()
Definition: checks.h:50
DwVfpRegister DoubleRegister
static const int kArgumentsObjectSizeStrict
Definition: heap.h:866
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static int ActivationFrameAlignment()
static const int kLengthOffset
Definition: objects.h:7098
const uint32_t kIsSymbolMask
Definition: objects.h:443
static const int kExponentShift
Definition: objects.h:1322
const intptr_t kFailureTagMask
Definition: v8globals.h:73
static const int kValueOffset
Definition: objects.h:1307
const int kFailureTagSize
Definition: v8globals.h:72
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:232
#define kLastCalleeSavedDoubleReg
static const int kIrregexpCaptureCountOffset
Definition: objects.h:6478
static const int kInputOffset
Definition: objects.h:8133
static bool IsBitOp(Value op)
Definition: token.h:256
const SwVfpRegister s13
const uint32_t kIsIndirectStringMask
Definition: objects.h:462
const Register ip
void Generate(MacroAssembler *masm)
const Register r9
const int kPointerSize
Definition: globals.h:234
static void LoadSmis(MacroAssembler *masm, Destination destination, Register scratch1, Register scratch2)
static void CallCCodeForDoubleOperation(MacroAssembler *masm, Token::Value op, Register heap_number_result, Register scratch)
static const int kPcLoadDelta
static const int kStringWrapperSafeForDefaultValueOf
Definition: objects.h:5011
const DwVfpRegister d7
const int kHeapObjectTag
Definition: v8.h:3848
const RegList kCalleeSaved
Definition: frames-arm.h:63
const uint32_t kAsciiDataHintMask
Definition: objects.h:478
#define __
const Register pc
static void ConvertNumberToInt32(MacroAssembler *masm, Register object, Register dst, Register heap_number_map, Register scratch1, Register scratch2, Register scratch3, DwVfpRegister double_scratch, Label *not_int32)
void Generate(MacroAssembler *masm)
static const int kPropertiesOffset
Definition: objects.h:2113
static void PatchBranchIntoNop(MacroAssembler *masm, int pos)
static const int kMinLength
Definition: objects.h:7485
const SwVfpRegister s0
const uint32_t kShortExternalStringTag
Definition: objects.h:484
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
const DwVfpRegister d3
static const int kHeaderSize
Definition: objects.h:7282
static const int kNextFunctionLinkOffset
Definition: objects.h:5989
void Generate(MacroAssembler *masm)
const int kBitsPerByte
Definition: globals.h:251
static int SizeFor(int length)
Definition: objects.h:2369
const Register r0
static const int kElementsOffset
Definition: objects.h:2114
bool IsPowerOf2(T x)
Definition: utils.h:50
const uint32_t kStringTag
Definition: objects.h:437
static bool IsEqualityOp(Value op)
Definition: token.h:222
static const int kOffsetOffset
Definition: objects.h:7474
void Generate(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:8111
static int SizeFor(int length)
Definition: objects.h:2288
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
virtual void Generate(MacroAssembler *masm)
static const int kLastMatchOverhead
Definition: jsregexp.h:147
static const int kHeaderSize
Definition: objects.h:2233
const intptr_t kPointerAlignmentMask
Definition: v8globals.h:49
void Generate(MacroAssembler *masm)
const Register lr
#define ISOLATE
Definition: isolate.h:1410
void GenerateCall(MacroAssembler *masm, ExternalReference function)
static const int kMapOffset
Definition: objects.h:1219
static const int kMantissaBitsInTopWord
Definition: objects.h:1323
bool is(Register reg) const
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:410
const uint32_t kIsNotStringMask
Definition: objects.h:436
const int kNumCalleeSaved
Definition: frames-arm.h:83
const Register r1
const uint32_t kSlicedNotConsMask
Definition: objects.h:473
static const int kLengthOffset
Definition: objects.h:2232
static void ConvertIntToDouble(MacroAssembler *masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, Register dst1, Register dst2, Register scratch2, SwVfpRegister single_scratch)
static const int kSize
Definition: objects.h:1315
void Generate(MacroAssembler *masm)
void Generate(MacroAssembler *masm)
static const int kSecondOffset
Definition: objects.h:7421
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static const int kCallerFPOffset
Definition: frames-arm.h:117
static const int kArgumentsLengthIndex
Definition: heap.h:869
MemOperand FieldMemOperand(Register object, int offset)
const intptr_t kObjectAlignment
Definition: v8globals.h:44
static const int kFirstCaptureOffset
Definition: jsregexp.h:156
#define UNIMPLEMENTED()
Definition: checks.h:48
static const uint32_t kHashBitMask
Definition: objects.h:7125
static const uint32_t kSignMask
Definition: objects.h:1316
static const int kLastInputOffset
Definition: jsregexp.h:154
const int kSmiShiftSize
Definition: v8.h:3899
const int kSmiTagSize
Definition: v8.h:3854
static const int kHeaderSize
Definition: objects.h:4513
const Register r8
void GenerateBody(MacroAssembler *masm, bool is_construct)
static const int kDataAsciiCodeOffset
Definition: objects.h:6474
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
static void GenerateAheadOfTime()
const DwVfpRegister d2
static const int kArgumentsCalleeIndex
Definition: heap.h:871
const int kSmiTag
Definition: v8.h:3853
static const int kIsUndetectable
Definition: objects.h:5005
static const int kHeaderSize
Definition: objects.h:2115
void Generate(MacroAssembler *masm)
void GenerateFast(MacroAssembler *masm)
const int kFailureTag
Definition: v8globals.h:71
static void GenerateLookupNumberStringCache(MacroAssembler *masm, Register object, Register result, Register scratch1, Register scratch2, Register scratch3, bool object_is_smi, Label *not_found)
static const int kInstrSize
static const int kDataTagOffset
Definition: objects.h:6472
static const int kPrototypeOffset
Definition: objects.h:4953
static const int kSize
Definition: objects.h:5990
const Register no_reg
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler *masm, Register c1, Register c2, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, Label *not_found)
static const int kMaxLength
Definition: objects.h:7166
static const int kValueOffset
Definition: objects.h:6188
Operand SmiUntagOperand(Register object)
const DwVfpRegister d1
bool Contains(Type type) const
Definition: code-stubs.h:1050
const uint32_t kSymbolTag
Definition: objects.h:445
const Register fp
const uint32_t kAsciiStringTag
Definition: objects.h:451
static const int kConstructStubOffset
Definition: objects.h:5608
static const int kExponentBits
Definition: objects.h:1320
static const int kHashShift
Definition: objects.h:7121
static const int kSharedFunctionInfoOffset
Definition: objects.h:5984
void Generate(MacroAssembler *masm)
static const int kBitField2Offset
Definition: objects.h:4995
void Generate(MacroAssembler *masm)
const SwVfpRegister s15
CEntryStub(int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:629
void check(i::Vector< const char > string)
static const int kExponentOffset
Definition: objects.h:1313
static const int kDataUC16CodeOffset
Definition: objects.h:6476
void Generate(MacroAssembler *masm)
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
FlagType type() const
Definition: flags.cc:1358
const Register r5
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:218
const uint32_t kStringEncodingMask
Definition: objects.h:449
static const int kInstanceTypeOffset
Definition: objects.h:4992
static const int kIndexOffset
Definition: objects.h:8132
void Generate(MacroAssembler *masm)
static const int kMantissaOffset
Definition: objects.h:1312
const Register r4
const Register r7
void Generate(MacroAssembler *masm)